/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom 
BCM57412 NetXtreme-E 10Gb Ethernet" }, 108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, 130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, 145 }; 146 147 static const struct pci_device_id bnxt_pci_tbl[] = { 148 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 149 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 150 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 151 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 152 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 153 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 154 { PCI_VDEVICE(BROADCOM, 0x16ca), 
.driver_data = BCM57304 }, 155 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 156 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 157 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 158 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 159 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 160 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 161 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 162 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 163 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 164 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 165 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 166 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 167 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 168 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 169 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 170 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 171 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 172 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 173 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 174 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 175 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 176 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 179 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 180 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 181 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 182 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 183 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 184 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 185 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 186 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, 187 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, 188 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, 189 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, 190 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 192 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, 193 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, 194 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 195 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, 196 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 197 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 198 #ifdef CONFIG_BNXT_SRIOV 199 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 201 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 202 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 203 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 204 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 205 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 206 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 207 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, 
208 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 209 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 210 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 211 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 212 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 213 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 214 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 215 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 216 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 217 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 218 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 219 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, 220 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 221 #endif 222 { 0 } 223 }; 224 225 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 226 227 static const u16 bnxt_vf_req_snif[] = { 228 HWRM_FUNC_CFG, 229 HWRM_FUNC_VF_CFG, 230 HWRM_PORT_PHY_QCFG, 231 HWRM_CFA_L2_FILTER_ALLOC, 232 }; 233 234 static const u16 bnxt_async_events_arr[] = { 235 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 237 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 238 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 239 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 241 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 242 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 244 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 245 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, 246 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 247 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 248 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, 249 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, 250 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, 251 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER, 252 }; 253 254 const u16 bnxt_bstore_to_trace[] = { 255 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE, 256 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE, 257 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE, 258 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE, 259 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE, 260 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE, 261 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE, 262 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE, 263 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE, 264 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE, 265 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE, 266 }; 267 268 static struct workqueue_struct *bnxt_pf_wq; 269 270 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 271 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} 272 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} 273 274 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { 275 .ports = { 276 .src = 0, 277 .dst = 0, 278 }, 279 .addrs = { 280 .v6addrs = { 281 .src = BNXT_IPV6_MASK_NONE, 282 .dst = BNXT_IPV6_MASK_NONE, 283 }, 284 }, 285 }; 286 287 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { 288 .ports = { 289 .src = cpu_to_be16(0xffff), 290 .dst = cpu_to_be16(0xffff), 291 }, 292 .addrs = { 293 .v6addrs = { 294 .src = BNXT_IPV6_MASK_ALL, 295 .dst = BNXT_IPV6_MASK_ALL, 296 }, 
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	struct pci_dev *pdev = bp->pdev;
	u16 prod, last_frag, txts_prod;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
		netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. 
SKB will be linearized.\n", 495 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS); 496 if (skb_linearize(skb)) { 497 dev_kfree_skb_any(skb); 498 dev_core_stats_tx_dropped_inc(dev); 499 return NETDEV_TX_OK; 500 } 501 } 502 #endif 503 free_size = bnxt_tx_avail(bp, txr); 504 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 505 /* We must have raced with NAPI cleanup */ 506 if (net_ratelimit() && txr->kick_pending) 507 netif_warn(bp, tx_err, dev, 508 "bnxt: ring busy w/ flush pending!\n"); 509 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 510 bp->tx_wake_thresh)) 511 return NETDEV_TX_BUSY; 512 } 513 514 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 515 goto tx_free; 516 517 length = skb->len; 518 len = skb_headlen(skb); 519 last_frag = skb_shinfo(skb)->nr_frags; 520 521 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 522 523 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 524 tx_buf->skb = skb; 525 tx_buf->nr_frags = last_frag; 526 527 vlan_tag_flags = 0; 528 cfa_action = bnxt_xmit_get_cfa_action(skb); 529 if (skb_vlan_tag_present(skb)) { 530 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 531 skb_vlan_tag_get(skb); 532 /* Currently supports 8021Q, 8021AD vlan offloads 533 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 534 */ 535 if (skb->vlan_proto == htons(ETH_P_8021Q)) 536 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 537 } 538 539 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && 540 ptp->tx_tstamp_en) { 541 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { 542 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 543 tx_buf->is_ts_pkt = 1; 544 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 545 } else if (!skb_is_gso(skb)) { 546 u16 seq_id, hdr_off; 547 548 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && 549 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { 550 if (vlan_tag_flags) 551 hdr_off += VLAN_HLEN; 552 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 553 tx_buf->is_ts_pkt = 1; 554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 555 556 ptp->txts_req[txts_prod].tx_seqid = seq_id; 557 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; 558 tx_buf->txts_prod = txts_prod; 559 } 560 } 561 } 562 if (unlikely(skb->no_fcs)) 563 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 564 565 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 566 !lflags) { 567 struct tx_push_buffer *tx_push_buf = txr->tx_push; 568 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 569 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 570 void __iomem *db = txr->tx_db.doorbell; 571 void *pdata = tx_push_buf->data; 572 u64 *end; 573 int j, push_len; 574 575 /* Set COAL_NOW to be ready quickly for the next push */ 576 tx_push->tx_bd_len_flags_type = 577 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 578 TX_BD_TYPE_LONG_TX_BD | 579 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 580 TX_BD_FLAGS_COAL_NOW | 581 TX_BD_FLAGS_PACKET_END | 582 TX_BD_CNT(2)); 583 584 if (skb->ip_summed == CHECKSUM_PARTIAL) 585 tx_push1->tx_bd_hsize_lflags = 586 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 587 else 588 tx_push1->tx_bd_hsize_lflags = 0; 589 590 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 591 tx_push1->tx_bd_cfa_action = 592 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 593 594 end = pdata + length; 595 end = PTR_ALIGN(end, 8) - 1; 596 *end = 0; 597 598 skb_copy_from_linear_data(skb, pdata, len); 599 pdata += len; 600 for (j = 0; j < last_frag; j++) { 601 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 602 void *fptr; 603 604 fptr = skb_frag_address_safe(frag); 605 if (!fptr) 606 goto normal_tx; 607 608 memcpy(pdata, fptr, 
skb_frag_size(frag)); 609 pdata += skb_frag_size(frag); 610 } 611 612 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 613 txbd->tx_bd_haddr = txr->data_mapping; 614 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 615 prod = NEXT_TX(prod); 616 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; 617 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 618 memcpy(txbd, tx_push1, sizeof(*txbd)); 619 prod = NEXT_TX(prod); 620 tx_push->doorbell = 621 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 622 DB_RING_IDX(&txr->tx_db, prod)); 623 WRITE_ONCE(txr->tx_prod, prod); 624 625 tx_buf->is_push = 1; 626 netdev_tx_sent_queue(txq, skb->len); 627 wmb(); /* Sync is_push and byte queue before pushing data */ 628 629 push_len = (length + sizeof(*tx_push) + 7) / 8; 630 if (push_len > 16) { 631 __iowrite64_copy(db, tx_push_buf, 16); 632 __iowrite32_copy(db + 4, tx_push_buf + 1, 633 (push_len - 16) << 1); 634 } else { 635 __iowrite64_copy(db, tx_push_buf, push_len); 636 } 637 638 goto tx_done; 639 } 640 641 normal_tx: 642 if (length < BNXT_MIN_PKT_SIZE) { 643 pad = BNXT_MIN_PKT_SIZE - length; 644 if (skb_pad(skb, pad)) 645 /* SKB already freed. */ 646 goto tx_kick_pending; 647 length = BNXT_MIN_PKT_SIZE; 648 } 649 650 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 651 652 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 653 goto tx_free; 654 655 dma_unmap_addr_set(tx_buf, mapping, mapping); 656 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 657 TX_BD_CNT(last_frag + 2); 658 659 txbd->tx_bd_haddr = cpu_to_le64(mapping); 660 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 661 662 prod = NEXT_TX(prod); 663 txbd1 = (struct tx_bd_ext *) 664 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 665 666 txbd1->tx_bd_hsize_lflags = lflags; 667 if (skb_is_gso(skb)) { 668 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 669 u32 hdr_len; 670 671 if (skb->encapsulation) { 672 if (udp_gso) 673 hdr_len = skb_inner_transport_offset(skb) + 674 sizeof(struct udphdr); 675 else 676 hdr_len = skb_inner_tcp_all_headers(skb); 677 } else if (udp_gso) { 678 hdr_len = skb_transport_offset(skb) + 679 sizeof(struct udphdr); 680 } else { 681 hdr_len = skb_tcp_all_headers(skb); 682 } 683 684 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 685 TX_BD_FLAGS_T_IPID | 686 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 687 length = skb_shinfo(skb)->gso_size; 688 txbd1->tx_bd_mss = cpu_to_le32(length); 689 length += hdr_len; 690 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 691 txbd1->tx_bd_hsize_lflags |= 692 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 693 txbd1->tx_bd_mss = 0; 694 } 695 696 length >>= 9; 697 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 698 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 699 skb->len); 700 i = 0; 701 goto tx_dma_error; 702 } 703 flags |= bnxt_lhint_arr[length]; 704 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 705 706 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 707 txbd1->tx_bd_cfa_action = 708 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 709 txbd0 = txbd; 710 for (i = 0; i < last_frag; i++) { 711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 712 713 prod = NEXT_TX(prod); 714 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 715 716 len = skb_frag_size(frag); 717 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 718 DMA_TO_DEVICE); 719 720 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 721 goto tx_dma_error; 722 723 tx_buf = 
&txr->tx_buf_ring[RING_TX(bp, prod)]; 724 dma_unmap_addr_set(tx_buf, mapping, mapping); 725 726 txbd->tx_bd_haddr = cpu_to_le64(mapping); 727 728 flags = len << TX_BD_LEN_SHIFT; 729 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 730 } 731 732 flags &= ~TX_BD_LEN; 733 txbd->tx_bd_len_flags_type = 734 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 735 TX_BD_FLAGS_PACKET_END); 736 737 netdev_tx_sent_queue(txq, skb->len); 738 739 skb_tx_timestamp(skb); 740 741 prod = NEXT_TX(prod); 742 WRITE_ONCE(txr->tx_prod, prod); 743 744 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 745 bnxt_txr_db_kick(bp, txr, prod); 746 } else { 747 if (free_size >= bp->tx_wake_thresh) 748 txbd0->tx_bd_len_flags_type |= 749 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 750 txr->kick_pending = 1; 751 } 752 753 tx_done: 754 755 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 756 if (netdev_xmit_more() && !tx_buf->is_push) { 757 txbd0->tx_bd_len_flags_type &= 758 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 759 bnxt_txr_db_kick(bp, txr, prod); 760 } 761 762 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 763 bp->tx_wake_thresh); 764 } 765 return NETDEV_TX_OK; 766 767 tx_dma_error: 768 last_frag = i; 769 770 /* start back at beginning and unmap skb */ 771 prod = txr->tx_prod; 772 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 773 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 774 skb_headlen(skb), DMA_TO_DEVICE); 775 prod = NEXT_TX(prod); 776 777 /* unmap remaining mapped pages */ 778 for (i = 0; i < last_frag; i++) { 779 prod = NEXT_TX(prod); 780 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 781 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 782 skb_frag_size(&skb_shinfo(skb)->frags[i]), 783 DMA_TO_DEVICE); 784 } 785 786 tx_free: 787 dev_kfree_skb_any(skb); 788 tx_kick_pending: 789 if (BNXT_TX_PTP_IS_SET(lflags)) { 790 txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; 791 atomic64_inc(&bp->ptp_cfg->stats.ts_err); 792 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 793 /* set SKB to err so PTP worker will clean up */ 794 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); 795 } 796 if (txr->kick_pending) 797 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 798 txr->tx_buf_ring[txr->tx_prod].skb = NULL; 799 dev_core_stats_tx_dropped_inc(dev); 800 return NETDEV_TX_OK; 801 } 802 803 /* Returns true if some remaining TX packets not processed. 
 */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	struct pci_dev *pdev = bp->pdev;
	u16 hw_cons = txr->tx_hw_cons;
	unsigned int tx_bytes = 0;
	u16 cons = txr->tx_cons;
	int tx_pkts = 0;
	bool rc = false;

	while (RING_TX(bp, cons) != hw_cons) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		bool is_ts_pkt;
		int j, last;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
		skb = tx_buf->skb;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, cons);
			return rc;
		}

		is_ts_pkt = tx_buf->is_ts_pkt;
		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
			rc = true;
			break;
		}

		cons = NEXT_TX(cons);
		tx_pkts++;
		tx_bytes += skb->len;
		tx_buf->skb = NULL;
		tx_buf->is_ts_pkt = 0;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(is_ts_pkt)) {
			if (BNXT_CHIP_P5(bp)) {
				/* PTP worker takes ownership of the skb */
				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
				skb = NULL;
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);

	return rc;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr;
	bool more = false;
	int i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
			more |= __bnxt_tx_int(bp, txr, budget);
	}
	if (!more)
		bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

static bool bnxt_separate_head_pool(void)
{
	return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       struct bnxt_rx_ring_info *rxr,
				       gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bp->rx_buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
	return page_address(page) + offset;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd 
*rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 943 dma_addr_t mapping; 944 945 if (BNXT_RX_PAGE_MODE(bp)) { 946 unsigned int offset; 947 struct page *page = 948 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 949 950 if (!page) 951 return -ENOMEM; 952 953 mapping += bp->rx_dma_offset; 954 rx_buf->data = page; 955 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 956 } else { 957 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); 958 959 if (!data) 960 return -ENOMEM; 961 962 rx_buf->data = data; 963 rx_buf->data_ptr = data + bp->rx_offset; 964 } 965 rx_buf->mapping = mapping; 966 967 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 968 return 0; 969 } 970 971 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 972 { 973 u16 prod = rxr->rx_prod; 974 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 975 struct bnxt *bp = rxr->bnapi->bp; 976 struct rx_bd *cons_bd, *prod_bd; 977 978 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 979 cons_rx_buf = &rxr->rx_buf_ring[cons]; 980 981 prod_rx_buf->data = data; 982 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 983 984 prod_rx_buf->mapping = cons_rx_buf->mapping; 985 986 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 987 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 988 989 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 990 } 991 992 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 993 { 994 u16 next, max = rxr->rx_agg_bmap_size; 995 996 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 997 if (next >= max) 998 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 999 return next; 1000 } 1001 1002 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 1003 struct bnxt_rx_ring_info *rxr, 1004 u16 prod, gfp_t gfp) 1005 { 1006 struct rx_bd *rxbd = 1007 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1008 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 1009 struct page *page; 1010 dma_addr_t mapping; 1011 u16 sw_prod = rxr->rx_sw_agg_prod; 1012 unsigned int offset = 0; 1013 1014 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 1015 1016 if (!page) 1017 return -ENOMEM; 1018 1019 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1020 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1021 1022 __set_bit(sw_prod, rxr->rx_agg_bmap); 1023 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 1024 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1025 1026 rx_agg_buf->page = page; 1027 rx_agg_buf->offset = offset; 1028 rx_agg_buf->mapping = mapping; 1029 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 1030 rxbd->rx_bd_opaque = sw_prod; 1031 return 0; 1032 } 1033 1034 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 1035 struct bnxt_cp_ring_info *cpr, 1036 u16 cp_cons, u16 curr) 1037 { 1038 struct rx_agg_cmp *agg; 1039 1040 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 1041 agg = (struct rx_agg_cmp *) 1042 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1043 return agg; 1044 } 1045 1046 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 1047 struct bnxt_rx_ring_info *rxr, 1048 u16 agg_id, u16 curr) 1049 { 1050 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 1051 1052 return &tpa_info->agg_arr[curr]; 1053 } 1054 1055 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 1056 u16 start, u32 agg_bufs, bool tpa) 1057 { 1058 struct bnxt_napi *bnapi = cpr->bnapi; 1059 struct bnxt *bp = bnapi->bp; 1060 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1061 u16 prod = rxr->rx_agg_prod; 1062 u16 
sw_prod = rxr->rx_sw_agg_prod; 1063 bool p5_tpa = false; 1064 u32 i; 1065 1066 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1067 p5_tpa = true; 1068 1069 for (i = 0; i < agg_bufs; i++) { 1070 u16 cons; 1071 struct rx_agg_cmp *agg; 1072 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 1073 struct rx_bd *prod_bd; 1074 struct page *page; 1075 1076 if (p5_tpa) 1077 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 1078 else 1079 agg = bnxt_get_agg(bp, cpr, idx, start + i); 1080 cons = agg->rx_agg_cmp_opaque; 1081 __clear_bit(cons, rxr->rx_agg_bmap); 1082 1083 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1084 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1085 1086 __set_bit(sw_prod, rxr->rx_agg_bmap); 1087 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 1088 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1089 1090 /* It is possible for sw_prod to be equal to cons, so 1091 * set cons_rx_buf->page to NULL first. 1092 */ 1093 page = cons_rx_buf->page; 1094 cons_rx_buf->page = NULL; 1095 prod_rx_buf->page = page; 1096 prod_rx_buf->offset = cons_rx_buf->offset; 1097 1098 prod_rx_buf->mapping = cons_rx_buf->mapping; 1099 1100 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1101 1102 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1103 prod_bd->rx_bd_opaque = sw_prod; 1104 1105 prod = NEXT_RX_AGG(prod); 1106 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1107 } 1108 rxr->rx_agg_prod = prod; 1109 rxr->rx_sw_agg_prod = sw_prod; 1110 } 1111 1112 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1113 struct bnxt_rx_ring_info *rxr, 1114 u16 cons, void *data, u8 *data_ptr, 1115 dma_addr_t dma_addr, 1116 unsigned int offset_and_len) 1117 { 1118 unsigned int len = offset_and_len & 0xffff; 1119 struct page *page = data; 1120 u16 prod = rxr->rx_prod; 1121 struct sk_buff *skb; 1122 int err; 1123 1124 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1125 if (unlikely(err)) { 1126 bnxt_reuse_rx_data(rxr, cons, data); 1127 return NULL; 1128 } 1129 dma_addr -= bp->rx_dma_offset; 1130 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1131 bp->rx_dir); 1132 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1133 if (!skb) { 1134 page_pool_recycle_direct(rxr->page_pool, page); 1135 return NULL; 1136 } 1137 skb_mark_for_recycle(skb); 1138 skb_reserve(skb, bp->rx_offset); 1139 __skb_put(skb, len); 1140 1141 return skb; 1142 } 1143 1144 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1145 struct bnxt_rx_ring_info *rxr, 1146 u16 cons, void *data, u8 *data_ptr, 1147 dma_addr_t dma_addr, 1148 unsigned int offset_and_len) 1149 { 1150 unsigned int payload = offset_and_len >> 16; 1151 unsigned int len = offset_and_len & 0xffff; 1152 skb_frag_t *frag; 1153 struct page *page = data; 1154 u16 prod = rxr->rx_prod; 1155 struct sk_buff *skb; 1156 int off, err; 1157 1158 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1159 if (unlikely(err)) { 1160 bnxt_reuse_rx_data(rxr, cons, data); 1161 return NULL; 1162 } 1163 dma_addr -= bp->rx_dma_offset; 1164 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1165 bp->rx_dir); 1166 1167 if (unlikely(!payload)) 1168 payload = eth_get_headlen(bp->dev, data_ptr, len); 1169 1170 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1171 if (!skb) { 1172 page_pool_recycle_direct(rxr->page_pool, page); 1173 return NULL; 1174 } 1175 1176 skb_mark_for_recycle(skb); 1177 off = (void *)data_ptr - page_address(page); 1178 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1179 
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1180 payload + NET_IP_ALIGN); 1181 1182 frag = &skb_shinfo(skb)->frags[0]; 1183 skb_frag_size_sub(frag, payload); 1184 skb_frag_off_add(frag, payload); 1185 skb->data_len -= payload; 1186 skb->tail += payload; 1187 1188 return skb; 1189 } 1190 1191 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1192 struct bnxt_rx_ring_info *rxr, u16 cons, 1193 void *data, u8 *data_ptr, 1194 dma_addr_t dma_addr, 1195 unsigned int offset_and_len) 1196 { 1197 u16 prod = rxr->rx_prod; 1198 struct sk_buff *skb; 1199 int err; 1200 1201 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1202 if (unlikely(err)) { 1203 bnxt_reuse_rx_data(rxr, cons, data); 1204 return NULL; 1205 } 1206 1207 skb = napi_build_skb(data, bp->rx_buf_size); 1208 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1209 bp->rx_dir); 1210 if (!skb) { 1211 page_pool_free_va(rxr->head_pool, data, true); 1212 return NULL; 1213 } 1214 1215 skb_mark_for_recycle(skb); 1216 skb_reserve(skb, bp->rx_offset); 1217 skb_put(skb, offset_and_len & 0xffff); 1218 return skb; 1219 } 1220 1221 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1222 struct bnxt_cp_ring_info *cpr, 1223 struct skb_shared_info *shinfo, 1224 u16 idx, u32 agg_bufs, bool tpa, 1225 struct xdp_buff *xdp) 1226 { 1227 struct bnxt_napi *bnapi = cpr->bnapi; 1228 struct pci_dev *pdev = bp->pdev; 1229 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1230 u16 prod = rxr->rx_agg_prod; 1231 u32 i, total_frag_len = 0; 1232 bool p5_tpa = false; 1233 1234 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1235 p5_tpa = true; 1236 1237 for (i = 0; i < agg_bufs; i++) { 1238 skb_frag_t *frag = &shinfo->frags[i]; 1239 u16 cons, frag_len; 1240 struct rx_agg_cmp *agg; 1241 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1242 struct page *page; 1243 dma_addr_t mapping; 1244 1245 if (p5_tpa) 1246 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1247 else 1248 agg = bnxt_get_agg(bp, cpr, idx, i); 1249 cons = agg->rx_agg_cmp_opaque; 1250 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1251 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1252 1253 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1254 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1255 cons_rx_buf->offset, frag_len); 1256 shinfo->nr_frags = i + 1; 1257 __clear_bit(cons, rxr->rx_agg_bmap); 1258 1259 /* It is possible for bnxt_alloc_rx_page() to allocate 1260 * a sw_prod index that equals the cons index, so we 1261 * need to clear the cons entry now. 1262 */ 1263 mapping = cons_rx_buf->mapping; 1264 page = cons_rx_buf->page; 1265 cons_rx_buf->page = NULL; 1266 1267 if (xdp && page_is_pfmemalloc(page)) 1268 xdp_buff_set_frag_pfmemalloc(xdp); 1269 1270 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1271 --shinfo->nr_frags; 1272 cons_rx_buf->page = page; 1273 1274 /* Update prod since possibly some pages have been 1275 * allocated already. 
1276 */ 1277 rxr->rx_agg_prod = prod; 1278 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1279 return 0; 1280 } 1281 1282 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1283 bp->rx_dir); 1284 1285 total_frag_len += frag_len; 1286 prod = NEXT_RX_AGG(prod); 1287 } 1288 rxr->rx_agg_prod = prod; 1289 return total_frag_len; 1290 } 1291 1292 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1293 struct bnxt_cp_ring_info *cpr, 1294 struct sk_buff *skb, u16 idx, 1295 u32 agg_bufs, bool tpa) 1296 { 1297 struct skb_shared_info *shinfo = skb_shinfo(skb); 1298 u32 total_frag_len = 0; 1299 1300 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1301 agg_bufs, tpa, NULL); 1302 if (!total_frag_len) { 1303 skb_mark_for_recycle(skb); 1304 dev_kfree_skb(skb); 1305 return NULL; 1306 } 1307 1308 skb->data_len += total_frag_len; 1309 skb->len += total_frag_len; 1310 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1311 return skb; 1312 } 1313 1314 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1315 struct bnxt_cp_ring_info *cpr, 1316 struct xdp_buff *xdp, u16 idx, 1317 u32 agg_bufs, bool tpa) 1318 { 1319 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1320 u32 total_frag_len = 0; 1321 1322 if (!xdp_buff_has_frags(xdp)) 1323 shinfo->nr_frags = 0; 1324 1325 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1326 idx, agg_bufs, tpa, xdp); 1327 if (total_frag_len) { 1328 xdp_buff_set_frags_flag(xdp); 1329 shinfo->nr_frags = agg_bufs; 1330 shinfo->xdp_frags_size = total_frag_len; 1331 } 1332 return total_frag_len; 1333 } 1334 1335 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1336 u8 agg_bufs, u32 *raw_cons) 1337 { 1338 u16 last; 1339 struct rx_agg_cmp *agg; 1340 1341 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1342 last = RING_CMP(*raw_cons); 1343 agg = (struct rx_agg_cmp *) 1344 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1345 return RX_AGG_CMP_VALID(agg, *raw_cons); 1346 } 1347 1348 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, 1349 unsigned int len, 1350 dma_addr_t mapping) 1351 { 1352 struct bnxt *bp = bnapi->bp; 1353 struct pci_dev *pdev = bp->pdev; 1354 struct sk_buff *skb; 1355 1356 skb = napi_alloc_skb(&bnapi->napi, len); 1357 if (!skb) 1358 return NULL; 1359 1360 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, 1361 bp->rx_dir); 1362 1363 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1364 len + NET_IP_ALIGN); 1365 1366 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, 1367 bp->rx_dir); 1368 1369 skb_put(skb, len); 1370 1371 return skb; 1372 } 1373 1374 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1375 unsigned int len, 1376 dma_addr_t mapping) 1377 { 1378 return bnxt_copy_data(bnapi, data, len, mapping); 1379 } 1380 1381 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, 1382 struct xdp_buff *xdp, 1383 unsigned int len, 1384 dma_addr_t mapping) 1385 { 1386 unsigned int metasize = 0; 1387 u8 *data = xdp->data; 1388 struct sk_buff *skb; 1389 1390 len = xdp->data_end - xdp->data_meta; 1391 metasize = xdp->data - xdp->data_meta; 1392 data = xdp->data_meta; 1393 1394 skb = bnxt_copy_data(bnapi, data, len, mapping); 1395 if (!skb) 1396 return skb; 1397 1398 if (metasize) { 1399 skb_metadata_set(skb, metasize); 1400 __skb_pull(skb, metasize); 1401 } 1402 1403 return skb; 1404 } 1405 1406 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1407 u32 *raw_cons, void *cmp) 1408 { 
1409 struct rx_cmp *rxcmp = cmp; 1410 u32 tmp_raw_cons = *raw_cons; 1411 u8 cmp_type, agg_bufs = 0; 1412 1413 cmp_type = RX_CMP_TYPE(rxcmp); 1414 1415 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1416 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1417 RX_CMP_AGG_BUFS) >> 1418 RX_CMP_AGG_BUFS_SHIFT; 1419 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1420 struct rx_tpa_end_cmp *tpa_end = cmp; 1421 1422 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1423 return 0; 1424 1425 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1426 } 1427 1428 if (agg_bufs) { 1429 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1430 return -EBUSY; 1431 } 1432 *raw_cons = tmp_raw_cons; 1433 return 0; 1434 } 1435 1436 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1437 { 1438 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1439 u16 idx = agg_id & MAX_TPA_P5_MASK; 1440 1441 if (test_bit(idx, map->agg_idx_bmap)) 1442 idx = find_first_zero_bit(map->agg_idx_bmap, 1443 BNXT_AGG_IDX_BMAP_SIZE); 1444 __set_bit(idx, map->agg_idx_bmap); 1445 map->agg_id_tbl[agg_id] = idx; 1446 return idx; 1447 } 1448 1449 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1450 { 1451 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1452 1453 __clear_bit(idx, map->agg_idx_bmap); 1454 } 1455 1456 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1457 { 1458 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1459 1460 return map->agg_id_tbl[agg_id]; 1461 } 1462 1463 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1464 struct rx_tpa_start_cmp *tpa_start, 1465 struct rx_tpa_start_cmp_ext *tpa_start1) 1466 { 1467 tpa_info->cfa_code_valid = 1; 1468 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1469 tpa_info->vlan_valid = 0; 1470 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1471 tpa_info->vlan_valid = 1; 1472 tpa_info->metadata = 1473 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1474 } 1475 } 1476 1477 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1478 struct rx_tpa_start_cmp *tpa_start, 1479 struct rx_tpa_start_cmp_ext *tpa_start1) 1480 { 1481 tpa_info->vlan_valid = 0; 1482 if (TPA_START_VLAN_VALID(tpa_start)) { 1483 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1484 u32 vlan_proto = ETH_P_8021Q; 1485 1486 tpa_info->vlan_valid = 1; 1487 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1488 vlan_proto = ETH_P_8021AD; 1489 tpa_info->metadata = vlan_proto << 16 | 1490 TPA_START_METADATA0_TCI(tpa_start1); 1491 } 1492 } 1493 1494 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1495 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1496 struct rx_tpa_start_cmp_ext *tpa_start1) 1497 { 1498 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1499 struct bnxt_tpa_info *tpa_info; 1500 u16 cons, prod, agg_id; 1501 struct rx_bd *prod_bd; 1502 dma_addr_t mapping; 1503 1504 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1505 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1506 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1507 } else { 1508 agg_id = TPA_START_AGG_ID(tpa_start); 1509 } 1510 cons = tpa_start->rx_tpa_start_cmp_opaque; 1511 prod = rxr->rx_prod; 1512 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1513 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1514 tpa_info = &rxr->rx_tpa[agg_id]; 1515 1516 if (unlikely(cons != rxr->rx_next_cons || 1517 TPA_START_ERROR(tpa_start))) { 1518 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1519 cons, rxr->rx_next_cons, 1520 
TPA_START_ERROR_CODE(tpa_start1)); 1521 bnxt_sched_reset_rxr(bp, rxr); 1522 return; 1523 } 1524 prod_rx_buf->data = tpa_info->data; 1525 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1526 1527 mapping = tpa_info->mapping; 1528 prod_rx_buf->mapping = mapping; 1529 1530 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1531 1532 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1533 1534 tpa_info->data = cons_rx_buf->data; 1535 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1536 cons_rx_buf->data = NULL; 1537 tpa_info->mapping = cons_rx_buf->mapping; 1538 1539 tpa_info->len = 1540 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1541 RX_TPA_START_CMP_LEN_SHIFT; 1542 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1543 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1544 tpa_info->gso_type = SKB_GSO_TCPV4; 1545 if (TPA_START_IS_IPV6(tpa_start1)) 1546 tpa_info->gso_type = SKB_GSO_TCPV6; 1547 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1548 else if (!BNXT_CHIP_P4_PLUS(bp) && 1549 TPA_START_HASH_TYPE(tpa_start) == 3) 1550 tpa_info->gso_type = SKB_GSO_TCPV6; 1551 tpa_info->rss_hash = 1552 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1553 } else { 1554 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1555 tpa_info->gso_type = 0; 1556 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1557 } 1558 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1559 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1560 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1561 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1562 else 1563 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1564 tpa_info->agg_count = 0; 1565 1566 rxr->rx_prod = NEXT_RX(prod); 1567 cons = RING_RX(bp, NEXT_RX(cons)); 1568 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1569 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1570 1571 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1572 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1573 cons_rx_buf->data = NULL; 1574 } 1575 1576 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1577 { 1578 if (agg_bufs) 1579 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1580 } 1581 1582 #ifdef CONFIG_INET 1583 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1584 { 1585 struct udphdr *uh = NULL; 1586 1587 if (ip_proto == htons(ETH_P_IP)) { 1588 struct iphdr *iph = (struct iphdr *)skb->data; 1589 1590 if (iph->protocol == IPPROTO_UDP) 1591 uh = (struct udphdr *)(iph + 1); 1592 } else { 1593 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1594 1595 if (iph->nexthdr == IPPROTO_UDP) 1596 uh = (struct udphdr *)(iph + 1); 1597 } 1598 if (uh) { 1599 if (uh->check) 1600 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1601 else 1602 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1603 } 1604 } 1605 #endif 1606 1607 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1608 int payload_off, int tcp_ts, 1609 struct sk_buff *skb) 1610 { 1611 #ifdef CONFIG_INET 1612 struct tcphdr *th; 1613 int len, nw_off; 1614 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1615 u32 hdr_info = tpa_info->hdr_info; 1616 bool loopback = false; 1617 1618 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1619 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1620 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1621 1622 /* If the packet is an internal loopback packet, the offsets will 1623 * have an extra 4 bytes. 
 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? 
dev : bp->dev; 1787 } 1788 1789 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1790 struct bnxt_cp_ring_info *cpr, 1791 u32 *raw_cons, 1792 struct rx_tpa_end_cmp *tpa_end, 1793 struct rx_tpa_end_cmp_ext *tpa_end1, 1794 u8 *event) 1795 { 1796 struct bnxt_napi *bnapi = cpr->bnapi; 1797 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1798 struct net_device *dev = bp->dev; 1799 u8 *data_ptr, agg_bufs; 1800 unsigned int len; 1801 struct bnxt_tpa_info *tpa_info; 1802 dma_addr_t mapping; 1803 struct sk_buff *skb; 1804 u16 idx = 0, agg_id; 1805 void *data; 1806 bool gro; 1807 1808 if (unlikely(bnapi->in_reset)) { 1809 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1810 1811 if (rc < 0) 1812 return ERR_PTR(-EBUSY); 1813 return NULL; 1814 } 1815 1816 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1817 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1818 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1819 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1820 tpa_info = &rxr->rx_tpa[agg_id]; 1821 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1822 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1823 agg_bufs, tpa_info->agg_count); 1824 agg_bufs = tpa_info->agg_count; 1825 } 1826 tpa_info->agg_count = 0; 1827 *event |= BNXT_AGG_EVENT; 1828 bnxt_free_agg_idx(rxr, agg_id); 1829 idx = agg_id; 1830 gro = !!(bp->flags & BNXT_FLAG_GRO); 1831 } else { 1832 agg_id = TPA_END_AGG_ID(tpa_end); 1833 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1834 tpa_info = &rxr->rx_tpa[agg_id]; 1835 idx = RING_CMP(*raw_cons); 1836 if (agg_bufs) { 1837 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1838 return ERR_PTR(-EBUSY); 1839 1840 *event |= BNXT_AGG_EVENT; 1841 idx = NEXT_CMP(idx); 1842 } 1843 gro = !!TPA_END_GRO(tpa_end); 1844 } 1845 data = tpa_info->data; 1846 data_ptr = tpa_info->data_ptr; 1847 prefetch(data_ptr); 1848 len = tpa_info->len; 1849 mapping = tpa_info->mapping; 1850 1851 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1852 bnxt_abort_tpa(cpr, idx, agg_bufs); 1853 if (agg_bufs > MAX_SKB_FRAGS) 1854 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1855 agg_bufs, (int)MAX_SKB_FRAGS); 1856 return NULL; 1857 } 1858 1859 if (len <= bp->rx_copybreak) { 1860 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1861 if (!skb) { 1862 bnxt_abort_tpa(cpr, idx, agg_bufs); 1863 cpr->sw_stats->rx.rx_oom_discards += 1; 1864 return NULL; 1865 } 1866 } else { 1867 u8 *new_data; 1868 dma_addr_t new_mapping; 1869 1870 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, 1871 GFP_ATOMIC); 1872 if (!new_data) { 1873 bnxt_abort_tpa(cpr, idx, agg_bufs); 1874 cpr->sw_stats->rx.rx_oom_discards += 1; 1875 return NULL; 1876 } 1877 1878 tpa_info->data = new_data; 1879 tpa_info->data_ptr = new_data + bp->rx_offset; 1880 tpa_info->mapping = new_mapping; 1881 1882 skb = napi_build_skb(data, bp->rx_buf_size); 1883 dma_sync_single_for_cpu(&bp->pdev->dev, mapping, 1884 bp->rx_buf_use_size, bp->rx_dir); 1885 1886 if (!skb) { 1887 page_pool_free_va(rxr->head_pool, data, true); 1888 bnxt_abort_tpa(cpr, idx, agg_bufs); 1889 cpr->sw_stats->rx.rx_oom_discards += 1; 1890 return NULL; 1891 } 1892 skb_mark_for_recycle(skb); 1893 skb_reserve(skb, bp->rx_offset); 1894 skb_put(skb, len); 1895 } 1896 1897 if (agg_bufs) { 1898 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1899 if (!skb) { 1900 /* Page reuse already handled by bnxt_rx_pages(). 
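* Only the rx_oom_discards counter needs to be updated here; returning NULL lets the caller treat this TPA completion as a dropped packet.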
*/ 1901 cpr->sw_stats->rx.rx_oom_discards += 1; 1902 return NULL; 1903 } 1904 } 1905 1906 if (tpa_info->cfa_code_valid) 1907 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1908 skb->protocol = eth_type_trans(skb, dev); 1909 1910 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1911 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1912 1913 if (tpa_info->vlan_valid && 1914 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1915 __be16 vlan_proto = htons(tpa_info->metadata >> 1916 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1917 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1918 1919 if (eth_type_vlan(vlan_proto)) { 1920 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1921 } else { 1922 dev_kfree_skb(skb); 1923 return NULL; 1924 } 1925 } 1926 1927 skb_checksum_none_assert(skb); 1928 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1929 skb->ip_summed = CHECKSUM_UNNECESSARY; 1930 skb->csum_level = 1931 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1932 } 1933 1934 if (gro) 1935 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1936 1937 return skb; 1938 } 1939 1940 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1941 struct rx_agg_cmp *rx_agg) 1942 { 1943 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1944 struct bnxt_tpa_info *tpa_info; 1945 1946 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1947 tpa_info = &rxr->rx_tpa[agg_id]; 1948 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1949 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1950 } 1951 1952 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1953 struct sk_buff *skb) 1954 { 1955 skb_mark_for_recycle(skb); 1956 1957 if (skb->dev != bp->dev) { 1958 /* this packet belongs to a vf-rep */ 1959 bnxt_vf_rep_rx(bp, skb); 1960 return; 1961 } 1962 skb_record_rx_queue(skb, bnapi->index); 1963 napi_gro_receive(&bnapi->napi, skb); 1964 } 1965 1966 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 1967 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 1968 { 1969 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 1970 1971 if (BNXT_PTP_RX_TS_VALID(flags)) 1972 goto ts_valid; 1973 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 1974 return false; 1975 1976 ts_valid: 1977 *cmpl_ts = ts; 1978 return true; 1979 } 1980 1981 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1982 struct rx_cmp *rxcmp, 1983 struct rx_cmp_ext *rxcmp1) 1984 { 1985 __be16 vlan_proto; 1986 u16 vtag; 1987 1988 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1989 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1990 u32 meta_data; 1991 1992 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1993 return skb; 1994 1995 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1996 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1997 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1998 if (eth_type_vlan(vlan_proto)) 1999 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2000 else 2001 goto vlan_err; 2002 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2003 if (RX_CMP_VLAN_VALID(rxcmp)) { 2004 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 2005 2006 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 2007 vlan_proto = htons(ETH_P_8021Q); 2008 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 2009 vlan_proto = htons(ETH_P_8021AD); 2010 else 2011 goto vlan_err; 2012 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 2013 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2014 } 2015 } 2016 return skb; 2017 vlan_err: 2018 dev_kfree_skb(skb); 2019 return NULL; 2020 } 2021 2022 static enum 
pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 2023 struct rx_cmp *rxcmp) 2024 { 2025 u8 ext_op; 2026 2027 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 2028 switch (ext_op) { 2029 case EXT_OP_INNER_4: 2030 case EXT_OP_OUTER_4: 2031 case EXT_OP_INNFL_3: 2032 case EXT_OP_OUTFL_3: 2033 return PKT_HASH_TYPE_L4; 2034 default: 2035 return PKT_HASH_TYPE_L3; 2036 } 2037 } 2038 2039 /* returns the following: 2040 * 1 - 1 packet successfully received 2041 * 0 - successful TPA_START, packet not completed yet 2042 * -EBUSY - completion ring does not have all the agg buffers yet 2043 * -ENOMEM - packet aborted due to out of memory 2044 * -EIO - packet aborted due to hw error indicated in BD 2045 */ 2046 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2047 u32 *raw_cons, u8 *event) 2048 { 2049 struct bnxt_napi *bnapi = cpr->bnapi; 2050 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2051 struct net_device *dev = bp->dev; 2052 struct rx_cmp *rxcmp; 2053 struct rx_cmp_ext *rxcmp1; 2054 u32 tmp_raw_cons = *raw_cons; 2055 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 2056 struct skb_shared_info *sinfo; 2057 struct bnxt_sw_rx_bd *rx_buf; 2058 unsigned int len; 2059 u8 *data_ptr, agg_bufs, cmp_type; 2060 bool xdp_active = false; 2061 dma_addr_t dma_addr; 2062 struct sk_buff *skb; 2063 struct xdp_buff xdp; 2064 u32 flags, misc; 2065 u32 cmpl_ts; 2066 void *data; 2067 int rc = 0; 2068 2069 rxcmp = (struct rx_cmp *) 2070 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2071 2072 cmp_type = RX_CMP_TYPE(rxcmp); 2073 2074 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 2075 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 2076 goto next_rx_no_prod_no_len; 2077 } 2078 2079 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2080 cp_cons = RING_CMP(tmp_raw_cons); 2081 rxcmp1 = (struct rx_cmp_ext *) 2082 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2083 2084 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2085 return -EBUSY; 2086 2087 /* The valid test of the entry must be done first before 2088 * reading any further. 
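* The dma_rmb() below keeps the CPU from reading the rest of the completion entry until the valid bit has been checked.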
2089 */ 2090 dma_rmb(); 2091 prod = rxr->rx_prod; 2092 2093 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 2094 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2095 bnxt_tpa_start(bp, rxr, cmp_type, 2096 (struct rx_tpa_start_cmp *)rxcmp, 2097 (struct rx_tpa_start_cmp_ext *)rxcmp1); 2098 2099 *event |= BNXT_RX_EVENT; 2100 goto next_rx_no_prod_no_len; 2101 2102 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2103 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 2104 (struct rx_tpa_end_cmp *)rxcmp, 2105 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 2106 2107 if (IS_ERR(skb)) 2108 return -EBUSY; 2109 2110 rc = -ENOMEM; 2111 if (likely(skb)) { 2112 bnxt_deliver_skb(bp, bnapi, skb); 2113 rc = 1; 2114 } 2115 *event |= BNXT_RX_EVENT; 2116 goto next_rx_no_prod_no_len; 2117 } 2118 2119 cons = rxcmp->rx_cmp_opaque; 2120 if (unlikely(cons != rxr->rx_next_cons)) { 2121 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 2122 2123 /* 0xffff is forced error, don't print it */ 2124 if (rxr->rx_next_cons != 0xffff) 2125 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 2126 cons, rxr->rx_next_cons); 2127 bnxt_sched_reset_rxr(bp, rxr); 2128 if (rc1) 2129 return rc1; 2130 goto next_rx_no_prod_no_len; 2131 } 2132 rx_buf = &rxr->rx_buf_ring[cons]; 2133 data = rx_buf->data; 2134 data_ptr = rx_buf->data_ptr; 2135 prefetch(data_ptr); 2136 2137 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2138 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2139 2140 if (agg_bufs) { 2141 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2142 return -EBUSY; 2143 2144 cp_cons = NEXT_CMP(cp_cons); 2145 *event |= BNXT_AGG_EVENT; 2146 } 2147 *event |= BNXT_RX_EVENT; 2148 2149 rx_buf->data = NULL; 2150 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2151 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2152 2153 bnxt_reuse_rx_data(rxr, cons, data); 2154 if (agg_bufs) 2155 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2156 false); 2157 2158 rc = -EIO; 2159 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2160 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; 2161 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2162 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2163 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2164 rx_err); 2165 bnxt_sched_reset_rxr(bp, rxr); 2166 } 2167 } 2168 goto next_rx_no_len; 2169 } 2170 2171 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2172 len = flags >> RX_CMP_LEN_SHIFT; 2173 dma_addr = rx_buf->mapping; 2174 2175 if (bnxt_xdp_attached(bp, rxr)) { 2176 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2177 if (agg_bufs) { 2178 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2179 cp_cons, agg_bufs, 2180 false); 2181 if (!frag_len) 2182 goto oom_next_rx; 2183 2184 } 2185 xdp_active = true; 2186 } 2187 2188 if (xdp_active) { 2189 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { 2190 rc = 1; 2191 goto next_rx; 2192 } 2193 if (xdp_buff_has_frags(&xdp)) { 2194 sinfo = xdp_get_shared_info_from_buff(&xdp); 2195 agg_bufs = sinfo->nr_frags; 2196 } else { 2197 agg_bufs = 0; 2198 } 2199 } 2200 2201 if (len <= bp->rx_copybreak) { 2202 if (!xdp_active) 2203 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2204 else 2205 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); 2206 bnxt_reuse_rx_data(rxr, cons, data); 2207 if (!skb) { 2208 if (agg_bufs) { 2209 if (!xdp_active) 2210 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2211 agg_bufs, false); 2212 else 2213 bnxt_xdp_buff_frags_free(rxr, &xdp); 2214 } 2215 goto oom_next_rx; 2216 } 2217 } 
else { 2218 u32 payload; 2219 2220 if (rx_buf->data_ptr == data_ptr) 2221 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2222 else 2223 payload = 0; 2224 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2225 payload | len); 2226 if (!skb) 2227 goto oom_next_rx; 2228 } 2229 2230 if (agg_bufs) { 2231 if (!xdp_active) { 2232 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2233 if (!skb) 2234 goto oom_next_rx; 2235 } else { 2236 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, 2237 rxr->page_pool, &xdp); 2238 if (!skb) { 2239 /* we should be able to free the old skb here */ 2240 bnxt_xdp_buff_frags_free(rxr, &xdp); 2241 goto oom_next_rx; 2242 } 2243 } 2244 } 2245 2246 if (RX_CMP_HASH_VALID(rxcmp)) { 2247 enum pkt_hash_types type; 2248 2249 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2250 type = bnxt_rss_ext_op(bp, rxcmp); 2251 } else { 2252 u32 itypes = RX_CMP_ITYPES(rxcmp); 2253 2254 if (itypes == RX_CMP_FLAGS_ITYPE_TCP || 2255 itypes == RX_CMP_FLAGS_ITYPE_UDP) 2256 type = PKT_HASH_TYPE_L4; 2257 else 2258 type = PKT_HASH_TYPE_L3; 2259 } 2260 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2261 } 2262 2263 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2264 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2265 skb->protocol = eth_type_trans(skb, dev); 2266 2267 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2268 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2269 if (!skb) 2270 goto next_rx; 2271 } 2272 2273 skb_checksum_none_assert(skb); 2274 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2275 if (dev->features & NETIF_F_RXCSUM) { 2276 skb->ip_summed = CHECKSUM_UNNECESSARY; 2277 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2278 } 2279 } else { 2280 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2281 if (dev->features & NETIF_F_RXCSUM) 2282 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; 2283 } 2284 } 2285 2286 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2287 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2288 u64 ns, ts; 2289 2290 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2291 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2292 2293 ns = bnxt_timecounter_cyc2time(ptp, ts); 2294 memset(skb_hwtstamps(skb), 0, 2295 sizeof(*skb_hwtstamps(skb))); 2296 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2297 } 2298 } 2299 } 2300 bnxt_deliver_skb(bp, bnapi, skb); 2301 rc = 1; 2302 2303 next_rx: 2304 cpr->rx_packets += 1; 2305 cpr->rx_bytes += len; 2306 2307 next_rx_no_len: 2308 rxr->rx_prod = NEXT_RX(prod); 2309 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2310 2311 next_rx_no_prod_no_len: 2312 *raw_cons = tmp_raw_cons; 2313 2314 return rc; 2315 2316 oom_next_rx: 2317 cpr->sw_stats->rx.rx_oom_discards += 1; 2318 rc = -ENOMEM; 2319 goto next_rx; 2320 } 2321 2322 /* In netpoll mode, if we are using a combined completion ring, we need to 2323 * discard the rx packets and recycle the buffers. 
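* This is done by forcing an error status into the completion before passing it to bnxt_rx_pkt(), so the rx buffers are reused instead of being turned into skbs.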
2324 */ 2325 static int bnxt_force_rx_discard(struct bnxt *bp, 2326 struct bnxt_cp_ring_info *cpr, 2327 u32 *raw_cons, u8 *event) 2328 { 2329 u32 tmp_raw_cons = *raw_cons; 2330 struct rx_cmp_ext *rxcmp1; 2331 struct rx_cmp *rxcmp; 2332 u16 cp_cons; 2333 u8 cmp_type; 2334 int rc; 2335 2336 cp_cons = RING_CMP(tmp_raw_cons); 2337 rxcmp = (struct rx_cmp *) 2338 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2339 2340 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2341 cp_cons = RING_CMP(tmp_raw_cons); 2342 rxcmp1 = (struct rx_cmp_ext *) 2343 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2344 2345 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2346 return -EBUSY; 2347 2348 /* The valid test of the entry must be done first before 2349 * reading any further. 2350 */ 2351 dma_rmb(); 2352 cmp_type = RX_CMP_TYPE(rxcmp); 2353 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2354 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2355 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2356 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2357 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2358 struct rx_tpa_end_cmp_ext *tpa_end1; 2359 2360 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2361 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2362 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2363 } 2364 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2365 if (rc && rc != -EBUSY) 2366 cpr->sw_stats->rx.rx_netpoll_discards += 1; 2367 return rc; 2368 } 2369 2370 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2371 { 2372 struct bnxt_fw_health *fw_health = bp->fw_health; 2373 u32 reg = fw_health->regs[reg_idx]; 2374 u32 reg_type, reg_off, val = 0; 2375 2376 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2377 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2378 switch (reg_type) { 2379 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2380 pci_read_config_dword(bp->pdev, reg_off, &val); 2381 break; 2382 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2383 reg_off = fw_health->mapped_regs[reg_idx]; 2384 fallthrough; 2385 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2386 val = readl(bp->bar0 + reg_off); 2387 break; 2388 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2389 val = readl(bp->bar1 + reg_off); 2390 break; 2391 } 2392 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2393 val &= fw_health->fw_reset_inprog_reg_mask; 2394 return val; 2395 } 2396 2397 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2398 { 2399 int i; 2400 2401 for (i = 0; i < bp->rx_nr_rings; i++) { 2402 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2403 struct bnxt_ring_grp_info *grp_info; 2404 2405 grp_info = &bp->grp_info[grp_idx]; 2406 if (grp_info->agg_fw_ring_id == ring_id) 2407 return grp_idx; 2408 } 2409 return INVALID_HW_RING_ID; 2410 } 2411 2412 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2413 { 2414 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2415 2416 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2417 return link_info->force_link_speed2; 2418 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2419 return link_info->force_pam4_link_speed; 2420 return link_info->force_link_speed; 2421 } 2422 2423 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2424 { 2425 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2426 2427 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2428 link_info->req_link_speed = link_info->force_link_speed2; 2429 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2430 switch (link_info->req_link_speed) { 2431 case BNXT_LINK_SPEED_50GB_PAM4: 2432 case BNXT_LINK_SPEED_100GB_PAM4: 2433 case BNXT_LINK_SPEED_200GB_PAM4: 2434 case BNXT_LINK_SPEED_400GB_PAM4: 
2435 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2436 break; 2437 case BNXT_LINK_SPEED_100GB_PAM4_112: 2438 case BNXT_LINK_SPEED_200GB_PAM4_112: 2439 case BNXT_LINK_SPEED_400GB_PAM4_112: 2440 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2441 break; 2442 default: 2443 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2444 } 2445 return; 2446 } 2447 link_info->req_link_speed = link_info->force_link_speed; 2448 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2449 if (link_info->force_pam4_link_speed) { 2450 link_info->req_link_speed = link_info->force_pam4_link_speed; 2451 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2452 } 2453 } 2454 2455 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2456 { 2457 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2458 2459 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2460 link_info->advertising = link_info->auto_link_speeds2; 2461 return; 2462 } 2463 link_info->advertising = link_info->auto_link_speeds; 2464 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2465 } 2466 2467 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2468 { 2469 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2470 2471 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2472 if (link_info->req_link_speed != link_info->force_link_speed2) 2473 return true; 2474 return false; 2475 } 2476 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2477 link_info->req_link_speed != link_info->force_link_speed) 2478 return true; 2479 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2480 link_info->req_link_speed != link_info->force_pam4_link_speed) 2481 return true; 2482 return false; 2483 } 2484 2485 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2486 { 2487 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2488 2489 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2490 if (link_info->advertising != link_info->auto_link_speeds2) 2491 return true; 2492 return false; 2493 } 2494 if (link_info->advertising != link_info->auto_link_speeds || 2495 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2496 return true; 2497 return false; 2498 } 2499 2500 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type) 2501 { 2502 u32 flags = bp->ctx->ctx_arr[type].flags; 2503 2504 return (flags & BNXT_CTX_MEM_TYPE_VALID) && 2505 ((flags & BNXT_CTX_MEM_FW_TRACE) || 2506 (flags & BNXT_CTX_MEM_FW_BIN_TRACE)); 2507 } 2508 2509 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm) 2510 { 2511 u32 mem_size, pages, rem_bytes, magic_byte_offset; 2512 u16 trace_type = bnxt_bstore_to_trace[ctxm->type]; 2513 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 2514 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl; 2515 struct bnxt_bs_trace_info *bs_trace; 2516 int last_pg; 2517 2518 if (ctxm->instance_bmap && ctxm->instance_bmap > 1) 2519 return; 2520 2521 mem_size = ctxm->max_entries * ctxm->entry_size; 2522 rem_bytes = mem_size % BNXT_PAGE_SIZE; 2523 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 2524 2525 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1); 2526 magic_byte_offset = (rem_bytes ? 
rem_bytes : BNXT_PAGE_SIZE) - 1; 2527 2528 rmem = &ctx_pg[0].ring_mem; 2529 bs_trace = &bp->bs_trace[trace_type]; 2530 bs_trace->ctx_type = ctxm->type; 2531 bs_trace->trace_type = trace_type; 2532 if (pages > MAX_CTX_PAGES) { 2533 int last_pg_dir = rmem->nr_pages - 1; 2534 2535 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem; 2536 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg]; 2537 } else { 2538 bs_trace->magic_byte = rmem->pg_arr[last_pg]; 2539 } 2540 bs_trace->magic_byte += magic_byte_offset; 2541 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE; 2542 } 2543 2544 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \ 2545 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\ 2546 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT) 2547 2548 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \ 2549 (((data2) & \ 2550 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\ 2551 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT) 2552 2553 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2554 ((data2) & \ 2555 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2556 2557 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2558 (((data2) & \ 2559 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2560 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2561 2562 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2563 ((data1) & \ 2564 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2565 2566 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2567 (((data1) & \ 2568 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2569 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2570 2571 /* Return true if the workqueue has to be scheduled */ 2572 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2573 { 2574 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2575 2576 switch (err_type) { 2577 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2578 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2579 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2580 break; 2581 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2582 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2583 break; 2584 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2585 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2586 break; 2587 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2588 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2589 char *threshold_type; 2590 bool notify = false; 2591 char *dir_str; 2592 2593 switch (type) { 2594 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2595 threshold_type = "warning"; 2596 break; 2597 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2598 threshold_type = "critical"; 2599 break; 2600 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2601 threshold_type = "fatal"; 2602 break; 2603 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2604 threshold_type = "shutdown"; 2605 break; 2606 default: 2607 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2608 return false; 2609 } 2610 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2611 dir_str = "above"; 2612 notify = true; 2613 } else { 2614 dir_str = "below"; 2615 } 2616 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2617 dir_str, threshold_type); 2618 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2619 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2620 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2621 if (notify) { 2622 bp->thermal_threshold_type = type; 2623 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2624 return true; 2625 } 2626 return false; 2627 } 2628 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: 2629 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); 2630 break; 2631 default: 2632 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2633 err_type); 2634 break; 2635 } 2636 return false; 2637 } 2638 2639 #define BNXT_GET_EVENT_PORT(data) \ 2640 ((data) & \ 2641 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2642 2643 #define BNXT_EVENT_RING_TYPE(data2) \ 2644 ((data2) & \ 2645 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2646 2647 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2648 (BNXT_EVENT_RING_TYPE(data2) == \ 2649 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2650 2651 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2652 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2653 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2654 2655 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2656 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2657 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2658 2659 #define BNXT_PHC_BITS 48 2660 2661 static int bnxt_async_event_process(struct bnxt *bp, 2662 struct hwrm_async_event_cmpl *cmpl) 2663 { 2664 u16 event_id = le16_to_cpu(cmpl->event_id); 2665 u32 data1 = le32_to_cpu(cmpl->event_data1); 2666 u32 data2 = le32_to_cpu(cmpl->event_data2); 2667 2668 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2669 event_id, data1, data2); 2670 2671 /* TODO CHIMP_FW: Define event id's for link change, error etc 
*/ 2672 switch (event_id) { 2673 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2674 struct bnxt_link_info *link_info = &bp->link_info; 2675 2676 if (BNXT_VF(bp)) 2677 goto async_event_process_exit; 2678 2679 /* print unsupported speed warning in forced speed mode only */ 2680 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2681 (data1 & 0x20000)) { 2682 u16 fw_speed = bnxt_get_force_speed(link_info); 2683 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2684 2685 if (speed != SPEED_UNKNOWN) 2686 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2687 speed); 2688 } 2689 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2690 } 2691 fallthrough; 2692 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2693 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2694 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2695 fallthrough; 2696 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2697 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2698 break; 2699 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2700 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2701 break; 2702 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2703 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2704 2705 if (BNXT_VF(bp)) 2706 break; 2707 2708 if (bp->pf.port_id != port_id) 2709 break; 2710 2711 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2712 break; 2713 } 2714 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2715 if (BNXT_PF(bp)) 2716 goto async_event_process_exit; 2717 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2718 break; 2719 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2720 char *type_str = "Solicited"; 2721 2722 if (!bp->fw_health) 2723 goto async_event_process_exit; 2724 2725 bp->fw_reset_timestamp = jiffies; 2726 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2727 if (!bp->fw_reset_min_dsecs) 2728 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2729 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2730 if (!bp->fw_reset_max_dsecs) 2731 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2732 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2733 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2734 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2735 type_str = "Fatal"; 2736 bp->fw_health->fatalities++; 2737 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2738 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2739 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2740 type_str = "Non-fatal"; 2741 bp->fw_health->survivals++; 2742 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2743 } 2744 netif_warn(bp, hw, bp->dev, 2745 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2746 type_str, data1, data2, 2747 bp->fw_reset_min_dsecs * 100, 2748 bp->fw_reset_max_dsecs * 100); 2749 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2750 break; 2751 } 2752 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2753 struct bnxt_fw_health *fw_health = bp->fw_health; 2754 char *status_desc = "healthy"; 2755 u32 status; 2756 2757 if (!fw_health) 2758 goto async_event_process_exit; 2759 2760 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2761 fw_health->enabled = false; 2762 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2763 break; 2764 } 2765 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2766 fw_health->tmr_multiplier = 2767 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2768 bp->current_interval * 10); 2769 fw_health->tmr_counter = fw_health->tmr_multiplier; 2770 if 
(!fw_health->enabled) 2771 fw_health->last_fw_heartbeat = 2772 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2773 fw_health->last_fw_reset_cnt = 2774 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2775 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2776 if (status != BNXT_FW_STATUS_HEALTHY) 2777 status_desc = "unhealthy"; 2778 netif_info(bp, drv, bp->dev, 2779 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2780 fw_health->primary ? "primary" : "backup", status, 2781 status_desc, fw_health->last_fw_reset_cnt); 2782 if (!fw_health->enabled) { 2783 /* Make sure tmr_counter is set and visible to 2784 * bnxt_health_check() before setting enabled to true. 2785 */ 2786 smp_wmb(); 2787 fw_health->enabled = true; 2788 } 2789 goto async_event_process_exit; 2790 } 2791 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2792 netif_notice(bp, hw, bp->dev, 2793 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2794 data1, data2); 2795 goto async_event_process_exit; 2796 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2797 struct bnxt_rx_ring_info *rxr; 2798 u16 grp_idx; 2799 2800 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2801 goto async_event_process_exit; 2802 2803 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2804 BNXT_EVENT_RING_TYPE(data2), data1); 2805 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2806 goto async_event_process_exit; 2807 2808 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2809 if (grp_idx == INVALID_HW_RING_ID) { 2810 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2811 data1); 2812 goto async_event_process_exit; 2813 } 2814 rxr = bp->bnapi[grp_idx]->rx_ring; 2815 bnxt_sched_reset_rxr(bp, rxr); 2816 goto async_event_process_exit; 2817 } 2818 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2819 struct bnxt_fw_health *fw_health = bp->fw_health; 2820 2821 netif_notice(bp, hw, bp->dev, 2822 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2823 data1, data2); 2824 if (fw_health) { 2825 fw_health->echo_req_data1 = data1; 2826 fw_health->echo_req_data2 = data2; 2827 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2828 break; 2829 } 2830 goto async_event_process_exit; 2831 } 2832 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2833 bnxt_ptp_pps_event(bp, data1, data2); 2834 goto async_event_process_exit; 2835 } 2836 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2837 if (bnxt_event_error_report(bp, data1, data2)) 2838 break; 2839 goto async_event_process_exit; 2840 } 2841 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2842 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2843 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2844 if (BNXT_PTP_USE_RTC(bp)) { 2845 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2846 unsigned long flags; 2847 u64 ns; 2848 2849 if (!ptp) 2850 goto async_event_process_exit; 2851 2852 bnxt_ptp_update_current_time(bp); 2853 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2854 BNXT_PHC_BITS) | ptp->current_time); 2855 write_seqlock_irqsave(&ptp->ptp_lock, flags); 2856 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2857 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 2858 } 2859 break; 2860 } 2861 goto async_event_process_exit; 2862 } 2863 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2864 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2865 2866 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2867 goto async_event_process_exit; 2868 } 2869 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: { 2870 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); 
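/* data2 carries the firmware's current producer offset within the trace buffer selected by data1 */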
2871 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); 2872 2873 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); 2874 goto async_event_process_exit; 2875 } 2876 default: 2877 goto async_event_process_exit; 2878 } 2879 __bnxt_queue_sp_work(bp); 2880 async_event_process_exit: 2881 bnxt_ulp_async_events(bp, cmpl); 2882 return 0; 2883 } 2884 2885 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2886 { 2887 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2888 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2889 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2890 (struct hwrm_fwd_req_cmpl *)txcmp; 2891 2892 switch (cmpl_type) { 2893 case CMPL_BASE_TYPE_HWRM_DONE: 2894 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2895 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2896 break; 2897 2898 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2899 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2900 2901 if ((vf_id < bp->pf.first_vf_id) || 2902 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2903 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2904 vf_id); 2905 return -EINVAL; 2906 } 2907 2908 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2909 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2910 break; 2911 2912 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2913 bnxt_async_event_process(bp, 2914 (struct hwrm_async_event_cmpl *)txcmp); 2915 break; 2916 2917 default: 2918 break; 2919 } 2920 2921 return 0; 2922 } 2923 2924 static bool bnxt_vnic_is_active(struct bnxt *bp) 2925 { 2926 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 2927 2928 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0; 2929 } 2930 2931 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2932 { 2933 struct bnxt_napi *bnapi = dev_instance; 2934 struct bnxt *bp = bnapi->bp; 2935 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2936 u32 cons = RING_CMP(cpr->cp_raw_cons); 2937 2938 cpr->event_ctr++; 2939 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2940 napi_schedule(&bnapi->napi); 2941 return IRQ_HANDLED; 2942 } 2943 2944 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2945 { 2946 u32 raw_cons = cpr->cp_raw_cons; 2947 u16 cons = RING_CMP(raw_cons); 2948 struct tx_cmp *txcmp; 2949 2950 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2951 2952 return TX_CMP_VALID(txcmp, raw_cons); 2953 } 2954 2955 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2956 int budget) 2957 { 2958 struct bnxt_napi *bnapi = cpr->bnapi; 2959 u32 raw_cons = cpr->cp_raw_cons; 2960 u32 cons; 2961 int rx_pkts = 0; 2962 u8 event = 0; 2963 struct tx_cmp *txcmp; 2964 2965 cpr->has_more_work = 0; 2966 cpr->had_work_done = 1; 2967 while (1) { 2968 u8 cmp_type; 2969 int rc; 2970 2971 cons = RING_CMP(raw_cons); 2972 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2973 2974 if (!TX_CMP_VALID(txcmp, raw_cons)) 2975 break; 2976 2977 /* The valid test of the entry must be done first before 2978 * reading any further. 
2979 */ 2980 dma_rmb(); 2981 cmp_type = TX_CMP_TYPE(txcmp); 2982 if (cmp_type == CMP_TYPE_TX_L2_CMP || 2983 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 2984 u32 opaque = txcmp->tx_cmp_opaque; 2985 struct bnxt_tx_ring_info *txr; 2986 u16 tx_freed; 2987 2988 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2989 event |= BNXT_TX_CMP_EVENT; 2990 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 2991 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 2992 else 2993 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2994 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2995 bp->tx_ring_mask; 2996 /* return full budget so NAPI will complete. */ 2997 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2998 rx_pkts = budget; 2999 raw_cons = NEXT_RAW_CMP(raw_cons); 3000 if (budget) 3001 cpr->has_more_work = 1; 3002 break; 3003 } 3004 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { 3005 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); 3006 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 3007 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 3008 if (likely(budget)) 3009 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3010 else 3011 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 3012 &event); 3013 if (likely(rc >= 0)) 3014 rx_pkts += rc; 3015 /* Increment rx_pkts when rc is -ENOMEM to count towards 3016 * the NAPI budget. Otherwise, we may potentially loop 3017 * here forever if we consistently cannot allocate 3018 * buffers. 3019 */ 3020 else if (rc == -ENOMEM && budget) 3021 rx_pkts++; 3022 else if (rc == -EBUSY) /* partial completion */ 3023 break; 3024 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 3025 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 3026 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 3027 bnxt_hwrm_handler(bp, txcmp); 3028 } 3029 raw_cons = NEXT_RAW_CMP(raw_cons); 3030 3031 if (rx_pkts && rx_pkts == budget) { 3032 cpr->has_more_work = 1; 3033 break; 3034 } 3035 } 3036 3037 if (event & BNXT_REDIRECT_EVENT) { 3038 xdp_do_flush(); 3039 event &= ~BNXT_REDIRECT_EVENT; 3040 } 3041 3042 if (event & BNXT_TX_EVENT) { 3043 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 3044 u16 prod = txr->tx_prod; 3045 3046 /* Sync BD data before updating doorbell */ 3047 wmb(); 3048 3049 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 3050 event &= ~BNXT_TX_EVENT; 3051 } 3052 3053 cpr->cp_raw_cons = raw_cons; 3054 bnapi->events |= event; 3055 return rx_pkts; 3056 } 3057 3058 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3059 int budget) 3060 { 3061 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 3062 bnapi->tx_int(bp, bnapi, budget); 3063 3064 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 3065 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3066 3067 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3068 bnapi->events &= ~BNXT_RX_EVENT; 3069 } 3070 if (bnapi->events & BNXT_AGG_EVENT) { 3071 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3072 3073 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3074 bnapi->events &= ~BNXT_AGG_EVENT; 3075 } 3076 } 3077 3078 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3079 int budget) 3080 { 3081 struct bnxt_napi *bnapi = cpr->bnapi; 3082 int rx_pkts; 3083 3084 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 3085 3086 /* ACK completion ring before freeing tx ring and producing new 3087 * buffers in rx/agg rings to prevent overflowing the completion 3088 * ring. 
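* bnxt_db_cq() below advances the completion ring consumer index before __bnxt_poll_work_done() frees tx buffers and rings the rx/agg doorbells.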
3089 */ 3090 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 3091 3092 __bnxt_poll_work_done(bp, bnapi, budget); 3093 return rx_pkts; 3094 } 3095 3096 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 3097 { 3098 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3099 struct bnxt *bp = bnapi->bp; 3100 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3101 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3102 struct tx_cmp *txcmp; 3103 struct rx_cmp_ext *rxcmp1; 3104 u32 cp_cons, tmp_raw_cons; 3105 u32 raw_cons = cpr->cp_raw_cons; 3106 bool flush_xdp = false; 3107 u32 rx_pkts = 0; 3108 u8 event = 0; 3109 3110 while (1) { 3111 int rc; 3112 3113 cp_cons = RING_CMP(raw_cons); 3114 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3115 3116 if (!TX_CMP_VALID(txcmp, raw_cons)) 3117 break; 3118 3119 /* The valid test of the entry must be done first before 3120 * reading any further. 3121 */ 3122 dma_rmb(); 3123 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 3124 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 3125 cp_cons = RING_CMP(tmp_raw_cons); 3126 rxcmp1 = (struct rx_cmp_ext *) 3127 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3128 3129 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 3130 break; 3131 3132 /* force an error to recycle the buffer */ 3133 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 3134 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 3135 3136 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3137 if (likely(rc == -EIO) && budget) 3138 rx_pkts++; 3139 else if (rc == -EBUSY) /* partial completion */ 3140 break; 3141 if (event & BNXT_REDIRECT_EVENT) 3142 flush_xdp = true; 3143 } else if (unlikely(TX_CMP_TYPE(txcmp) == 3144 CMPL_BASE_TYPE_HWRM_DONE)) { 3145 bnxt_hwrm_handler(bp, txcmp); 3146 } else { 3147 netdev_err(bp->dev, 3148 "Invalid completion received on special ring\n"); 3149 } 3150 raw_cons = NEXT_RAW_CMP(raw_cons); 3151 3152 if (rx_pkts == budget) 3153 break; 3154 } 3155 3156 cpr->cp_raw_cons = raw_cons; 3157 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 3158 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3159 3160 if (event & BNXT_AGG_EVENT) 3161 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3162 if (flush_xdp) 3163 xdp_do_flush(); 3164 3165 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 3166 napi_complete_done(napi, rx_pkts); 3167 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3168 } 3169 return rx_pkts; 3170 } 3171 3172 static int bnxt_poll(struct napi_struct *napi, int budget) 3173 { 3174 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3175 struct bnxt *bp = bnapi->bp; 3176 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3177 int work_done = 0; 3178 3179 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3180 napi_complete(napi); 3181 return 0; 3182 } 3183 while (1) { 3184 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3185 3186 if (work_done >= budget) { 3187 if (!budget) 3188 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3189 break; 3190 } 3191 3192 if (!bnxt_has_work(bp, cpr)) { 3193 if (napi_complete_done(napi, work_done)) 3194 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3195 break; 3196 } 3197 } 3198 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3199 struct dim_sample dim_sample = {}; 3200 3201 dim_update_sample(cpr->event_ctr, 3202 cpr->rx_packets, 3203 cpr->rx_bytes, 3204 &dim_sample); 3205 net_dim(&cpr->dim, &dim_sample); 3206 } 3207 return work_done; 3208 } 3209 3210 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3211 { 3212 struct 
bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3213 int i, work_done = 0; 3214 3215 for (i = 0; i < cpr->cp_ring_count; i++) { 3216 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3217 3218 if (cpr2->had_nqe_notify) { 3219 work_done += __bnxt_poll_work(bp, cpr2, 3220 budget - work_done); 3221 cpr->has_more_work |= cpr2->has_more_work; 3222 } 3223 } 3224 return work_done; 3225 } 3226 3227 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3228 u64 dbr_type, int budget) 3229 { 3230 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3231 int i; 3232 3233 for (i = 0; i < cpr->cp_ring_count; i++) { 3234 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3235 struct bnxt_db_info *db; 3236 3237 if (cpr2->had_work_done) { 3238 u32 tgl = 0; 3239 3240 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3241 cpr2->had_nqe_notify = 0; 3242 tgl = cpr2->toggle; 3243 } 3244 db = &cpr2->cp_db; 3245 bnxt_writeq(bp, 3246 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3247 DB_RING_IDX(db, cpr2->cp_raw_cons), 3248 db->doorbell); 3249 cpr2->had_work_done = 0; 3250 } 3251 } 3252 __bnxt_poll_work_done(bp, bnapi, budget); 3253 } 3254 3255 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3256 { 3257 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3258 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3259 struct bnxt_cp_ring_info *cpr_rx; 3260 u32 raw_cons = cpr->cp_raw_cons; 3261 struct bnxt *bp = bnapi->bp; 3262 struct nqe_cn *nqcmp; 3263 int work_done = 0; 3264 u32 cons; 3265 3266 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3267 napi_complete(napi); 3268 return 0; 3269 } 3270 if (cpr->has_more_work) { 3271 cpr->has_more_work = 0; 3272 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3273 } 3274 while (1) { 3275 u16 type; 3276 3277 cons = RING_CMP(raw_cons); 3278 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3279 3280 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3281 if (cpr->has_more_work) 3282 break; 3283 3284 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3285 budget); 3286 cpr->cp_raw_cons = raw_cons; 3287 if (napi_complete_done(napi, work_done)) 3288 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3289 cpr->cp_raw_cons); 3290 goto poll_done; 3291 } 3292 3293 /* The valid test of the entry must be done first before 3294 * reading any further. 
3295 */ 3296 dma_rmb(); 3297 3298 type = le16_to_cpu(nqcmp->type); 3299 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3300 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3301 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3302 struct bnxt_cp_ring_info *cpr2; 3303 3304 /* No more budget for RX work */ 3305 if (budget && work_done >= budget && 3306 cq_type == BNXT_NQ_HDL_TYPE_RX) 3307 break; 3308 3309 idx = BNXT_NQ_HDL_IDX(idx); 3310 cpr2 = &cpr->cp_ring_arr[idx]; 3311 cpr2->had_nqe_notify = 1; 3312 cpr2->toggle = NQE_CN_TOGGLE(type); 3313 work_done += __bnxt_poll_work(bp, cpr2, 3314 budget - work_done); 3315 cpr->has_more_work |= cpr2->has_more_work; 3316 } else { 3317 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3318 } 3319 raw_cons = NEXT_RAW_CMP(raw_cons); 3320 } 3321 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3322 if (raw_cons != cpr->cp_raw_cons) { 3323 cpr->cp_raw_cons = raw_cons; 3324 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3325 } 3326 poll_done: 3327 cpr_rx = &cpr->cp_ring_arr[0]; 3328 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3329 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3330 struct dim_sample dim_sample = {}; 3331 3332 dim_update_sample(cpr->event_ctr, 3333 cpr_rx->rx_packets, 3334 cpr_rx->rx_bytes, 3335 &dim_sample); 3336 net_dim(&cpr->dim, &dim_sample); 3337 } 3338 return work_done; 3339 } 3340 3341 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp, 3342 struct bnxt_tx_ring_info *txr, int idx) 3343 { 3344 int i, max_idx; 3345 struct pci_dev *pdev = bp->pdev; 3346 3347 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3348 3349 for (i = 0; i < max_idx;) { 3350 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i]; 3351 struct sk_buff *skb; 3352 int j, last; 3353 3354 if (idx < bp->tx_nr_rings_xdp && 3355 tx_buf->action == XDP_REDIRECT) { 3356 dma_unmap_single(&pdev->dev, 3357 dma_unmap_addr(tx_buf, mapping), 3358 dma_unmap_len(tx_buf, len), 3359 DMA_TO_DEVICE); 3360 xdp_return_frame(tx_buf->xdpf); 3361 tx_buf->action = 0; 3362 tx_buf->xdpf = NULL; 3363 i++; 3364 continue; 3365 } 3366 3367 skb = tx_buf->skb; 3368 if (!skb) { 3369 i++; 3370 continue; 3371 } 3372 3373 tx_buf->skb = NULL; 3374 3375 if (tx_buf->is_push) { 3376 dev_kfree_skb(skb); 3377 i += 2; 3378 continue; 3379 } 3380 3381 dma_unmap_single(&pdev->dev, 3382 dma_unmap_addr(tx_buf, mapping), 3383 skb_headlen(skb), 3384 DMA_TO_DEVICE); 3385 3386 last = tx_buf->nr_frags; 3387 i += 2; 3388 for (j = 0; j < last; j++, i++) { 3389 int ring_idx = i & bp->tx_ring_mask; 3390 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 3391 3392 tx_buf = &txr->tx_buf_ring[ring_idx]; 3393 dma_unmap_page(&pdev->dev, 3394 dma_unmap_addr(tx_buf, mapping), 3395 skb_frag_size(frag), DMA_TO_DEVICE); 3396 } 3397 dev_kfree_skb(skb); 3398 } 3399 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx)); 3400 } 3401 3402 static void bnxt_free_tx_skbs(struct bnxt *bp) 3403 { 3404 int i; 3405 3406 if (!bp->tx_ring) 3407 return; 3408 3409 for (i = 0; i < bp->tx_nr_rings; i++) { 3410 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3411 3412 if (!txr->tx_buf_ring) 3413 continue; 3414 3415 bnxt_free_one_tx_ring_skbs(bp, txr, i); 3416 } 3417 } 3418 3419 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3420 { 3421 int i, max_idx; 3422 3423 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3424 3425 for (i = 0; i < max_idx; i++) { 3426 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3427 void *data = rx_buf->data; 3428 3429 if (!data) 3430 continue; 3431 3432 rx_buf->data = NULL; 3433 if 
(BNXT_RX_PAGE_MODE(bp)) 3434 page_pool_recycle_direct(rxr->page_pool, data); 3435 else 3436 page_pool_free_va(rxr->head_pool, data, true); 3437 } 3438 } 3439 3440 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3441 { 3442 int i, max_idx; 3443 3444 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3445 3446 for (i = 0; i < max_idx; i++) { 3447 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3448 struct page *page = rx_agg_buf->page; 3449 3450 if (!page) 3451 continue; 3452 3453 rx_agg_buf->page = NULL; 3454 __clear_bit(i, rxr->rx_agg_bmap); 3455 3456 page_pool_recycle_direct(rxr->page_pool, page); 3457 } 3458 } 3459 3460 static void bnxt_free_one_tpa_info_data(struct bnxt *bp, 3461 struct bnxt_rx_ring_info *rxr) 3462 { 3463 int i; 3464 3465 for (i = 0; i < bp->max_tpa; i++) { 3466 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3467 u8 *data = tpa_info->data; 3468 3469 if (!data) 3470 continue; 3471 3472 tpa_info->data = NULL; 3473 page_pool_free_va(rxr->head_pool, data, false); 3474 } 3475 } 3476 3477 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, 3478 struct bnxt_rx_ring_info *rxr) 3479 { 3480 struct bnxt_tpa_idx_map *map; 3481 3482 if (!rxr->rx_tpa) 3483 goto skip_rx_tpa_free; 3484 3485 bnxt_free_one_tpa_info_data(bp, rxr); 3486 3487 skip_rx_tpa_free: 3488 if (!rxr->rx_buf_ring) 3489 goto skip_rx_buf_free; 3490 3491 bnxt_free_one_rx_ring(bp, rxr); 3492 3493 skip_rx_buf_free: 3494 if (!rxr->rx_agg_ring) 3495 goto skip_rx_agg_free; 3496 3497 bnxt_free_one_rx_agg_ring(bp, rxr); 3498 3499 skip_rx_agg_free: 3500 map = rxr->rx_tpa_idx_map; 3501 if (map) 3502 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3503 } 3504 3505 static void bnxt_free_rx_skbs(struct bnxt *bp) 3506 { 3507 int i; 3508 3509 if (!bp->rx_ring) 3510 return; 3511 3512 for (i = 0; i < bp->rx_nr_rings; i++) 3513 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]); 3514 } 3515 3516 static void bnxt_free_skbs(struct bnxt *bp) 3517 { 3518 bnxt_free_tx_skbs(bp); 3519 bnxt_free_rx_skbs(bp); 3520 } 3521 3522 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3523 { 3524 u8 init_val = ctxm->init_value; 3525 u16 offset = ctxm->init_offset; 3526 u8 *p2 = p; 3527 int i; 3528 3529 if (!init_val) 3530 return; 3531 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3532 memset(p, init_val, len); 3533 return; 3534 } 3535 for (i = 0; i < len; i += ctxm->entry_size) 3536 *(p2 + i + offset) = init_val; 3537 } 3538 3539 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, 3540 void *buf, size_t offset, size_t head, 3541 size_t tail) 3542 { 3543 int i, head_page, start_idx, source_offset; 3544 size_t len, rem_len, total_len, max_bytes; 3545 3546 head_page = head / rmem->page_size; 3547 source_offset = head % rmem->page_size; 3548 total_len = (tail - head) & MAX_CTX_BYTES_MASK; 3549 if (!total_len) 3550 total_len = MAX_CTX_BYTES; 3551 start_idx = head_page % MAX_CTX_PAGES; 3552 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size - 3553 source_offset; 3554 total_len = min(total_len, max_bytes); 3555 rem_len = total_len; 3556 3557 for (i = start_idx; rem_len; i++, source_offset = 0) { 3558 len = min((size_t)(rmem->page_size - source_offset), rem_len); 3559 if (buf) 3560 memcpy(buf + offset, rmem->pg_arr[i] + source_offset, 3561 len); 3562 offset += len; 3563 rem_len -= len; 3564 } 3565 return total_len; 3566 } 3567 3568 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3569 { 3570 struct pci_dev *pdev 
= bp->pdev; 3571 int i; 3572 3573 if (!rmem->pg_arr) 3574 goto skip_pages; 3575 3576 for (i = 0; i < rmem->nr_pages; i++) { 3577 if (!rmem->pg_arr[i]) 3578 continue; 3579 3580 dma_free_coherent(&pdev->dev, rmem->page_size, 3581 rmem->pg_arr[i], rmem->dma_arr[i]); 3582 3583 rmem->pg_arr[i] = NULL; 3584 } 3585 skip_pages: 3586 if (rmem->pg_tbl) { 3587 size_t pg_tbl_size = rmem->nr_pages * 8; 3588 3589 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3590 pg_tbl_size = rmem->page_size; 3591 dma_free_coherent(&pdev->dev, pg_tbl_size, 3592 rmem->pg_tbl, rmem->pg_tbl_map); 3593 rmem->pg_tbl = NULL; 3594 } 3595 if (rmem->vmem_size && *rmem->vmem) { 3596 vfree(*rmem->vmem); 3597 *rmem->vmem = NULL; 3598 } 3599 } 3600 3601 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3602 { 3603 struct pci_dev *pdev = bp->pdev; 3604 u64 valid_bit = 0; 3605 int i; 3606 3607 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3608 valid_bit = PTU_PTE_VALID; 3609 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3610 size_t pg_tbl_size = rmem->nr_pages * 8; 3611 3612 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3613 pg_tbl_size = rmem->page_size; 3614 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3615 &rmem->pg_tbl_map, 3616 GFP_KERNEL); 3617 if (!rmem->pg_tbl) 3618 return -ENOMEM; 3619 } 3620 3621 for (i = 0; i < rmem->nr_pages; i++) { 3622 u64 extra_bits = valid_bit; 3623 3624 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3625 rmem->page_size, 3626 &rmem->dma_arr[i], 3627 GFP_KERNEL); 3628 if (!rmem->pg_arr[i]) 3629 return -ENOMEM; 3630 3631 if (rmem->ctx_mem) 3632 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3633 rmem->page_size); 3634 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3635 if (i == rmem->nr_pages - 2 && 3636 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3637 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3638 else if (i == rmem->nr_pages - 1 && 3639 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3640 extra_bits |= PTU_PTE_LAST; 3641 rmem->pg_tbl[i] = 3642 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3643 } 3644 } 3645 3646 if (rmem->vmem_size) { 3647 *rmem->vmem = vzalloc(rmem->vmem_size); 3648 if (!(*rmem->vmem)) 3649 return -ENOMEM; 3650 } 3651 return 0; 3652 } 3653 3654 static void bnxt_free_one_tpa_info(struct bnxt *bp, 3655 struct bnxt_rx_ring_info *rxr) 3656 { 3657 int i; 3658 3659 kfree(rxr->rx_tpa_idx_map); 3660 rxr->rx_tpa_idx_map = NULL; 3661 if (rxr->rx_tpa) { 3662 for (i = 0; i < bp->max_tpa; i++) { 3663 kfree(rxr->rx_tpa[i].agg_arr); 3664 rxr->rx_tpa[i].agg_arr = NULL; 3665 } 3666 } 3667 kfree(rxr->rx_tpa); 3668 rxr->rx_tpa = NULL; 3669 } 3670 3671 static void bnxt_free_tpa_info(struct bnxt *bp) 3672 { 3673 int i; 3674 3675 for (i = 0; i < bp->rx_nr_rings; i++) { 3676 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3677 3678 bnxt_free_one_tpa_info(bp, rxr); 3679 } 3680 } 3681 3682 static int bnxt_alloc_one_tpa_info(struct bnxt *bp, 3683 struct bnxt_rx_ring_info *rxr) 3684 { 3685 struct rx_agg_cmp *agg; 3686 int i; 3687 3688 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3689 GFP_KERNEL); 3690 if (!rxr->rx_tpa) 3691 return -ENOMEM; 3692 3693 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3694 return 0; 3695 for (i = 0; i < bp->max_tpa; i++) { 3696 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3697 if (!agg) 3698 return -ENOMEM; 3699 rxr->rx_tpa[i].agg_arr = agg; 3700 } 3701 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3702 GFP_KERNEL); 3703 if (!rxr->rx_tpa_idx_map) 3704 return -ENOMEM; 
3705 3706 return 0; 3707 } 3708 3709 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3710 { 3711 int i, rc; 3712 3713 bp->max_tpa = MAX_TPA; 3714 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3715 if (!bp->max_tpa_v2) 3716 return 0; 3717 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3718 } 3719 3720 for (i = 0; i < bp->rx_nr_rings; i++) { 3721 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3722 3723 rc = bnxt_alloc_one_tpa_info(bp, rxr); 3724 if (rc) 3725 return rc; 3726 } 3727 return 0; 3728 } 3729 3730 static void bnxt_free_rx_rings(struct bnxt *bp) 3731 { 3732 int i; 3733 3734 if (!bp->rx_ring) 3735 return; 3736 3737 bnxt_free_tpa_info(bp); 3738 for (i = 0; i < bp->rx_nr_rings; i++) { 3739 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3740 struct bnxt_ring_struct *ring; 3741 3742 if (rxr->xdp_prog) 3743 bpf_prog_put(rxr->xdp_prog); 3744 3745 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3746 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3747 3748 page_pool_destroy(rxr->page_pool); 3749 if (bnxt_separate_head_pool()) 3750 page_pool_destroy(rxr->head_pool); 3751 rxr->page_pool = rxr->head_pool = NULL; 3752 3753 kfree(rxr->rx_agg_bmap); 3754 rxr->rx_agg_bmap = NULL; 3755 3756 ring = &rxr->rx_ring_struct; 3757 bnxt_free_ring(bp, &ring->ring_mem); 3758 3759 ring = &rxr->rx_agg_ring_struct; 3760 bnxt_free_ring(bp, &ring->ring_mem); 3761 } 3762 } 3763 3764 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3765 struct bnxt_rx_ring_info *rxr, 3766 int numa_node) 3767 { 3768 struct page_pool_params pp = { 0 }; 3769 struct page_pool *pool; 3770 3771 pp.pool_size = bp->rx_agg_ring_size; 3772 if (BNXT_RX_PAGE_MODE(bp)) 3773 pp.pool_size += bp->rx_ring_size; 3774 pp.nid = numa_node; 3775 pp.napi = &rxr->bnapi->napi; 3776 pp.netdev = bp->dev; 3777 pp.dev = &bp->pdev->dev; 3778 pp.dma_dir = bp->rx_dir; 3779 pp.max_len = PAGE_SIZE; 3780 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3781 3782 pool = page_pool_create(&pp); 3783 if (IS_ERR(pool)) 3784 return PTR_ERR(pool); 3785 rxr->page_pool = pool; 3786 3787 if (bnxt_separate_head_pool()) { 3788 pp.pool_size = max(bp->rx_ring_size, 1024); 3789 pool = page_pool_create(&pp); 3790 if (IS_ERR(pool)) 3791 goto err_destroy_pp; 3792 } 3793 rxr->head_pool = pool; 3794 3795 return 0; 3796 3797 err_destroy_pp: 3798 page_pool_destroy(rxr->page_pool); 3799 rxr->page_pool = NULL; 3800 return PTR_ERR(pool); 3801 } 3802 3803 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3804 { 3805 u16 mem_size; 3806 3807 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3808 mem_size = rxr->rx_agg_bmap_size / 8; 3809 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3810 if (!rxr->rx_agg_bmap) 3811 return -ENOMEM; 3812 3813 return 0; 3814 } 3815 3816 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3817 { 3818 int numa_node = dev_to_node(&bp->pdev->dev); 3819 int i, rc = 0, agg_rings = 0, cpu; 3820 3821 if (!bp->rx_ring) 3822 return -ENOMEM; 3823 3824 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3825 agg_rings = 1; 3826 3827 for (i = 0; i < bp->rx_nr_rings; i++) { 3828 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3829 struct bnxt_ring_struct *ring; 3830 int cpu_node; 3831 3832 ring = &rxr->rx_ring_struct; 3833 3834 cpu = cpumask_local_spread(i, numa_node); 3835 cpu_node = cpu_to_node(cpu); 3836 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", 3837 i, cpu_node); 3838 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3839 if (rc) 3840 return rc; 3841 3842 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3843 if (rc < 0) 3844 
return rc; 3845 3846 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3847 MEM_TYPE_PAGE_POOL, 3848 rxr->page_pool); 3849 if (rc) { 3850 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3851 return rc; 3852 } 3853 3854 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3855 if (rc) 3856 return rc; 3857 3858 ring->grp_idx = i; 3859 if (agg_rings) { 3860 ring = &rxr->rx_agg_ring_struct; 3861 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3862 if (rc) 3863 return rc; 3864 3865 ring->grp_idx = i; 3866 rc = bnxt_alloc_rx_agg_bmap(bp, rxr); 3867 if (rc) 3868 return rc; 3869 } 3870 } 3871 if (bp->flags & BNXT_FLAG_TPA) 3872 rc = bnxt_alloc_tpa_info(bp); 3873 return rc; 3874 } 3875 3876 static void bnxt_free_tx_rings(struct bnxt *bp) 3877 { 3878 int i; 3879 struct pci_dev *pdev = bp->pdev; 3880 3881 if (!bp->tx_ring) 3882 return; 3883 3884 for (i = 0; i < bp->tx_nr_rings; i++) { 3885 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3886 struct bnxt_ring_struct *ring; 3887 3888 if (txr->tx_push) { 3889 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3890 txr->tx_push, txr->tx_push_mapping); 3891 txr->tx_push = NULL; 3892 } 3893 3894 ring = &txr->tx_ring_struct; 3895 3896 bnxt_free_ring(bp, &ring->ring_mem); 3897 } 3898 } 3899 3900 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3901 ((tc) * (bp)->tx_nr_rings_per_tc) 3902 3903 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3904 ((tx) % (bp)->tx_nr_rings_per_tc) 3905 3906 #define BNXT_RING_TO_TC(bp, tx) \ 3907 ((tx) / (bp)->tx_nr_rings_per_tc) 3908 3909 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3910 { 3911 int i, j, rc; 3912 struct pci_dev *pdev = bp->pdev; 3913 3914 bp->tx_push_size = 0; 3915 if (bp->tx_push_thresh) { 3916 int push_size; 3917 3918 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3919 bp->tx_push_thresh); 3920 3921 if (push_size > 256) { 3922 push_size = 0; 3923 bp->tx_push_thresh = 0; 3924 } 3925 3926 bp->tx_push_size = push_size; 3927 } 3928 3929 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3930 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3931 struct bnxt_ring_struct *ring; 3932 u8 qidx; 3933 3934 ring = &txr->tx_ring_struct; 3935 3936 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3937 if (rc) 3938 return rc; 3939 3940 ring->grp_idx = txr->bnapi->index; 3941 if (bp->tx_push_size) { 3942 dma_addr_t mapping; 3943 3944 /* One pre-allocated DMA buffer to backup 3945 * TX push operation 3946 */ 3947 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3948 bp->tx_push_size, 3949 &txr->tx_push_mapping, 3950 GFP_KERNEL); 3951 3952 if (!txr->tx_push) 3953 return -ENOMEM; 3954 3955 mapping = txr->tx_push_mapping + 3956 sizeof(struct tx_push_bd); 3957 txr->data_mapping = cpu_to_le64(mapping); 3958 } 3959 qidx = bp->tc_to_qidx[j]; 3960 ring->queue_id = bp->q_info[qidx].queue_id; 3961 spin_lock_init(&txr->xdp_tx_lock); 3962 if (i < bp->tx_nr_rings_xdp) 3963 continue; 3964 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3965 j++; 3966 } 3967 return 0; 3968 } 3969 3970 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3971 { 3972 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3973 3974 kfree(cpr->cp_desc_ring); 3975 cpr->cp_desc_ring = NULL; 3976 ring->ring_mem.pg_arr = NULL; 3977 kfree(cpr->cp_desc_mapping); 3978 cpr->cp_desc_mapping = NULL; 3979 ring->ring_mem.dma_arr = NULL; 3980 } 3981 3982 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3983 { 3984 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3985 if (!cpr->cp_desc_ring) 3986 return -ENOMEM; 3987 cpr->cp_desc_mapping = kcalloc(n, 
sizeof(*cpr->cp_desc_mapping), 3988 GFP_KERNEL); 3989 if (!cpr->cp_desc_mapping) 3990 return -ENOMEM; 3991 return 0; 3992 } 3993 3994 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3995 { 3996 int i; 3997 3998 if (!bp->bnapi) 3999 return; 4000 for (i = 0; i < bp->cp_nr_rings; i++) { 4001 struct bnxt_napi *bnapi = bp->bnapi[i]; 4002 4003 if (!bnapi) 4004 continue; 4005 bnxt_free_cp_arrays(&bnapi->cp_ring); 4006 } 4007 } 4008 4009 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 4010 { 4011 int i, n = bp->cp_nr_pages; 4012 4013 for (i = 0; i < bp->cp_nr_rings; i++) { 4014 struct bnxt_napi *bnapi = bp->bnapi[i]; 4015 int rc; 4016 4017 if (!bnapi) 4018 continue; 4019 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 4020 if (rc) 4021 return rc; 4022 } 4023 return 0; 4024 } 4025 4026 static void bnxt_free_cp_rings(struct bnxt *bp) 4027 { 4028 int i; 4029 4030 if (!bp->bnapi) 4031 return; 4032 4033 for (i = 0; i < bp->cp_nr_rings; i++) { 4034 struct bnxt_napi *bnapi = bp->bnapi[i]; 4035 struct bnxt_cp_ring_info *cpr; 4036 struct bnxt_ring_struct *ring; 4037 int j; 4038 4039 if (!bnapi) 4040 continue; 4041 4042 cpr = &bnapi->cp_ring; 4043 ring = &cpr->cp_ring_struct; 4044 4045 bnxt_free_ring(bp, &ring->ring_mem); 4046 4047 if (!cpr->cp_ring_arr) 4048 continue; 4049 4050 for (j = 0; j < cpr->cp_ring_count; j++) { 4051 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4052 4053 ring = &cpr2->cp_ring_struct; 4054 bnxt_free_ring(bp, &ring->ring_mem); 4055 bnxt_free_cp_arrays(cpr2); 4056 } 4057 kfree(cpr->cp_ring_arr); 4058 cpr->cp_ring_arr = NULL; 4059 cpr->cp_ring_count = 0; 4060 } 4061 } 4062 4063 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 4064 struct bnxt_cp_ring_info *cpr) 4065 { 4066 struct bnxt_ring_mem_info *rmem; 4067 struct bnxt_ring_struct *ring; 4068 int rc; 4069 4070 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 4071 if (rc) { 4072 bnxt_free_cp_arrays(cpr); 4073 return -ENOMEM; 4074 } 4075 ring = &cpr->cp_ring_struct; 4076 rmem = &ring->ring_mem; 4077 rmem->nr_pages = bp->cp_nr_pages; 4078 rmem->page_size = HW_CMPD_RING_SIZE; 4079 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4080 rmem->dma_arr = cpr->cp_desc_mapping; 4081 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 4082 rc = bnxt_alloc_ring(bp, rmem); 4083 if (rc) { 4084 bnxt_free_ring(bp, rmem); 4085 bnxt_free_cp_arrays(cpr); 4086 } 4087 return rc; 4088 } 4089 4090 static int bnxt_alloc_cp_rings(struct bnxt *bp) 4091 { 4092 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 4093 int i, j, rc, ulp_msix; 4094 int tcs = bp->num_tc; 4095 4096 if (!tcs) 4097 tcs = 1; 4098 ulp_msix = bnxt_get_ulp_msix_num(bp); 4099 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 4100 struct bnxt_napi *bnapi = bp->bnapi[i]; 4101 struct bnxt_cp_ring_info *cpr, *cpr2; 4102 struct bnxt_ring_struct *ring; 4103 int cp_count = 0, k; 4104 int rx = 0, tx = 0; 4105 4106 if (!bnapi) 4107 continue; 4108 4109 cpr = &bnapi->cp_ring; 4110 cpr->bnapi = bnapi; 4111 ring = &cpr->cp_ring_struct; 4112 4113 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 4114 if (rc) 4115 return rc; 4116 4117 ring->map_idx = ulp_msix + i; 4118 4119 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4120 continue; 4121 4122 if (i < bp->rx_nr_rings) { 4123 cp_count++; 4124 rx = 1; 4125 } 4126 if (i < bp->tx_nr_rings_xdp) { 4127 cp_count++; 4128 tx = 1; 4129 } else if ((sh && i < bp->tx_nr_rings) || 4130 (!sh && i >= bp->rx_nr_rings)) { 4131 cp_count += tcs; 4132 tx = 1; 4133 } 4134 4135 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 4136 GFP_KERNEL); 4137 if (!cpr->cp_ring_arr) 4138 return 
-ENOMEM; 4139 cpr->cp_ring_count = cp_count; 4140 4141 for (k = 0; k < cp_count; k++) { 4142 cpr2 = &cpr->cp_ring_arr[k]; 4143 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 4144 if (rc) 4145 return rc; 4146 cpr2->bnapi = bnapi; 4147 cpr2->sw_stats = cpr->sw_stats; 4148 cpr2->cp_idx = k; 4149 if (!k && rx) { 4150 bp->rx_ring[i].rx_cpr = cpr2; 4151 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 4152 } else { 4153 int n, tc = k - rx; 4154 4155 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 4156 bp->tx_ring[n].tx_cpr = cpr2; 4157 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 4158 } 4159 } 4160 if (tx) 4161 j++; 4162 } 4163 return 0; 4164 } 4165 4166 static void bnxt_init_rx_ring_struct(struct bnxt *bp, 4167 struct bnxt_rx_ring_info *rxr) 4168 { 4169 struct bnxt_ring_mem_info *rmem; 4170 struct bnxt_ring_struct *ring; 4171 4172 ring = &rxr->rx_ring_struct; 4173 rmem = &ring->ring_mem; 4174 rmem->nr_pages = bp->rx_nr_pages; 4175 rmem->page_size = HW_RXBD_RING_SIZE; 4176 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4177 rmem->dma_arr = rxr->rx_desc_mapping; 4178 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4179 rmem->vmem = (void **)&rxr->rx_buf_ring; 4180 4181 ring = &rxr->rx_agg_ring_struct; 4182 rmem = &ring->ring_mem; 4183 rmem->nr_pages = bp->rx_agg_nr_pages; 4184 rmem->page_size = HW_RXBD_RING_SIZE; 4185 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4186 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4187 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4188 rmem->vmem = (void **)&rxr->rx_agg_ring; 4189 } 4190 4191 static void bnxt_reset_rx_ring_struct(struct bnxt *bp, 4192 struct bnxt_rx_ring_info *rxr) 4193 { 4194 struct bnxt_ring_mem_info *rmem; 4195 struct bnxt_ring_struct *ring; 4196 int i; 4197 4198 rxr->page_pool->p.napi = NULL; 4199 rxr->page_pool = NULL; 4200 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); 4201 4202 ring = &rxr->rx_ring_struct; 4203 rmem = &ring->ring_mem; 4204 rmem->pg_tbl = NULL; 4205 rmem->pg_tbl_map = 0; 4206 for (i = 0; i < rmem->nr_pages; i++) { 4207 rmem->pg_arr[i] = NULL; 4208 rmem->dma_arr[i] = 0; 4209 } 4210 *rmem->vmem = NULL; 4211 4212 ring = &rxr->rx_agg_ring_struct; 4213 rmem = &ring->ring_mem; 4214 rmem->pg_tbl = NULL; 4215 rmem->pg_tbl_map = 0; 4216 for (i = 0; i < rmem->nr_pages; i++) { 4217 rmem->pg_arr[i] = NULL; 4218 rmem->dma_arr[i] = 0; 4219 } 4220 *rmem->vmem = NULL; 4221 } 4222 4223 static void bnxt_init_ring_struct(struct bnxt *bp) 4224 { 4225 int i, j; 4226 4227 for (i = 0; i < bp->cp_nr_rings; i++) { 4228 struct bnxt_napi *bnapi = bp->bnapi[i]; 4229 struct bnxt_ring_mem_info *rmem; 4230 struct bnxt_cp_ring_info *cpr; 4231 struct bnxt_rx_ring_info *rxr; 4232 struct bnxt_tx_ring_info *txr; 4233 struct bnxt_ring_struct *ring; 4234 4235 if (!bnapi) 4236 continue; 4237 4238 cpr = &bnapi->cp_ring; 4239 ring = &cpr->cp_ring_struct; 4240 rmem = &ring->ring_mem; 4241 rmem->nr_pages = bp->cp_nr_pages; 4242 rmem->page_size = HW_CMPD_RING_SIZE; 4243 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4244 rmem->dma_arr = cpr->cp_desc_mapping; 4245 rmem->vmem_size = 0; 4246 4247 rxr = bnapi->rx_ring; 4248 if (!rxr) 4249 goto skip_rx; 4250 4251 ring = &rxr->rx_ring_struct; 4252 rmem = &ring->ring_mem; 4253 rmem->nr_pages = bp->rx_nr_pages; 4254 rmem->page_size = HW_RXBD_RING_SIZE; 4255 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4256 rmem->dma_arr = rxr->rx_desc_mapping; 4257 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4258 rmem->vmem = (void **)&rxr->rx_buf_ring; 4259 4260 ring = &rxr->rx_agg_ring_struct; 4261 rmem = &ring->ring_mem; 4262 
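	/* The aggregation ring uses the same hardware RX BD page layout as
	 * the normal RX ring; only the page count and the software
	 * rx_agg_ring buffer array set up below differ.
	 */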
rmem->nr_pages = bp->rx_agg_nr_pages; 4263 rmem->page_size = HW_RXBD_RING_SIZE; 4264 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4265 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4266 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4267 rmem->vmem = (void **)&rxr->rx_agg_ring; 4268 4269 skip_rx: 4270 bnxt_for_each_napi_tx(j, bnapi, txr) { 4271 ring = &txr->tx_ring_struct; 4272 rmem = &ring->ring_mem; 4273 rmem->nr_pages = bp->tx_nr_pages; 4274 rmem->page_size = HW_TXBD_RING_SIZE; 4275 rmem->pg_arr = (void **)txr->tx_desc_ring; 4276 rmem->dma_arr = txr->tx_desc_mapping; 4277 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 4278 rmem->vmem = (void **)&txr->tx_buf_ring; 4279 } 4280 } 4281 } 4282 4283 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 4284 { 4285 int i; 4286 u32 prod; 4287 struct rx_bd **rx_buf_ring; 4288 4289 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 4290 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 4291 int j; 4292 struct rx_bd *rxbd; 4293 4294 rxbd = rx_buf_ring[i]; 4295 if (!rxbd) 4296 continue; 4297 4298 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 4299 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 4300 rxbd->rx_bd_opaque = prod; 4301 } 4302 } 4303 } 4304 4305 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, 4306 struct bnxt_rx_ring_info *rxr, 4307 int ring_nr) 4308 { 4309 u32 prod; 4310 int i; 4311 4312 prod = rxr->rx_prod; 4313 for (i = 0; i < bp->rx_ring_size; i++) { 4314 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 4315 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", 4316 ring_nr, i, bp->rx_ring_size); 4317 break; 4318 } 4319 prod = NEXT_RX(prod); 4320 } 4321 rxr->rx_prod = prod; 4322 } 4323 4324 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, 4325 struct bnxt_rx_ring_info *rxr, 4326 int ring_nr) 4327 { 4328 u32 prod; 4329 int i; 4330 4331 prod = rxr->rx_agg_prod; 4332 for (i = 0; i < bp->rx_agg_ring_size; i++) { 4333 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 4334 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", 4335 ring_nr, i, bp->rx_ring_size); 4336 break; 4337 } 4338 prod = NEXT_RX_AGG(prod); 4339 } 4340 rxr->rx_agg_prod = prod; 4341 } 4342 4343 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp, 4344 struct bnxt_rx_ring_info *rxr) 4345 { 4346 dma_addr_t mapping; 4347 u8 *data; 4348 int i; 4349 4350 for (i = 0; i < bp->max_tpa; i++) { 4351 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, 4352 GFP_KERNEL); 4353 if (!data) 4354 return -ENOMEM; 4355 4356 rxr->rx_tpa[i].data = data; 4357 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 4358 rxr->rx_tpa[i].mapping = mapping; 4359 } 4360 4361 return 0; 4362 } 4363 4364 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 4365 { 4366 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 4367 int rc; 4368 4369 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); 4370 4371 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 4372 return 0; 4373 4374 bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); 4375 4376 if (rxr->rx_tpa) { 4377 rc = bnxt_alloc_one_tpa_info_data(bp, rxr); 4378 if (rc) 4379 return rc; 4380 } 4381 return 0; 4382 } 4383 4384 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, 4385 struct bnxt_rx_ring_info *rxr) 4386 { 4387 struct bnxt_ring_struct *ring; 4388 u32 type; 4389 4390 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4391 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 4392 4393 if (NET_IP_ALIGN == 2) 4394 type |= RX_BD_FLAGS_SOP; 4395 4396 ring = 
&rxr->rx_ring_struct; 4397 bnxt_init_rxbd_pages(ring, type); 4398 ring->fw_ring_id = INVALID_HW_RING_ID; 4399 } 4400 4401 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, 4402 struct bnxt_rx_ring_info *rxr) 4403 { 4404 struct bnxt_ring_struct *ring; 4405 u32 type; 4406 4407 ring = &rxr->rx_agg_ring_struct; 4408 ring->fw_ring_id = INVALID_HW_RING_ID; 4409 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4410 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4411 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4412 4413 bnxt_init_rxbd_pages(ring, type); 4414 } 4415 } 4416 4417 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 4418 { 4419 struct bnxt_rx_ring_info *rxr; 4420 4421 rxr = &bp->rx_ring[ring_nr]; 4422 bnxt_init_one_rx_ring_rxbd(bp, rxr); 4423 4424 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4425 &rxr->bnapi->napi); 4426 4427 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4428 bpf_prog_add(bp->xdp_prog, 1); 4429 rxr->xdp_prog = bp->xdp_prog; 4430 } 4431 4432 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); 4433 4434 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4435 } 4436 4437 static void bnxt_init_cp_rings(struct bnxt *bp) 4438 { 4439 int i, j; 4440 4441 for (i = 0; i < bp->cp_nr_rings; i++) { 4442 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4443 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4444 4445 ring->fw_ring_id = INVALID_HW_RING_ID; 4446 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4447 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4448 if (!cpr->cp_ring_arr) 4449 continue; 4450 for (j = 0; j < cpr->cp_ring_count; j++) { 4451 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4452 4453 ring = &cpr2->cp_ring_struct; 4454 ring->fw_ring_id = INVALID_HW_RING_ID; 4455 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4456 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4457 } 4458 } 4459 } 4460 4461 static int bnxt_init_rx_rings(struct bnxt *bp) 4462 { 4463 int i, rc = 0; 4464 4465 if (BNXT_RX_PAGE_MODE(bp)) { 4466 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4467 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4468 } else { 4469 bp->rx_offset = BNXT_RX_OFFSET; 4470 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4471 } 4472 4473 for (i = 0; i < bp->rx_nr_rings; i++) { 4474 rc = bnxt_init_one_rx_ring(bp, i); 4475 if (rc) 4476 break; 4477 } 4478 4479 return rc; 4480 } 4481 4482 static int bnxt_init_tx_rings(struct bnxt *bp) 4483 { 4484 u16 i; 4485 4486 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4487 BNXT_MIN_TX_DESC_CNT); 4488 4489 for (i = 0; i < bp->tx_nr_rings; i++) { 4490 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4491 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4492 4493 ring->fw_ring_id = INVALID_HW_RING_ID; 4494 4495 if (i >= bp->tx_nr_rings_xdp) 4496 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4497 NETDEV_QUEUE_TYPE_TX, 4498 &txr->bnapi->napi); 4499 } 4500 4501 return 0; 4502 } 4503 4504 static void bnxt_free_ring_grps(struct bnxt *bp) 4505 { 4506 kfree(bp->grp_info); 4507 bp->grp_info = NULL; 4508 } 4509 4510 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4511 { 4512 int i; 4513 4514 if (irq_re_init) { 4515 bp->grp_info = kcalloc(bp->cp_nr_rings, 4516 sizeof(struct bnxt_ring_grp_info), 4517 GFP_KERNEL); 4518 if (!bp->grp_info) 4519 return -ENOMEM; 4520 } 4521 for (i = 0; i < bp->cp_nr_rings; i++) { 4522 if (irq_re_init) 4523 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4524 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4525 
bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4526 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4527 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4528 } 4529 return 0; 4530 } 4531 4532 static void bnxt_free_vnics(struct bnxt *bp) 4533 { 4534 kfree(bp->vnic_info); 4535 bp->vnic_info = NULL; 4536 bp->nr_vnics = 0; 4537 } 4538 4539 static int bnxt_alloc_vnics(struct bnxt *bp) 4540 { 4541 int num_vnics = 1; 4542 4543 #ifdef CONFIG_RFS_ACCEL 4544 if (bp->flags & BNXT_FLAG_RFS) { 4545 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4546 num_vnics++; 4547 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4548 num_vnics += bp->rx_nr_rings; 4549 } 4550 #endif 4551 4552 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4553 num_vnics++; 4554 4555 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4556 GFP_KERNEL); 4557 if (!bp->vnic_info) 4558 return -ENOMEM; 4559 4560 bp->nr_vnics = num_vnics; 4561 return 0; 4562 } 4563 4564 static void bnxt_init_vnics(struct bnxt *bp) 4565 { 4566 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4567 int i; 4568 4569 for (i = 0; i < bp->nr_vnics; i++) { 4570 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4571 int j; 4572 4573 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4574 vnic->vnic_id = i; 4575 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4576 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4577 4578 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4579 4580 if (bp->vnic_info[i].rss_hash_key) { 4581 if (i == BNXT_VNIC_DEFAULT) { 4582 u8 *key = (void *)vnic->rss_hash_key; 4583 int k; 4584 4585 if (!bp->rss_hash_key_valid && 4586 !bp->rss_hash_key_updated) { 4587 get_random_bytes(bp->rss_hash_key, 4588 HW_HASH_KEY_SIZE); 4589 bp->rss_hash_key_updated = true; 4590 } 4591 4592 memcpy(vnic->rss_hash_key, bp->rss_hash_key, 4593 HW_HASH_KEY_SIZE); 4594 4595 if (!bp->rss_hash_key_updated) 4596 continue; 4597 4598 bp->rss_hash_key_updated = false; 4599 bp->rss_hash_key_valid = true; 4600 4601 bp->toeplitz_prefix = 0; 4602 for (k = 0; k < 8; k++) { 4603 bp->toeplitz_prefix <<= 8; 4604 bp->toeplitz_prefix |= key[k]; 4605 } 4606 } else { 4607 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4608 HW_HASH_KEY_SIZE); 4609 } 4610 } 4611 } 4612 } 4613 4614 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4615 { 4616 int pages; 4617 4618 pages = ring_size / desc_per_pg; 4619 4620 if (!pages) 4621 return 1; 4622 4623 pages++; 4624 4625 while (pages & (pages - 1)) 4626 pages++; 4627 4628 return pages; 4629 } 4630 4631 void bnxt_set_tpa_flags(struct bnxt *bp) 4632 { 4633 bp->flags &= ~BNXT_FLAG_TPA; 4634 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4635 return; 4636 if (bp->dev->features & NETIF_F_LRO) 4637 bp->flags |= BNXT_FLAG_LRO; 4638 else if (bp->dev->features & NETIF_F_GRO_HW) 4639 bp->flags |= BNXT_FLAG_GRO; 4640 } 4641 4642 static void bnxt_init_ring_params(struct bnxt *bp) 4643 { 4644 unsigned int rx_size; 4645 4646 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; 4647 /* Try to fit 4 chunks into a 4k page */ 4648 rx_size = SZ_1K - 4649 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4650 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size); 4651 } 4652 4653 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4654 * be set on entry. 
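 * From these values the routine derives the RX/AGG/TX page counts and ring
 * masks, the RX buffer sizes and the completion ring size.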
4655 */ 4656 void bnxt_set_ring_params(struct bnxt *bp) 4657 { 4658 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4659 u32 agg_factor = 0, agg_ring_size = 0; 4660 4661 /* 8 for CRC and VLAN */ 4662 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4663 4664 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4665 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4666 4667 ring_size = bp->rx_ring_size; 4668 bp->rx_agg_ring_size = 0; 4669 bp->rx_agg_nr_pages = 0; 4670 4671 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) 4672 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4673 4674 bp->flags &= ~BNXT_FLAG_JUMBO; 4675 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4676 u32 jumbo_factor; 4677 4678 bp->flags |= BNXT_FLAG_JUMBO; 4679 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4680 if (jumbo_factor > agg_factor) 4681 agg_factor = jumbo_factor; 4682 } 4683 if (agg_factor) { 4684 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4685 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4686 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4687 bp->rx_ring_size, ring_size); 4688 bp->rx_ring_size = ring_size; 4689 } 4690 agg_ring_size = ring_size * agg_factor; 4691 4692 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4693 RX_DESC_CNT); 4694 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4695 u32 tmp = agg_ring_size; 4696 4697 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4698 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4699 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4700 tmp, agg_ring_size); 4701 } 4702 bp->rx_agg_ring_size = agg_ring_size; 4703 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4704 4705 if (BNXT_RX_PAGE_MODE(bp)) { 4706 rx_space = PAGE_SIZE; 4707 rx_size = PAGE_SIZE - 4708 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4709 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4710 } else { 4711 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK, 4712 bp->rx_copybreak, 4713 bp->dev->cfg_pending->hds_thresh); 4714 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN); 4715 rx_space = rx_size + NET_SKB_PAD + 4716 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4717 } 4718 } 4719 4720 bp->rx_buf_use_size = rx_size; 4721 bp->rx_buf_size = rx_space; 4722 4723 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4724 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4725 4726 ring_size = bp->tx_ring_size; 4727 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4728 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4729 4730 max_rx_cmpl = bp->rx_ring_size; 4731 /* MAX TPA needs to be added because TPA_START completions are 4732 * immediately recycled, so the TPA completions are not bound by 4733 * the RX ring size. 
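 * In the worst case every TPA slot holds an open aggregation while the RX
 * ring itself is completely full, hence the extra max_tpa entries below.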
4734 */ 4735 if (bp->flags & BNXT_FLAG_TPA) 4736 max_rx_cmpl += bp->max_tpa; 4737 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4738 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4739 bp->cp_ring_size = ring_size; 4740 4741 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4742 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4743 bp->cp_nr_pages = MAX_CP_PAGES; 4744 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4745 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4746 ring_size, bp->cp_ring_size); 4747 } 4748 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4749 bp->cp_ring_mask = bp->cp_bit - 1; 4750 } 4751 4752 /* Changing allocation mode of RX rings. 4753 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4754 */ 4755 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4756 { 4757 struct net_device *dev = bp->dev; 4758 4759 if (page_mode) { 4760 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); 4761 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4762 4763 if (bp->xdp_prog->aux->xdp_has_frags) 4764 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4765 else 4766 dev->max_mtu = 4767 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4768 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4769 bp->flags |= BNXT_FLAG_JUMBO; 4770 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4771 } else { 4772 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4773 bp->rx_skb_func = bnxt_rx_page_skb; 4774 } 4775 bp->rx_dir = DMA_BIDIRECTIONAL; 4776 } else { 4777 dev->max_mtu = bp->max_mtu; 4778 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4779 bp->rx_dir = DMA_FROM_DEVICE; 4780 bp->rx_skb_func = bnxt_rx_skb; 4781 } 4782 } 4783 4784 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4785 { 4786 __bnxt_set_rx_skb_mode(bp, page_mode); 4787 4788 if (!page_mode) { 4789 int rx, tx; 4790 4791 bnxt_get_max_rings(bp, &rx, &tx, true); 4792 if (rx > 1) { 4793 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; 4794 bp->dev->hw_features |= NETIF_F_LRO; 4795 } 4796 } 4797 4798 /* Update LRO and GRO_HW availability */ 4799 netdev_update_features(bp->dev); 4800 } 4801 4802 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4803 { 4804 int i; 4805 struct bnxt_vnic_info *vnic; 4806 struct pci_dev *pdev = bp->pdev; 4807 4808 if (!bp->vnic_info) 4809 return; 4810 4811 for (i = 0; i < bp->nr_vnics; i++) { 4812 vnic = &bp->vnic_info[i]; 4813 4814 kfree(vnic->fw_grp_ids); 4815 vnic->fw_grp_ids = NULL; 4816 4817 kfree(vnic->uc_list); 4818 vnic->uc_list = NULL; 4819 4820 if (vnic->mc_list) { 4821 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4822 vnic->mc_list, vnic->mc_list_mapping); 4823 vnic->mc_list = NULL; 4824 } 4825 4826 if (vnic->rss_table) { 4827 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4828 vnic->rss_table, 4829 vnic->rss_table_dma_addr); 4830 vnic->rss_table = NULL; 4831 } 4832 4833 vnic->rss_hash_key = NULL; 4834 vnic->flags = 0; 4835 } 4836 } 4837 4838 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4839 { 4840 int i, rc = 0, size; 4841 struct bnxt_vnic_info *vnic; 4842 struct pci_dev *pdev = bp->pdev; 4843 int max_rings; 4844 4845 for (i = 0; i < bp->nr_vnics; i++) { 4846 vnic = &bp->vnic_info[i]; 4847 4848 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4849 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4850 4851 if (mem_size > 0) { 4852 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4853 if (!vnic->uc_list) { 4854 rc = -ENOMEM; 4855 goto out; 4856 } 4857 } 4858 } 4859 4860 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4861 
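			/* The MC list is a DMA-coherent table of MAC addresses;
			 * its bus address is handed to firmware later in
			 * bnxt_hwrm_cfa_l2_set_rx_mask().
			 */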
vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4862 vnic->mc_list = 4863 dma_alloc_coherent(&pdev->dev, 4864 vnic->mc_list_size, 4865 &vnic->mc_list_mapping, 4866 GFP_KERNEL); 4867 if (!vnic->mc_list) { 4868 rc = -ENOMEM; 4869 goto out; 4870 } 4871 } 4872 4873 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4874 goto vnic_skip_grps; 4875 4876 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4877 max_rings = bp->rx_nr_rings; 4878 else 4879 max_rings = 1; 4880 4881 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4882 if (!vnic->fw_grp_ids) { 4883 rc = -ENOMEM; 4884 goto out; 4885 } 4886 vnic_skip_grps: 4887 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4888 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4889 continue; 4890 4891 /* Allocate rss table and hash key */ 4892 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4893 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4894 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4895 4896 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4897 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4898 vnic->rss_table_size, 4899 &vnic->rss_table_dma_addr, 4900 GFP_KERNEL); 4901 if (!vnic->rss_table) { 4902 rc = -ENOMEM; 4903 goto out; 4904 } 4905 4906 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4907 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4908 } 4909 return 0; 4910 4911 out: 4912 return rc; 4913 } 4914 4915 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4916 { 4917 struct bnxt_hwrm_wait_token *token; 4918 4919 dma_pool_destroy(bp->hwrm_dma_pool); 4920 bp->hwrm_dma_pool = NULL; 4921 4922 rcu_read_lock(); 4923 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4924 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4925 rcu_read_unlock(); 4926 } 4927 4928 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4929 { 4930 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4931 BNXT_HWRM_DMA_SIZE, 4932 BNXT_HWRM_DMA_ALIGN, 0); 4933 if (!bp->hwrm_dma_pool) 4934 return -ENOMEM; 4935 4936 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4937 4938 return 0; 4939 } 4940 4941 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4942 { 4943 kfree(stats->hw_masks); 4944 stats->hw_masks = NULL; 4945 kfree(stats->sw_stats); 4946 stats->sw_stats = NULL; 4947 if (stats->hw_stats) { 4948 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4949 stats->hw_stats_map); 4950 stats->hw_stats = NULL; 4951 } 4952 } 4953 4954 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4955 bool alloc_masks) 4956 { 4957 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4958 &stats->hw_stats_map, GFP_KERNEL); 4959 if (!stats->hw_stats) 4960 return -ENOMEM; 4961 4962 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4963 if (!stats->sw_stats) 4964 goto stats_mem_err; 4965 4966 if (alloc_masks) { 4967 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4968 if (!stats->hw_masks) 4969 goto stats_mem_err; 4970 } 4971 return 0; 4972 4973 stats_mem_err: 4974 bnxt_free_stats_mem(bp, stats); 4975 return -ENOMEM; 4976 } 4977 4978 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4979 { 4980 int i; 4981 4982 for (i = 0; i < count; i++) 4983 mask_arr[i] = mask; 4984 } 4985 4986 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4987 { 4988 int i; 4989 4990 for (i = 0; i < count; i++) 4991 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4992 } 4993 4994 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4995 struct bnxt_stats_mem *stats) 
4996 { 4997 struct hwrm_func_qstats_ext_output *resp; 4998 struct hwrm_func_qstats_ext_input *req; 4999 __le64 *hw_masks; 5000 int rc; 5001 5002 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 5003 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5004 return -EOPNOTSUPP; 5005 5006 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 5007 if (rc) 5008 return rc; 5009 5010 req->fid = cpu_to_le16(0xffff); 5011 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5012 5013 resp = hwrm_req_hold(bp, req); 5014 rc = hwrm_req_send(bp, req); 5015 if (!rc) { 5016 hw_masks = &resp->rx_ucast_pkts; 5017 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 5018 } 5019 hwrm_req_drop(bp, req); 5020 return rc; 5021 } 5022 5023 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 5024 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 5025 5026 static void bnxt_init_stats(struct bnxt *bp) 5027 { 5028 struct bnxt_napi *bnapi = bp->bnapi[0]; 5029 struct bnxt_cp_ring_info *cpr; 5030 struct bnxt_stats_mem *stats; 5031 __le64 *rx_stats, *tx_stats; 5032 int rc, rx_count, tx_count; 5033 u64 *rx_masks, *tx_masks; 5034 u64 mask; 5035 u8 flags; 5036 5037 cpr = &bnapi->cp_ring; 5038 stats = &cpr->stats; 5039 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 5040 if (rc) { 5041 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5042 mask = (1ULL << 48) - 1; 5043 else 5044 mask = -1ULL; 5045 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 5046 } 5047 if (bp->flags & BNXT_FLAG_PORT_STATS) { 5048 stats = &bp->port_stats; 5049 rx_stats = stats->hw_stats; 5050 rx_masks = stats->hw_masks; 5051 rx_count = sizeof(struct rx_port_stats) / 8; 5052 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5053 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5054 tx_count = sizeof(struct tx_port_stats) / 8; 5055 5056 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 5057 rc = bnxt_hwrm_port_qstats(bp, flags); 5058 if (rc) { 5059 mask = (1ULL << 40) - 1; 5060 5061 bnxt_fill_masks(rx_masks, mask, rx_count); 5062 bnxt_fill_masks(tx_masks, mask, tx_count); 5063 } else { 5064 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5065 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 5066 bnxt_hwrm_port_qstats(bp, 0); 5067 } 5068 } 5069 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 5070 stats = &bp->rx_port_stats_ext; 5071 rx_stats = stats->hw_stats; 5072 rx_masks = stats->hw_masks; 5073 rx_count = sizeof(struct rx_port_stats_ext) / 8; 5074 stats = &bp->tx_port_stats_ext; 5075 tx_stats = stats->hw_stats; 5076 tx_masks = stats->hw_masks; 5077 tx_count = sizeof(struct tx_port_stats_ext) / 8; 5078 5079 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5080 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 5081 if (rc) { 5082 mask = (1ULL << 40) - 1; 5083 5084 bnxt_fill_masks(rx_masks, mask, rx_count); 5085 if (tx_stats) 5086 bnxt_fill_masks(tx_masks, mask, tx_count); 5087 } else { 5088 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5089 if (tx_stats) 5090 bnxt_copy_hw_masks(tx_masks, tx_stats, 5091 tx_count); 5092 bnxt_hwrm_port_qstats_ext(bp, 0); 5093 } 5094 } 5095 } 5096 5097 static void bnxt_free_port_stats(struct bnxt *bp) 5098 { 5099 bp->flags &= ~BNXT_FLAG_PORT_STATS; 5100 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 5101 5102 bnxt_free_stats_mem(bp, &bp->port_stats); 5103 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 5104 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 5105 } 5106 5107 static void bnxt_free_ring_stats(struct bnxt *bp) 5108 { 5109 int i; 5110 5111 if (!bp->bnapi) 5112 return; 5113 5114 for (i = 0; i < 
bp->cp_nr_rings; i++) { 5115 struct bnxt_napi *bnapi = bp->bnapi[i]; 5116 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5117 5118 bnxt_free_stats_mem(bp, &cpr->stats); 5119 5120 kfree(cpr->sw_stats); 5121 cpr->sw_stats = NULL; 5122 } 5123 } 5124 5125 static int bnxt_alloc_stats(struct bnxt *bp) 5126 { 5127 u32 size, i; 5128 int rc; 5129 5130 size = bp->hw_ring_stats_size; 5131 5132 for (i = 0; i < bp->cp_nr_rings; i++) { 5133 struct bnxt_napi *bnapi = bp->bnapi[i]; 5134 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5135 5136 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); 5137 if (!cpr->sw_stats) 5138 return -ENOMEM; 5139 5140 cpr->stats.len = size; 5141 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 5142 if (rc) 5143 return rc; 5144 5145 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 5146 } 5147 5148 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 5149 return 0; 5150 5151 if (bp->port_stats.hw_stats) 5152 goto alloc_ext_stats; 5153 5154 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 5155 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 5156 if (rc) 5157 return rc; 5158 5159 bp->flags |= BNXT_FLAG_PORT_STATS; 5160 5161 alloc_ext_stats: 5162 /* Display extended statistics only if FW supports it */ 5163 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 5164 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 5165 return 0; 5166 5167 if (bp->rx_port_stats_ext.hw_stats) 5168 goto alloc_tx_ext_stats; 5169 5170 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 5171 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 5172 /* Extended stats are optional */ 5173 if (rc) 5174 return 0; 5175 5176 alloc_tx_ext_stats: 5177 if (bp->tx_port_stats_ext.hw_stats) 5178 return 0; 5179 5180 if (bp->hwrm_spec_code >= 0x10902 || 5181 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 5182 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 5183 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 5184 /* Extended stats are optional */ 5185 if (rc) 5186 return 0; 5187 } 5188 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 5189 return 0; 5190 } 5191 5192 static void bnxt_clear_ring_indices(struct bnxt *bp) 5193 { 5194 int i, j; 5195 5196 if (!bp->bnapi) 5197 return; 5198 5199 for (i = 0; i < bp->cp_nr_rings; i++) { 5200 struct bnxt_napi *bnapi = bp->bnapi[i]; 5201 struct bnxt_cp_ring_info *cpr; 5202 struct bnxt_rx_ring_info *rxr; 5203 struct bnxt_tx_ring_info *txr; 5204 5205 if (!bnapi) 5206 continue; 5207 5208 cpr = &bnapi->cp_ring; 5209 cpr->cp_raw_cons = 0; 5210 5211 bnxt_for_each_napi_tx(j, bnapi, txr) { 5212 txr->tx_prod = 0; 5213 txr->tx_cons = 0; 5214 txr->tx_hw_cons = 0; 5215 } 5216 5217 rxr = bnapi->rx_ring; 5218 if (rxr) { 5219 rxr->rx_prod = 0; 5220 rxr->rx_agg_prod = 0; 5221 rxr->rx_sw_agg_prod = 0; 5222 rxr->rx_next_cons = 0; 5223 } 5224 bnapi->events = 0; 5225 } 5226 } 5227 5228 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5229 { 5230 u8 type = fltr->type, flags = fltr->flags; 5231 5232 INIT_LIST_HEAD(&fltr->list); 5233 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || 5234 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) 5235 list_add_tail(&fltr->list, &bp->usr_fltr_list); 5236 } 5237 5238 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5239 { 5240 if (!list_empty(&fltr->list)) 5241 list_del_init(&fltr->list); 5242 } 5243 5244 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) 5245 { 5246 struct bnxt_filter_base *usr_fltr, *tmp; 5247 5248 
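	/* Unless "all" is set, user-added L2 filters stay on the list and
	 * only the ntuple entries are unlinked here.
	 */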
list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 5249 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) 5250 continue; 5251 bnxt_del_one_usr_fltr(bp, usr_fltr); 5252 } 5253 } 5254 5255 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5256 { 5257 hlist_del(&fltr->hash); 5258 bnxt_del_one_usr_fltr(bp, fltr); 5259 if (fltr->flags) { 5260 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 5261 bp->ntp_fltr_count--; 5262 } 5263 kfree(fltr); 5264 } 5265 5266 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 5267 { 5268 int i; 5269 5270 netdev_assert_locked(bp->dev); 5271 5272 /* Under netdev instance lock and all our NAPIs have been disabled. 5273 * It's safe to delete the hash table. 5274 */ 5275 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 5276 struct hlist_head *head; 5277 struct hlist_node *tmp; 5278 struct bnxt_ntuple_filter *fltr; 5279 5280 head = &bp->ntp_fltr_hash_tbl[i]; 5281 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5282 bnxt_del_l2_filter(bp, fltr->l2_fltr); 5283 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5284 !list_empty(&fltr->base.list))) 5285 continue; 5286 bnxt_del_fltr(bp, &fltr->base); 5287 } 5288 } 5289 if (!all) 5290 return; 5291 5292 bitmap_free(bp->ntp_fltr_bmap); 5293 bp->ntp_fltr_bmap = NULL; 5294 bp->ntp_fltr_count = 0; 5295 } 5296 5297 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 5298 { 5299 int i, rc = 0; 5300 5301 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 5302 return 0; 5303 5304 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 5305 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 5306 5307 bp->ntp_fltr_count = 0; 5308 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); 5309 5310 if (!bp->ntp_fltr_bmap) 5311 rc = -ENOMEM; 5312 5313 return rc; 5314 } 5315 5316 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 5317 { 5318 int i; 5319 5320 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 5321 struct hlist_head *head; 5322 struct hlist_node *tmp; 5323 struct bnxt_l2_filter *fltr; 5324 5325 head = &bp->l2_fltr_hash_tbl[i]; 5326 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5327 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5328 !list_empty(&fltr->base.list))) 5329 continue; 5330 bnxt_del_fltr(bp, &fltr->base); 5331 } 5332 } 5333 } 5334 5335 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 5336 { 5337 int i; 5338 5339 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 5340 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 5341 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 5342 } 5343 5344 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 5345 { 5346 bnxt_free_vnic_attributes(bp); 5347 bnxt_free_tx_rings(bp); 5348 bnxt_free_rx_rings(bp); 5349 bnxt_free_cp_rings(bp); 5350 bnxt_free_all_cp_arrays(bp); 5351 bnxt_free_ntp_fltrs(bp, false); 5352 bnxt_free_l2_filters(bp, false); 5353 if (irq_re_init) { 5354 bnxt_free_ring_stats(bp); 5355 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 5356 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 5357 bnxt_free_port_stats(bp); 5358 bnxt_free_ring_grps(bp); 5359 bnxt_free_vnics(bp); 5360 kfree(bp->tx_ring_map); 5361 bp->tx_ring_map = NULL; 5362 kfree(bp->tx_ring); 5363 bp->tx_ring = NULL; 5364 kfree(bp->rx_ring); 5365 bp->rx_ring = NULL; 5366 kfree(bp->bnapi); 5367 bp->bnapi = NULL; 5368 } else { 5369 bnxt_clear_ring_indices(bp); 5370 } 5371 } 5372 5373 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5374 { 5375 int i, j, rc, size, arr_size; 5376 void *bnapi; 5377 5378 if (irq_re_init) { 5379 
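		/* A full reinit (irq_re_init) also reallocates the bnapi
		 * array, the RX/TX ring info arrays, stats, filter tables and
		 * VNICs; otherwise only the ring memory and VNIC attributes
		 * further down are set up again.
		 */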
/* Allocate bnapi mem pointer array and mem block for 5380 * all queues 5381 */ 5382 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 5383 bp->cp_nr_rings); 5384 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 5385 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 5386 if (!bnapi) 5387 return -ENOMEM; 5388 5389 bp->bnapi = bnapi; 5390 bnapi += arr_size; 5391 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 5392 bp->bnapi[i] = bnapi; 5393 bp->bnapi[i]->index = i; 5394 bp->bnapi[i]->bp = bp; 5395 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5396 struct bnxt_cp_ring_info *cpr = 5397 &bp->bnapi[i]->cp_ring; 5398 5399 cpr->cp_ring_struct.ring_mem.flags = 5400 BNXT_RMEM_RING_PTE_FLAG; 5401 } 5402 } 5403 5404 bp->rx_ring = kcalloc(bp->rx_nr_rings, 5405 sizeof(struct bnxt_rx_ring_info), 5406 GFP_KERNEL); 5407 if (!bp->rx_ring) 5408 return -ENOMEM; 5409 5410 for (i = 0; i < bp->rx_nr_rings; i++) { 5411 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5412 5413 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5414 rxr->rx_ring_struct.ring_mem.flags = 5415 BNXT_RMEM_RING_PTE_FLAG; 5416 rxr->rx_agg_ring_struct.ring_mem.flags = 5417 BNXT_RMEM_RING_PTE_FLAG; 5418 } else { 5419 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 5420 } 5421 rxr->bnapi = bp->bnapi[i]; 5422 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 5423 } 5424 5425 bp->tx_ring = kcalloc(bp->tx_nr_rings, 5426 sizeof(struct bnxt_tx_ring_info), 5427 GFP_KERNEL); 5428 if (!bp->tx_ring) 5429 return -ENOMEM; 5430 5431 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 5432 GFP_KERNEL); 5433 5434 if (!bp->tx_ring_map) 5435 return -ENOMEM; 5436 5437 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5438 j = 0; 5439 else 5440 j = bp->rx_nr_rings; 5441 5442 for (i = 0; i < bp->tx_nr_rings; i++) { 5443 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5444 struct bnxt_napi *bnapi2; 5445 5446 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5447 txr->tx_ring_struct.ring_mem.flags = 5448 BNXT_RMEM_RING_PTE_FLAG; 5449 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 5450 if (i >= bp->tx_nr_rings_xdp) { 5451 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 5452 5453 bnapi2 = bp->bnapi[k]; 5454 txr->txq_index = i - bp->tx_nr_rings_xdp; 5455 txr->tx_napi_idx = 5456 BNXT_RING_TO_TC(bp, txr->txq_index); 5457 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 5458 bnapi2->tx_int = bnxt_tx_int; 5459 } else { 5460 bnapi2 = bp->bnapi[j]; 5461 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5462 bnapi2->tx_ring[0] = txr; 5463 bnapi2->tx_int = bnxt_tx_int_xdp; 5464 j++; 5465 } 5466 txr->bnapi = bnapi2; 5467 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5468 txr->tx_cpr = &bnapi2->cp_ring; 5469 } 5470 5471 rc = bnxt_alloc_stats(bp); 5472 if (rc) 5473 goto alloc_mem_err; 5474 bnxt_init_stats(bp); 5475 5476 rc = bnxt_alloc_ntp_fltrs(bp); 5477 if (rc) 5478 goto alloc_mem_err; 5479 5480 rc = bnxt_alloc_vnics(bp); 5481 if (rc) 5482 goto alloc_mem_err; 5483 } 5484 5485 rc = bnxt_alloc_all_cp_arrays(bp); 5486 if (rc) 5487 goto alloc_mem_err; 5488 5489 bnxt_init_ring_struct(bp); 5490 5491 rc = bnxt_alloc_rx_rings(bp); 5492 if (rc) 5493 goto alloc_mem_err; 5494 5495 rc = bnxt_alloc_tx_rings(bp); 5496 if (rc) 5497 goto alloc_mem_err; 5498 5499 rc = bnxt_alloc_cp_rings(bp); 5500 if (rc) 5501 goto alloc_mem_err; 5502 5503 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | 5504 BNXT_VNIC_MCAST_FLAG | 5505 BNXT_VNIC_UCAST_FLAG; 5506 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5507 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5508 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5509 5510 rc = 
bnxt_alloc_vnic_attributes(bp); 5511 if (rc) 5512 goto alloc_mem_err; 5513 return 0; 5514 5515 alloc_mem_err: 5516 bnxt_free_mem(bp, true); 5517 return rc; 5518 } 5519 5520 static void bnxt_disable_int(struct bnxt *bp) 5521 { 5522 int i; 5523 5524 if (!bp->bnapi) 5525 return; 5526 5527 for (i = 0; i < bp->cp_nr_rings; i++) { 5528 struct bnxt_napi *bnapi = bp->bnapi[i]; 5529 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5530 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5531 5532 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5533 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5534 } 5535 } 5536 5537 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5538 { 5539 struct bnxt_napi *bnapi = bp->bnapi[n]; 5540 struct bnxt_cp_ring_info *cpr; 5541 5542 cpr = &bnapi->cp_ring; 5543 return cpr->cp_ring_struct.map_idx; 5544 } 5545 5546 static void bnxt_disable_int_sync(struct bnxt *bp) 5547 { 5548 int i; 5549 5550 if (!bp->irq_tbl) 5551 return; 5552 5553 atomic_inc(&bp->intr_sem); 5554 5555 bnxt_disable_int(bp); 5556 for (i = 0; i < bp->cp_nr_rings; i++) { 5557 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5558 5559 synchronize_irq(bp->irq_tbl[map_idx].vector); 5560 } 5561 } 5562 5563 static void bnxt_enable_int(struct bnxt *bp) 5564 { 5565 int i; 5566 5567 atomic_set(&bp->intr_sem, 0); 5568 for (i = 0; i < bp->cp_nr_rings; i++) { 5569 struct bnxt_napi *bnapi = bp->bnapi[i]; 5570 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5571 5572 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5573 } 5574 } 5575 5576 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5577 bool async_only) 5578 { 5579 DECLARE_BITMAP(async_events_bmap, 256); 5580 u32 *events = (u32 *)async_events_bmap; 5581 struct hwrm_func_drv_rgtr_output *resp; 5582 struct hwrm_func_drv_rgtr_input *req; 5583 u32 flags; 5584 int rc, i; 5585 5586 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5587 if (rc) 5588 return rc; 5589 5590 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5591 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5592 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5593 5594 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5595 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5596 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5597 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5598 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5599 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5600 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5601 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) 5602 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT; 5603 req->flags = cpu_to_le32(flags); 5604 req->ver_maj_8b = DRV_VER_MAJ; 5605 req->ver_min_8b = DRV_VER_MIN; 5606 req->ver_upd_8b = DRV_VER_UPD; 5607 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5608 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5609 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5610 5611 if (BNXT_PF(bp)) { 5612 u32 data[8]; 5613 int i; 5614 5615 memset(data, 0, sizeof(data)); 5616 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5617 u16 cmd = bnxt_vf_req_snif[i]; 5618 unsigned int bit, idx; 5619 5620 idx = cmd / 32; 5621 bit = cmd % 32; 5622 data[idx] |= 1 << bit; 5623 } 5624 5625 for (i = 0; i < 8; i++) 5626 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5627 5628 req->enables |= 5629 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5630 } 5631 5632 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5633 req->flags |= cpu_to_le32( 5634 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5635 5636 memset(async_events_bmap, 0, 
sizeof(async_events_bmap)); 5637 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5638 u16 event_id = bnxt_async_events_arr[i]; 5639 5640 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5641 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5642 continue; 5643 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5644 !bp->ptp_cfg) 5645 continue; 5646 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5647 } 5648 if (bmap && bmap_size) { 5649 for (i = 0; i < bmap_size; i++) { 5650 if (test_bit(i, bmap)) 5651 __set_bit(i, async_events_bmap); 5652 } 5653 } 5654 for (i = 0; i < 8; i++) 5655 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5656 5657 if (async_only) 5658 req->enables = 5659 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5660 5661 resp = hwrm_req_hold(bp, req); 5662 rc = hwrm_req_send(bp, req); 5663 if (!rc) { 5664 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5665 if (resp->flags & 5666 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5667 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5668 } 5669 hwrm_req_drop(bp, req); 5670 return rc; 5671 } 5672 5673 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5674 { 5675 struct hwrm_func_drv_unrgtr_input *req; 5676 int rc; 5677 5678 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5679 return 0; 5680 5681 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5682 if (rc) 5683 return rc; 5684 return hwrm_req_send(bp, req); 5685 } 5686 5687 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5688 5689 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5690 { 5691 struct hwrm_tunnel_dst_port_free_input *req; 5692 int rc; 5693 5694 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5695 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5696 return 0; 5697 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5698 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5699 return 0; 5700 5701 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5702 if (rc) 5703 return rc; 5704 5705 req->tunnel_type = tunnel_type; 5706 5707 switch (tunnel_type) { 5708 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5709 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5710 bp->vxlan_port = 0; 5711 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5712 break; 5713 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5714 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5715 bp->nge_port = 0; 5716 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5717 break; 5718 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5719 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5720 bp->vxlan_gpe_port = 0; 5721 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5722 break; 5723 default: 5724 break; 5725 } 5726 5727 rc = hwrm_req_send(bp, req); 5728 if (rc) 5729 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 5730 rc); 5731 if (bp->flags & BNXT_FLAG_TPA) 5732 bnxt_set_tpa(bp, true); 5733 return rc; 5734 } 5735 5736 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5737 u8 tunnel_type) 5738 { 5739 struct hwrm_tunnel_dst_port_alloc_output *resp; 5740 struct hwrm_tunnel_dst_port_alloc_input *req; 5741 int rc; 5742 5743 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5744 if (rc) 5745 return rc; 5746 5747 req->tunnel_type = tunnel_type; 5748 req->tunnel_dst_port_val = port; 5749 5750 resp = hwrm_req_hold(bp, req); 5751 rc = hwrm_req_send(bp, req); 5752 if (rc) { 5753 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5754 rc); 5755 goto err_out; 5756 } 5757 5758 switch (tunnel_type) { 5759 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5760 bp->vxlan_port = port; 5761 bp->vxlan_fw_dst_port_id = 5762 le16_to_cpu(resp->tunnel_dst_port_id); 5763 break; 5764 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5765 bp->nge_port = port; 5766 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5767 break; 5768 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5769 bp->vxlan_gpe_port = port; 5770 bp->vxlan_gpe_fw_dst_port_id = 5771 le16_to_cpu(resp->tunnel_dst_port_id); 5772 break; 5773 default: 5774 break; 5775 } 5776 if (bp->flags & BNXT_FLAG_TPA) 5777 bnxt_set_tpa(bp, true); 5778 5779 err_out: 5780 hwrm_req_drop(bp, req); 5781 return rc; 5782 } 5783 5784 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5785 { 5786 struct hwrm_cfa_l2_set_rx_mask_input *req; 5787 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5788 int rc; 5789 5790 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5791 if (rc) 5792 return rc; 5793 5794 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5795 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5796 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5797 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5798 } 5799 req->mask = cpu_to_le32(vnic->rx_mask); 5800 return hwrm_req_send_silent(bp, req); 5801 } 5802 5803 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5804 { 5805 if (!atomic_dec_and_test(&fltr->refcnt)) 5806 return; 5807 spin_lock_bh(&bp->ntp_fltr_lock); 5808 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5809 spin_unlock_bh(&bp->ntp_fltr_lock); 5810 return; 5811 } 5812 hlist_del_rcu(&fltr->base.hash); 5813 bnxt_del_one_usr_fltr(bp, &fltr->base); 5814 if (fltr->base.flags) { 5815 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5816 bp->ntp_fltr_count--; 5817 } 5818 spin_unlock_bh(&bp->ntp_fltr_lock); 5819 kfree_rcu(fltr, base.rcu); 5820 } 5821 5822 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5823 struct bnxt_l2_key *key, 5824 u32 idx) 5825 { 5826 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5827 struct bnxt_l2_filter *fltr; 5828 5829 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5830 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5831 5832 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5833 l2_key->vlan == key->vlan) 5834 return fltr; 5835 } 5836 return NULL; 5837 } 5838 5839 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5840 struct bnxt_l2_key *key, 5841 u32 idx) 5842 { 5843 struct bnxt_l2_filter *fltr = NULL; 5844 5845 rcu_read_lock(); 5846 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5847 if (fltr) 5848 atomic_inc(&fltr->refcnt); 5849 rcu_read_unlock(); 5850 return fltr; 5851 } 5852 5853 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5854 
(((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5855 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5856 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5857 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5858 5859 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5860 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5861 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5862 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5863 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5864 5865 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5866 { 5867 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5868 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5869 return sizeof(fkeys->addrs.v4addrs) + 5870 sizeof(fkeys->ports); 5871 5872 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5873 return sizeof(fkeys->addrs.v4addrs); 5874 } 5875 5876 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5877 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5878 return sizeof(fkeys->addrs.v6addrs) + 5879 sizeof(fkeys->ports); 5880 5881 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5882 return sizeof(fkeys->addrs.v6addrs); 5883 } 5884 5885 return 0; 5886 } 5887 5888 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5889 const unsigned char *key) 5890 { 5891 u64 prefix = bp->toeplitz_prefix, hash = 0; 5892 struct bnxt_ipv4_tuple tuple4; 5893 struct bnxt_ipv6_tuple tuple6; 5894 int i, j, len = 0; 5895 u8 *four_tuple; 5896 5897 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5898 if (!len) 5899 return 0; 5900 5901 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5902 tuple4.v4addrs = fkeys->addrs.v4addrs; 5903 tuple4.ports = fkeys->ports; 5904 four_tuple = (unsigned char *)&tuple4; 5905 } else { 5906 tuple6.v6addrs = fkeys->addrs.v6addrs; 5907 tuple6.ports = fkeys->ports; 5908 four_tuple = (unsigned char *)&tuple6; 5909 } 5910 5911 for (i = 0, j = 8; i < len; i++, j++) { 5912 u8 byte = four_tuple[i]; 5913 int bit; 5914 5915 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5916 if (byte & 0x80) 5917 hash ^= prefix; 5918 } 5919 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5920 } 5921 5922 /* The valid part of the hash is in the upper 32 bits. 
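 * The 64-bit "prefix" above is a sliding window over the RSS key:
 * every set input bit XORs the current window into the accumulator,
 * and only the upper 32 bits see a fully populated key window for
 * each input bit, so the lower half is dropped before masking with
 * BNXT_NTP_FLTR_HASH_MASK.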
*/ 5923 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5924 } 5925 5926 #ifdef CONFIG_RFS_ACCEL 5927 static struct bnxt_l2_filter * 5928 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5929 { 5930 struct bnxt_l2_filter *fltr; 5931 u32 idx; 5932 5933 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5934 BNXT_L2_FLTR_HASH_MASK; 5935 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5936 return fltr; 5937 } 5938 #endif 5939 5940 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 5941 struct bnxt_l2_key *key, u32 idx) 5942 { 5943 struct hlist_head *head; 5944 5945 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 5946 fltr->l2_key.vlan = key->vlan; 5947 fltr->base.type = BNXT_FLTR_TYPE_L2; 5948 if (fltr->base.flags) { 5949 int bit_id; 5950 5951 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5952 bp->max_fltr, 0); 5953 if (bit_id < 0) 5954 return -ENOMEM; 5955 fltr->base.sw_id = (u16)bit_id; 5956 bp->ntp_fltr_count++; 5957 } 5958 head = &bp->l2_fltr_hash_tbl[idx]; 5959 hlist_add_head_rcu(&fltr->base.hash, head); 5960 bnxt_insert_usr_fltr(bp, &fltr->base); 5961 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 5962 atomic_set(&fltr->refcnt, 1); 5963 return 0; 5964 } 5965 5966 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 5967 struct bnxt_l2_key *key, 5968 gfp_t gfp) 5969 { 5970 struct bnxt_l2_filter *fltr; 5971 u32 idx; 5972 int rc; 5973 5974 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5975 BNXT_L2_FLTR_HASH_MASK; 5976 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5977 if (fltr) 5978 return fltr; 5979 5980 fltr = kzalloc(sizeof(*fltr), gfp); 5981 if (!fltr) 5982 return ERR_PTR(-ENOMEM); 5983 spin_lock_bh(&bp->ntp_fltr_lock); 5984 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5985 spin_unlock_bh(&bp->ntp_fltr_lock); 5986 if (rc) { 5987 bnxt_del_l2_filter(bp, fltr); 5988 fltr = ERR_PTR(rc); 5989 } 5990 return fltr; 5991 } 5992 5993 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, 5994 struct bnxt_l2_key *key, 5995 u16 flags) 5996 { 5997 struct bnxt_l2_filter *fltr; 5998 u32 idx; 5999 int rc; 6000 6001 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 6002 BNXT_L2_FLTR_HASH_MASK; 6003 spin_lock_bh(&bp->ntp_fltr_lock); 6004 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 6005 if (fltr) { 6006 fltr = ERR_PTR(-EEXIST); 6007 goto l2_filter_exit; 6008 } 6009 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); 6010 if (!fltr) { 6011 fltr = ERR_PTR(-ENOMEM); 6012 goto l2_filter_exit; 6013 } 6014 fltr->base.flags = flags; 6015 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 6016 if (rc) { 6017 spin_unlock_bh(&bp->ntp_fltr_lock); 6018 bnxt_del_l2_filter(bp, fltr); 6019 return ERR_PTR(rc); 6020 } 6021 6022 l2_filter_exit: 6023 spin_unlock_bh(&bp->ntp_fltr_lock); 6024 return fltr; 6025 } 6026 6027 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 6028 { 6029 #ifdef CONFIG_BNXT_SRIOV 6030 struct bnxt_vf_info *vf = &pf->vf[vf_idx]; 6031 6032 return vf->fw_fid; 6033 #else 6034 return INVALID_HW_RING_ID; 6035 #endif 6036 } 6037 6038 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6039 { 6040 struct hwrm_cfa_l2_filter_free_input *req; 6041 u16 target_id = 0xffff; 6042 int rc; 6043 6044 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6045 struct bnxt_pf_info *pf = &bp->pf; 6046 6047 if (fltr->base.vf_idx >= pf->active_vfs) 6048 return -EINVAL; 6049 6050 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6051 if (target_id == 
INVALID_HW_RING_ID) 6052 return -EINVAL; 6053 } 6054 6055 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 6056 if (rc) 6057 return rc; 6058 6059 req->target_id = cpu_to_le16(target_id); 6060 req->l2_filter_id = fltr->base.filter_id; 6061 return hwrm_req_send(bp, req); 6062 } 6063 6064 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6065 { 6066 struct hwrm_cfa_l2_filter_alloc_output *resp; 6067 struct hwrm_cfa_l2_filter_alloc_input *req; 6068 u16 target_id = 0xffff; 6069 int rc; 6070 6071 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6072 struct bnxt_pf_info *pf = &bp->pf; 6073 6074 if (fltr->base.vf_idx >= pf->active_vfs) 6075 return -EINVAL; 6076 6077 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6078 } 6079 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 6080 if (rc) 6081 return rc; 6082 6083 req->target_id = cpu_to_le16(target_id); 6084 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 6085 6086 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 6087 req->flags |= 6088 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 6089 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 6090 req->enables = 6091 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 6092 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 6093 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 6094 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 6095 eth_broadcast_addr(req->l2_addr_mask); 6096 6097 if (fltr->l2_key.vlan) { 6098 req->enables |= 6099 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 6100 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 6101 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 6102 req->num_vlans = 1; 6103 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 6104 req->l2_ivlan_mask = cpu_to_le16(0xfff); 6105 } 6106 6107 resp = hwrm_req_hold(bp, req); 6108 rc = hwrm_req_send(bp, req); 6109 if (!rc) { 6110 fltr->base.filter_id = resp->l2_filter_id; 6111 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 6112 } 6113 hwrm_req_drop(bp, req); 6114 return rc; 6115 } 6116 6117 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 6118 struct bnxt_ntuple_filter *fltr) 6119 { 6120 struct hwrm_cfa_ntuple_filter_free_input *req; 6121 int rc; 6122 6123 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 6124 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 6125 if (rc) 6126 return rc; 6127 6128 req->ntuple_filter_id = fltr->base.filter_id; 6129 return hwrm_req_send(bp, req); 6130 } 6131 6132 #define BNXT_NTP_FLTR_FLAGS \ 6133 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 6134 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 6135 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 6136 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 6137 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 6138 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 6139 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 6140 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 6141 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 6142 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 6143 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 6144 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 6145 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 6146 6147 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 6148 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 6149 6150 void bnxt_fill_ipv6_mask(__be32 mask[4]) 6151 { 6152 int i; 6153 6154 for (i = 0; i < 4; i++) 6155 mask[i] = cpu_to_be32(~0); 6156 } 6157 6158 static void 6159 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 6160 struct 
hwrm_cfa_ntuple_filter_alloc_input *req, 6161 struct bnxt_ntuple_filter *fltr) 6162 { 6163 u16 rxq = fltr->base.rxq; 6164 6165 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 6166 struct ethtool_rxfh_context *ctx; 6167 struct bnxt_rss_ctx *rss_ctx; 6168 struct bnxt_vnic_info *vnic; 6169 6170 ctx = xa_load(&bp->dev->ethtool->rss_ctx, 6171 fltr->base.fw_vnic_id); 6172 if (ctx) { 6173 rss_ctx = ethtool_rxfh_context_priv(ctx); 6174 vnic = &rss_ctx->vnic; 6175 6176 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6177 } 6178 return; 6179 } 6180 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 6181 struct bnxt_vnic_info *vnic; 6182 u32 enables; 6183 6184 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 6185 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6186 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 6187 req->enables |= cpu_to_le32(enables); 6188 req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 6189 } else { 6190 u32 flags; 6191 6192 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 6193 req->flags |= cpu_to_le32(flags); 6194 req->dst_id = cpu_to_le16(rxq); 6195 } 6196 } 6197 6198 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 6199 struct bnxt_ntuple_filter *fltr) 6200 { 6201 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 6202 struct hwrm_cfa_ntuple_filter_alloc_input *req; 6203 struct bnxt_flow_masks *masks = &fltr->fmasks; 6204 struct flow_keys *keys = &fltr->fkeys; 6205 struct bnxt_l2_filter *l2_fltr; 6206 struct bnxt_vnic_info *vnic; 6207 int rc; 6208 6209 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 6210 if (rc) 6211 return rc; 6212 6213 l2_fltr = fltr->l2_fltr; 6214 req->l2_filter_id = l2_fltr->base.filter_id; 6215 6216 if (fltr->base.flags & BNXT_ACT_DROP) { 6217 req->flags = 6218 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 6219 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 6220 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); 6221 } else { 6222 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 6223 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6224 } 6225 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 6226 6227 req->ethertype = htons(ETH_P_IP); 6228 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 6229 req->ip_protocol = keys->basic.ip_proto; 6230 6231 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 6232 req->ethertype = htons(ETH_P_IPV6); 6233 req->ip_addr_type = 6234 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 6235 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; 6236 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; 6237 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; 6238 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; 6239 } else { 6240 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 6241 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; 6242 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 6243 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; 6244 } 6245 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 6246 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 6247 req->tunnel_type = 6248 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 6249 } 6250 6251 req->src_port = keys->ports.src; 6252 req->src_port_mask = masks->ports.src; 6253 req->dst_port = keys->ports.dst; 6254 req->dst_port_mask = masks->ports.dst; 6255 6256 resp = hwrm_req_hold(bp, req); 6257 rc = hwrm_req_send(bp, req); 6258 if (!rc) 6259 fltr->base.filter_id = resp->ntuple_filter_id; 6260 hwrm_req_drop(bp, req); 6261 return rc; 6262 } 6263 6264 static int 
bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 6265 const u8 *mac_addr) 6266 { 6267 struct bnxt_l2_filter *fltr; 6268 struct bnxt_l2_key key; 6269 int rc; 6270 6271 ether_addr_copy(key.dst_mac_addr, mac_addr); 6272 key.vlan = 0; 6273 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 6274 if (IS_ERR(fltr)) 6275 return PTR_ERR(fltr); 6276 6277 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; 6278 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 6279 if (rc) 6280 bnxt_del_l2_filter(bp, fltr); 6281 else 6282 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; 6283 return rc; 6284 } 6285 6286 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 6287 { 6288 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 6289 6290 /* Any associated ntuple filters will also be cleared by firmware. */ 6291 for (i = 0; i < num_of_vnics; i++) { 6292 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6293 6294 for (j = 0; j < vnic->uc_filter_count; j++) { 6295 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; 6296 6297 bnxt_hwrm_l2_filter_free(bp, fltr); 6298 bnxt_del_l2_filter(bp, fltr); 6299 } 6300 vnic->uc_filter_count = 0; 6301 } 6302 } 6303 6304 #define BNXT_DFLT_TUNL_TPA_BMAP \ 6305 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ 6306 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ 6307 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) 6308 6309 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, 6310 struct hwrm_vnic_tpa_cfg_input *req) 6311 { 6312 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; 6313 6314 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) 6315 return; 6316 6317 if (bp->vxlan_port) 6318 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; 6319 if (bp->vxlan_gpe_port) 6320 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; 6321 if (bp->nge_port) 6322 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; 6323 6324 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); 6325 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); 6326 } 6327 6328 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6329 u32 tpa_flags) 6330 { 6331 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 6332 struct hwrm_vnic_tpa_cfg_input *req; 6333 int rc; 6334 6335 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 6336 return 0; 6337 6338 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 6339 if (rc) 6340 return rc; 6341 6342 if (tpa_flags) { 6343 u16 mss = bp->dev->mtu - 40; 6344 u32 nsegs, n, segs = 0, flags; 6345 6346 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 6347 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 6348 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 6349 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 6350 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 6351 if (tpa_flags & BNXT_FLAG_GRO) 6352 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 6353 6354 req->flags = cpu_to_le32(flags); 6355 6356 req->enables = 6357 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 6358 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 6359 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 6360 6361 /* Number of segs are log2 units, and first packet is not 6362 * included as part of this units. 
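 * For example, a 1500-byte MTU gives an mss of 1460; with 4K rx
 * pages, n = 2 and nsegs = 2 * (MAX_SKB_FRAGS - 1), and
 * ilog2(nsegs) is what gets programmed as max_agg_segs on
 * pre-P5 chips.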
6363 */ 6364 if (mss <= BNXT_RX_PAGE_SIZE) { 6365 n = BNXT_RX_PAGE_SIZE / mss; 6366 nsegs = (MAX_SKB_FRAGS - 1) * n; 6367 } else { 6368 n = mss / BNXT_RX_PAGE_SIZE; 6369 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 6370 n++; 6371 nsegs = (MAX_SKB_FRAGS - n) / n; 6372 } 6373 6374 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6375 segs = MAX_TPA_SEGS_P5; 6376 max_aggs = bp->max_tpa; 6377 } else { 6378 segs = ilog2(nsegs); 6379 } 6380 req->max_agg_segs = cpu_to_le16(segs); 6381 req->max_aggs = cpu_to_le16(max_aggs); 6382 6383 req->min_agg_len = cpu_to_le32(512); 6384 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 6385 } 6386 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6387 6388 return hwrm_req_send(bp, req); 6389 } 6390 6391 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 6392 { 6393 struct bnxt_ring_grp_info *grp_info; 6394 6395 grp_info = &bp->grp_info[ring->grp_idx]; 6396 return grp_info->cp_fw_ring_id; 6397 } 6398 6399 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 6400 { 6401 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6402 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 6403 else 6404 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 6405 } 6406 6407 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 6408 { 6409 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6410 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 6411 else 6412 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 6413 } 6414 6415 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 6416 { 6417 int entries; 6418 6419 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6420 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 6421 else 6422 entries = HW_HASH_INDEX_SIZE; 6423 6424 bp->rss_indir_tbl_entries = entries; 6425 bp->rss_indir_tbl = 6426 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); 6427 if (!bp->rss_indir_tbl) 6428 return -ENOMEM; 6429 6430 return 0; 6431 } 6432 6433 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, 6434 struct ethtool_rxfh_context *rss_ctx) 6435 { 6436 u16 max_rings, max_entries, pad, i; 6437 u32 *rss_indir_tbl; 6438 6439 if (!bp->rx_nr_rings) 6440 return; 6441 6442 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6443 max_rings = bp->rx_nr_rings - 1; 6444 else 6445 max_rings = bp->rx_nr_rings; 6446 6447 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 6448 if (rss_ctx) 6449 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); 6450 else 6451 rss_indir_tbl = &bp->rss_indir_tbl[0]; 6452 6453 for (i = 0; i < max_entries; i++) 6454 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 6455 6456 pad = bp->rss_indir_tbl_entries - max_entries; 6457 if (pad) 6458 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); 6459 } 6460 6461 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 6462 { 6463 u32 i, tbl_size, max_ring = 0; 6464 6465 if (!bp->rss_indir_tbl) 6466 return 0; 6467 6468 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6469 for (i = 0; i < tbl_size; i++) 6470 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 6471 return max_ring; 6472 } 6473 6474 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 6475 { 6476 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6477 if (!rx_rings) 6478 return 0; 6479 return bnxt_calc_nr_ring_pages(rx_rings - 1, 6480 BNXT_RSS_TABLE_ENTRIES_P5); 6481 } 6482 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6483 return 2; 6484 return 1; 6485 } 6486 6487 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6488 { 6489 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 6490 u16 i, j; 6491 6492 /* Fill the RSS 
indirection table with ring group ids */ 6493 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 6494 if (!no_rss) 6495 j = bp->rss_indir_tbl[i]; 6496 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 6497 } 6498 } 6499 6500 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 6501 struct bnxt_vnic_info *vnic) 6502 { 6503 __le16 *ring_tbl = vnic->rss_table; 6504 struct bnxt_rx_ring_info *rxr; 6505 u16 tbl_size, i; 6506 6507 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6508 6509 for (i = 0; i < tbl_size; i++) { 6510 u16 ring_id, j; 6511 6512 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6513 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6514 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 6515 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; 6516 else 6517 j = bp->rss_indir_tbl[i]; 6518 rxr = &bp->rx_ring[j]; 6519 6520 ring_id = rxr->rx_ring_struct.fw_ring_id; 6521 *ring_tbl++ = cpu_to_le16(ring_id); 6522 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6523 *ring_tbl++ = cpu_to_le16(ring_id); 6524 } 6525 } 6526 6527 static void 6528 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 6529 struct bnxt_vnic_info *vnic) 6530 { 6531 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6532 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 6533 if (bp->flags & BNXT_FLAG_CHIP_P7) 6534 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; 6535 } else { 6536 bnxt_fill_hw_rss_tbl(bp, vnic); 6537 } 6538 6539 if (bp->rss_hash_delta) { 6540 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 6541 if (bp->rss_hash_cfg & bp->rss_hash_delta) 6542 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 6543 else 6544 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 6545 } else { 6546 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 6547 } 6548 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 6549 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6550 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6551 } 6552 6553 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6554 bool set_rss) 6555 { 6556 struct hwrm_vnic_rss_cfg_input *req; 6557 int rc; 6558 6559 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6560 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6561 return 0; 6562 6563 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6564 if (rc) 6565 return rc; 6566 6567 if (set_rss) 6568 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6569 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6570 return hwrm_req_send(bp, req); 6571 } 6572 6573 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, 6574 struct bnxt_vnic_info *vnic, bool set_rss) 6575 { 6576 struct hwrm_vnic_rss_cfg_input *req; 6577 dma_addr_t ring_tbl_map; 6578 u32 i, nr_ctxs; 6579 int rc; 6580 6581 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6582 if (rc) 6583 return rc; 6584 6585 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6586 if (!set_rss) 6587 return hwrm_req_send(bp, req); 6588 6589 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6590 ring_tbl_map = vnic->rss_table_dma_addr; 6591 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6592 6593 hwrm_req_hold(bp, req); 6594 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6595 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6596 req->ring_table_pair_index = i; 6597 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6598 rc = hwrm_req_send(bp, req); 6599 if (rc) 6600 goto exit; 6601 } 6602 6603 exit: 6604 hwrm_req_drop(bp, req); 6605 return rc; 6606 } 6607 6608 static void 
bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6609 { 6610 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6611 struct hwrm_vnic_rss_qcfg_output *resp; 6612 struct hwrm_vnic_rss_qcfg_input *req; 6613 6614 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6615 return; 6616 6617 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6618 /* all contexts configured to same hash_type, zero always exists */ 6619 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6620 resp = hwrm_req_hold(bp, req); 6621 if (!hwrm_req_send(bp, req)) { 6622 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6623 bp->rss_hash_delta = 0; 6624 } 6625 hwrm_req_drop(bp, req); 6626 } 6627 6628 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6629 { 6630 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh; 6631 struct hwrm_vnic_plcmodes_cfg_input *req; 6632 int rc; 6633 6634 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6635 if (rc) 6636 return rc; 6637 6638 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6639 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6640 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6641 6642 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 6643 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6644 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6645 req->enables |= 6646 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6647 req->hds_threshold = cpu_to_le16(hds_thresh); 6648 } 6649 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6650 return hwrm_req_send(bp, req); 6651 } 6652 6653 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, 6654 struct bnxt_vnic_info *vnic, 6655 u16 ctx_idx) 6656 { 6657 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6658 6659 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6660 return; 6661 6662 req->rss_cos_lb_ctx_id = 6663 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); 6664 6665 hwrm_req_send(bp, req); 6666 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6667 } 6668 6669 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6670 { 6671 int i, j; 6672 6673 for (i = 0; i < bp->nr_vnics; i++) { 6674 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6675 6676 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6677 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6678 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); 6679 } 6680 } 6681 bp->rsscos_nr_ctxs = 0; 6682 } 6683 6684 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, 6685 struct bnxt_vnic_info *vnic, u16 ctx_idx) 6686 { 6687 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6688 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6689 int rc; 6690 6691 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6692 if (rc) 6693 return rc; 6694 6695 resp = hwrm_req_hold(bp, req); 6696 rc = hwrm_req_send(bp, req); 6697 if (!rc) 6698 vnic->fw_rss_cos_lb_ctx[ctx_idx] = 6699 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6700 hwrm_req_drop(bp, req); 6701 6702 return rc; 6703 } 6704 6705 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6706 { 6707 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6708 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6709 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6710 } 6711 6712 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6713 { 6714 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6715 struct hwrm_vnic_cfg_input *req; 6716 unsigned int ring = 0, grp_idx; 
6717 u16 def_vlan = 0; 6718 int rc; 6719 6720 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6721 if (rc) 6722 return rc; 6723 6724 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6725 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6726 6727 req->default_rx_ring_id = 6728 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6729 req->default_cmpl_ring_id = 6730 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6731 req->enables = 6732 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6733 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6734 goto vnic_mru; 6735 } 6736 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6737 /* Only RSS support for now TBD: COS & LB */ 6738 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6739 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6740 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6741 VNIC_CFG_REQ_ENABLES_MRU); 6742 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6743 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6744 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6745 VNIC_CFG_REQ_ENABLES_MRU); 6746 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6747 } else { 6748 req->rss_rule = cpu_to_le16(0xffff); 6749 } 6750 6751 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6752 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6753 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6754 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6755 } else { 6756 req->cos_rule = cpu_to_le16(0xffff); 6757 } 6758 6759 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6760 ring = 0; 6761 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6762 ring = vnic->vnic_id - 1; 6763 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6764 ring = bp->rx_nr_rings - 1; 6765 6766 grp_idx = bp->rx_ring[ring].bnapi->index; 6767 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6768 req->lb_rule = cpu_to_le16(0xffff); 6769 vnic_mru: 6770 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 6771 req->mru = cpu_to_le16(vnic->mru); 6772 6773 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6774 #ifdef CONFIG_BNXT_SRIOV 6775 if (BNXT_VF(bp)) 6776 def_vlan = bp->vf.vlan; 6777 #endif 6778 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6779 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6780 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) 6781 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6782 6783 return hwrm_req_send(bp, req); 6784 } 6785 6786 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, 6787 struct bnxt_vnic_info *vnic) 6788 { 6789 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { 6790 struct hwrm_vnic_free_input *req; 6791 6792 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6793 return; 6794 6795 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6796 6797 hwrm_req_send(bp, req); 6798 vnic->fw_vnic_id = INVALID_HW_RING_ID; 6799 } 6800 } 6801 6802 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6803 { 6804 u16 i; 6805 6806 for (i = 0; i < bp->nr_vnics; i++) 6807 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); 6808 } 6809 6810 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6811 unsigned int start_rx_ring_idx, 6812 unsigned int nr_rings) 6813 { 6814 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6815 struct hwrm_vnic_alloc_output *resp; 6816 struct hwrm_vnic_alloc_input *req; 6817 int rc; 6818 6819 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6820 if (rc) 6821 return rc; 6822 6823 if (bp->flags & 
BNXT_FLAG_CHIP_P5_PLUS) 6824 goto vnic_no_ring_grps; 6825 6826 /* map ring groups to this vnic */ 6827 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6828 grp_idx = bp->rx_ring[i].bnapi->index; 6829 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6830 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6831 j, nr_rings); 6832 break; 6833 } 6834 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6835 } 6836 6837 vnic_no_ring_grps: 6838 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6839 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6840 if (vnic->vnic_id == BNXT_VNIC_DEFAULT) 6841 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6842 6843 resp = hwrm_req_hold(bp, req); 6844 rc = hwrm_req_send(bp, req); 6845 if (!rc) 6846 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6847 hwrm_req_drop(bp, req); 6848 return rc; 6849 } 6850 6851 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6852 { 6853 struct hwrm_vnic_qcaps_output *resp; 6854 struct hwrm_vnic_qcaps_input *req; 6855 int rc; 6856 6857 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6858 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6859 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6860 if (bp->hwrm_spec_code < 0x10600) 6861 return 0; 6862 6863 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6864 if (rc) 6865 return rc; 6866 6867 resp = hwrm_req_hold(bp, req); 6868 rc = hwrm_req_send(bp, req); 6869 if (!rc) { 6870 u32 flags = le32_to_cpu(resp->flags); 6871 6872 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6873 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6874 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6875 if (flags & 6876 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6877 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6878 6879 /* Older P5 fw before EXT_HW_STATS support did not set 6880 * VLAN_STRIP_CAP properly. 
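 * Assume VLAN RX stripping is supported on such firmware even when
 * the capability bit is not set.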
6881 */ 6882 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6883 (BNXT_CHIP_P5(bp) && 6884 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6885 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6886 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6887 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6888 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6889 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6890 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6891 if (bp->max_tpa_v2) { 6892 if (BNXT_CHIP_P5(bp)) 6893 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6894 else 6895 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6896 } 6897 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6898 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6899 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) 6900 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; 6901 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) 6902 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; 6903 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) 6904 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; 6905 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) 6906 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; 6907 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) 6908 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; 6909 } 6910 hwrm_req_drop(bp, req); 6911 return rc; 6912 } 6913 6914 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6915 { 6916 struct hwrm_ring_grp_alloc_output *resp; 6917 struct hwrm_ring_grp_alloc_input *req; 6918 int rc; 6919 u16 i; 6920 6921 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6922 return 0; 6923 6924 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6925 if (rc) 6926 return rc; 6927 6928 resp = hwrm_req_hold(bp, req); 6929 for (i = 0; i < bp->rx_nr_rings; i++) { 6930 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6931 6932 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6933 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6934 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6935 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6936 6937 rc = hwrm_req_send(bp, req); 6938 6939 if (rc) 6940 break; 6941 6942 bp->grp_info[grp_idx].fw_grp_id = 6943 le32_to_cpu(resp->ring_group_id); 6944 } 6945 hwrm_req_drop(bp, req); 6946 return rc; 6947 } 6948 6949 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6950 { 6951 struct hwrm_ring_grp_free_input *req; 6952 u16 i; 6953 6954 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6955 return; 6956 6957 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6958 return; 6959 6960 hwrm_req_hold(bp, req); 6961 for (i = 0; i < bp->cp_nr_rings; i++) { 6962 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6963 continue; 6964 req->ring_group_id = 6965 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6966 6967 hwrm_req_send(bp, req); 6968 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6969 } 6970 hwrm_req_drop(bp, req); 6971 } 6972 6973 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, 6974 struct hwrm_ring_alloc_input *req, 6975 struct bnxt_ring_struct *ring) 6976 { 6977 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; 6978 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID | 6979 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; 6980 6981 if (ring_type == HWRM_RING_ALLOC_AGG) { 6982 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6983 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6984 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6985 
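/* Aggregation rings always use page-sized buffers and are tied to
 * the parent RX ring programmed just above.
 */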
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID; 6986 } else { 6987 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6988 if (NET_IP_ALIGN == 2) 6989 req->flags = 6990 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD); 6991 } 6992 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6993 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6994 req->enables |= cpu_to_le32(enables); 6995 } 6996 6997 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 6998 struct bnxt_ring_struct *ring, 6999 u32 ring_type, u32 map_index) 7000 { 7001 struct hwrm_ring_alloc_output *resp; 7002 struct hwrm_ring_alloc_input *req; 7003 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 7004 struct bnxt_ring_grp_info *grp_info; 7005 int rc, err = 0; 7006 u16 ring_id; 7007 7008 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 7009 if (rc) 7010 goto exit; 7011 7012 req->enables = 0; 7013 if (rmem->nr_pages > 1) { 7014 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 7015 /* Page size is in log2 units */ 7016 req->page_size = BNXT_PAGE_SHIFT; 7017 req->page_tbl_depth = 1; 7018 } else { 7019 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 7020 } 7021 req->fbo = 0; 7022 /* Association of ring index with doorbell index and MSIX number */ 7023 req->logical_id = cpu_to_le16(map_index); 7024 7025 switch (ring_type) { 7026 case HWRM_RING_ALLOC_TX: { 7027 struct bnxt_tx_ring_info *txr; 7028 u16 flags = 0; 7029 7030 txr = container_of(ring, struct bnxt_tx_ring_info, 7031 tx_ring_struct); 7032 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 7033 /* Association of transmit ring with completion ring */ 7034 grp_info = &bp->grp_info[ring->grp_idx]; 7035 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 7036 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 7037 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 7038 req->queue_id = cpu_to_le16(ring->queue_id); 7039 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 7040 req->cmpl_coal_cnt = 7041 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 7042 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) 7043 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; 7044 req->flags = cpu_to_le16(flags); 7045 break; 7046 } 7047 case HWRM_RING_ALLOC_RX: 7048 case HWRM_RING_ALLOC_AGG: 7049 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 7050 req->length = (ring_type == HWRM_RING_ALLOC_RX) ? 
7051 cpu_to_le32(bp->rx_ring_mask + 1) : 7052 cpu_to_le32(bp->rx_agg_ring_mask + 1); 7053 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7054 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); 7055 break; 7056 case HWRM_RING_ALLOC_CMPL: 7057 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 7058 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7059 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7060 /* Association of cp ring with nq */ 7061 grp_info = &bp->grp_info[map_index]; 7062 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 7063 req->cq_handle = cpu_to_le64(ring->handle); 7064 req->enables |= cpu_to_le32( 7065 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 7066 } else { 7067 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7068 } 7069 break; 7070 case HWRM_RING_ALLOC_NQ: 7071 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 7072 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7073 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7074 break; 7075 default: 7076 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 7077 ring_type); 7078 return -1; 7079 } 7080 7081 resp = hwrm_req_hold(bp, req); 7082 rc = hwrm_req_send(bp, req); 7083 err = le16_to_cpu(resp->error_code); 7084 ring_id = le16_to_cpu(resp->ring_id); 7085 hwrm_req_drop(bp, req); 7086 7087 exit: 7088 if (rc || err) { 7089 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 7090 ring_type, rc, err); 7091 return -EIO; 7092 } 7093 ring->fw_ring_id = ring_id; 7094 return rc; 7095 } 7096 7097 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 7098 { 7099 int rc; 7100 7101 if (BNXT_PF(bp)) { 7102 struct hwrm_func_cfg_input *req; 7103 7104 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 7105 if (rc) 7106 return rc; 7107 7108 req->fid = cpu_to_le16(0xffff); 7109 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7110 req->async_event_cr = cpu_to_le16(idx); 7111 return hwrm_req_send(bp, req); 7112 } else { 7113 struct hwrm_func_vf_cfg_input *req; 7114 7115 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 7116 if (rc) 7117 return rc; 7118 7119 req->enables = 7120 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7121 req->async_event_cr = cpu_to_le16(idx); 7122 return hwrm_req_send(bp, req); 7123 } 7124 } 7125 7126 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 7127 u32 ring_type) 7128 { 7129 switch (ring_type) { 7130 case HWRM_RING_ALLOC_TX: 7131 db->db_ring_mask = bp->tx_ring_mask; 7132 break; 7133 case HWRM_RING_ALLOC_RX: 7134 db->db_ring_mask = bp->rx_ring_mask; 7135 break; 7136 case HWRM_RING_ALLOC_AGG: 7137 db->db_ring_mask = bp->rx_agg_ring_mask; 7138 break; 7139 case HWRM_RING_ALLOC_CMPL: 7140 case HWRM_RING_ALLOC_NQ: 7141 db->db_ring_mask = bp->cp_ring_mask; 7142 break; 7143 } 7144 if (bp->flags & BNXT_FLAG_CHIP_P7) { 7145 db->db_epoch_mask = db->db_ring_mask + 1; 7146 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 7147 } 7148 } 7149 7150 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 7151 u32 map_idx, u32 xid) 7152 { 7153 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7154 switch (ring_type) { 7155 case HWRM_RING_ALLOC_TX: 7156 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 7157 break; 7158 case HWRM_RING_ALLOC_RX: 7159 case HWRM_RING_ALLOC_AGG: 7160 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 7161 break; 7162 case HWRM_RING_ALLOC_CMPL: 7163 db->db_key64 = DBR_PATH_L2; 7164 break; 7165 case HWRM_RING_ALLOC_NQ: 7166 db->db_key64 = DBR_PATH_L2; 7167 break; 7168 } 7169 db->db_key64 |= (u64)xid << DBR_XID_SFT; 7170 7171 if 
(bp->flags & BNXT_FLAG_CHIP_P7) 7172 db->db_key64 |= DBR_VALID; 7173 7174 db->doorbell = bp->bar1 + bp->db_offset; 7175 } else { 7176 db->doorbell = bp->bar1 + map_idx * 0x80; 7177 switch (ring_type) { 7178 case HWRM_RING_ALLOC_TX: 7179 db->db_key32 = DB_KEY_TX; 7180 break; 7181 case HWRM_RING_ALLOC_RX: 7182 case HWRM_RING_ALLOC_AGG: 7183 db->db_key32 = DB_KEY_RX; 7184 break; 7185 case HWRM_RING_ALLOC_CMPL: 7186 db->db_key32 = DB_KEY_CP; 7187 break; 7188 } 7189 } 7190 bnxt_set_db_mask(bp, db, ring_type); 7191 } 7192 7193 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, 7194 struct bnxt_rx_ring_info *rxr) 7195 { 7196 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7197 struct bnxt_napi *bnapi = rxr->bnapi; 7198 u32 type = HWRM_RING_ALLOC_RX; 7199 u32 map_idx = bnapi->index; 7200 int rc; 7201 7202 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7203 if (rc) 7204 return rc; 7205 7206 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 7207 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 7208 7209 return 0; 7210 } 7211 7212 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, 7213 struct bnxt_rx_ring_info *rxr) 7214 { 7215 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7216 u32 type = HWRM_RING_ALLOC_AGG; 7217 u32 grp_idx = ring->grp_idx; 7218 u32 map_idx; 7219 int rc; 7220 7221 map_idx = grp_idx + bp->rx_nr_rings; 7222 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7223 if (rc) 7224 return rc; 7225 7226 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 7227 ring->fw_ring_id); 7228 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 7229 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7230 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 7231 7232 return 0; 7233 } 7234 7235 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, 7236 struct bnxt_cp_ring_info *cpr) 7237 { 7238 const u32 type = HWRM_RING_ALLOC_CMPL; 7239 struct bnxt_napi *bnapi = cpr->bnapi; 7240 struct bnxt_ring_struct *ring; 7241 u32 map_idx = bnapi->index; 7242 int rc; 7243 7244 ring = &cpr->cp_ring_struct; 7245 ring->handle = BNXT_SET_NQ_HDL(cpr); 7246 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7247 if (rc) 7248 return rc; 7249 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 7250 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7251 return 0; 7252 } 7253 7254 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, 7255 struct bnxt_tx_ring_info *txr, u32 tx_idx) 7256 { 7257 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7258 const u32 type = HWRM_RING_ALLOC_TX; 7259 int rc; 7260 7261 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); 7262 if (rc) 7263 return rc; 7264 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); 7265 return 0; 7266 } 7267 7268 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 7269 { 7270 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 7271 int i, rc = 0; 7272 u32 type; 7273 7274 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7275 type = HWRM_RING_ALLOC_NQ; 7276 else 7277 type = HWRM_RING_ALLOC_CMPL; 7278 for (i = 0; i < bp->cp_nr_rings; i++) { 7279 struct bnxt_napi *bnapi = bp->bnapi[i]; 7280 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7281 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7282 u32 map_idx = ring->map_idx; 7283 unsigned int vector; 7284 7285 vector = bp->irq_tbl[map_idx].vector; 7286 disable_irq_nosync(vector); 7287 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7288 if (rc) { 7289 enable_irq(vector); 7290 goto err_out; 7291 } 7292 bnxt_set_db(bp, &cpr->cp_db, type, 
map_idx, ring->fw_ring_id); 7293 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7294 enable_irq(vector); 7295 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 7296 7297 if (!i) { 7298 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 7299 if (rc) 7300 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 7301 } 7302 } 7303 7304 for (i = 0; i < bp->tx_nr_rings; i++) { 7305 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7306 7307 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7308 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 7309 if (rc) 7310 goto err_out; 7311 } 7312 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); 7313 if (rc) 7314 goto err_out; 7315 } 7316 7317 for (i = 0; i < bp->rx_nr_rings; i++) { 7318 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7319 7320 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 7321 if (rc) 7322 goto err_out; 7323 /* If we have agg rings, post agg buffers first. */ 7324 if (!agg_rings) 7325 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7326 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7327 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 7328 if (rc) 7329 goto err_out; 7330 } 7331 } 7332 7333 if (agg_rings) { 7334 for (i = 0; i < bp->rx_nr_rings; i++) { 7335 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); 7336 if (rc) 7337 goto err_out; 7338 } 7339 } 7340 err_out: 7341 return rc; 7342 } 7343 7344 static void bnxt_cancel_dim(struct bnxt *bp) 7345 { 7346 int i; 7347 7348 /* DIM work is initialized in bnxt_enable_napi(). Proceed only 7349 * if NAPI is enabled. 7350 */ 7351 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 7352 return; 7353 7354 /* Make sure NAPI sees that the VNIC is disabled */ 7355 synchronize_net(); 7356 for (i = 0; i < bp->rx_nr_rings; i++) { 7357 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7358 struct bnxt_napi *bnapi = rxr->bnapi; 7359 7360 cancel_work_sync(&bnapi->cp_ring.dim.work); 7361 } 7362 } 7363 7364 static int hwrm_ring_free_send_msg(struct bnxt *bp, 7365 struct bnxt_ring_struct *ring, 7366 u32 ring_type, int cmpl_ring_id) 7367 { 7368 struct hwrm_ring_free_output *resp; 7369 struct hwrm_ring_free_input *req; 7370 u16 error_code = 0; 7371 int rc; 7372 7373 if (BNXT_NO_FW_ACCESS(bp)) 7374 return 0; 7375 7376 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 7377 if (rc) 7378 goto exit; 7379 7380 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 7381 req->ring_type = ring_type; 7382 req->ring_id = cpu_to_le16(ring->fw_ring_id); 7383 7384 resp = hwrm_req_hold(bp, req); 7385 rc = hwrm_req_send(bp, req); 7386 error_code = le16_to_cpu(resp->error_code); 7387 hwrm_req_drop(bp, req); 7388 exit: 7389 if (rc || error_code) { 7390 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 7391 ring_type, rc, error_code); 7392 return -EIO; 7393 } 7394 return 0; 7395 } 7396 7397 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, 7398 struct bnxt_tx_ring_info *txr, 7399 bool close_path) 7400 { 7401 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7402 u32 cmpl_ring_id; 7403 7404 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7405 return; 7406 7407 cmpl_ring_id = close_path ? 
bnxt_cp_ring_for_tx(bp, txr) : 7408 INVALID_HW_RING_ID; 7409 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, 7410 cmpl_ring_id); 7411 ring->fw_ring_id = INVALID_HW_RING_ID; 7412 } 7413 7414 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, 7415 struct bnxt_rx_ring_info *rxr, 7416 bool close_path) 7417 { 7418 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7419 u32 grp_idx = rxr->bnapi->index; 7420 u32 cmpl_ring_id; 7421 7422 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7423 return; 7424 7425 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7426 hwrm_ring_free_send_msg(bp, ring, 7427 RING_FREE_REQ_RING_TYPE_RX, 7428 close_path ? cmpl_ring_id : 7429 INVALID_HW_RING_ID); 7430 ring->fw_ring_id = INVALID_HW_RING_ID; 7431 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; 7432 } 7433 7434 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, 7435 struct bnxt_rx_ring_info *rxr, 7436 bool close_path) 7437 { 7438 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7439 u32 grp_idx = rxr->bnapi->index; 7440 u32 type, cmpl_ring_id; 7441 7442 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7443 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 7444 else 7445 type = RING_FREE_REQ_RING_TYPE_RX; 7446 7447 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7448 return; 7449 7450 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7451 hwrm_ring_free_send_msg(bp, ring, type, 7452 close_path ? cmpl_ring_id : 7453 INVALID_HW_RING_ID); 7454 ring->fw_ring_id = INVALID_HW_RING_ID; 7455 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; 7456 } 7457 7458 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp, 7459 struct bnxt_cp_ring_info *cpr) 7460 { 7461 struct bnxt_ring_struct *ring; 7462 7463 ring = &cpr->cp_ring_struct; 7464 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7465 return; 7466 7467 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, 7468 INVALID_HW_RING_ID); 7469 ring->fw_ring_id = INVALID_HW_RING_ID; 7470 } 7471 7472 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 7473 { 7474 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7475 int i, size = ring->ring_mem.page_size; 7476 7477 cpr->cp_raw_cons = 0; 7478 cpr->toggle = 0; 7479 7480 for (i = 0; i < bp->cp_nr_pages; i++) 7481 if (cpr->cp_desc_ring[i]) 7482 memset(cpr->cp_desc_ring[i], 0, size); 7483 } 7484 7485 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 7486 { 7487 u32 type; 7488 int i; 7489 7490 if (!bp->bnapi) 7491 return; 7492 7493 for (i = 0; i < bp->tx_nr_rings; i++) 7494 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); 7495 7496 bnxt_cancel_dim(bp); 7497 for (i = 0; i < bp->rx_nr_rings; i++) { 7498 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); 7499 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); 7500 } 7501 7502 /* The completion rings are about to be freed. After that the 7503 * IRQ doorbell will not work anymore. So we need to disable 7504 * IRQ here. 
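 * bnxt_disable_int_sync() below also waits for any in-flight
 * interrupt handlers to finish.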
7505 */ 7506 bnxt_disable_int_sync(bp); 7507 7508 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7509 type = RING_FREE_REQ_RING_TYPE_NQ; 7510 else 7511 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 7512 for (i = 0; i < bp->cp_nr_rings; i++) { 7513 struct bnxt_napi *bnapi = bp->bnapi[i]; 7514 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7515 struct bnxt_ring_struct *ring; 7516 int j; 7517 7518 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) 7519 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]); 7520 7521 ring = &cpr->cp_ring_struct; 7522 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7523 hwrm_ring_free_send_msg(bp, ring, type, 7524 INVALID_HW_RING_ID); 7525 ring->fw_ring_id = INVALID_HW_RING_ID; 7526 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 7527 } 7528 } 7529 } 7530 7531 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7532 bool shared); 7533 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7534 bool shared); 7535 7536 static int bnxt_hwrm_get_rings(struct bnxt *bp) 7537 { 7538 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7539 struct hwrm_func_qcfg_output *resp; 7540 struct hwrm_func_qcfg_input *req; 7541 int rc; 7542 7543 if (bp->hwrm_spec_code < 0x10601) 7544 return 0; 7545 7546 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7547 if (rc) 7548 return rc; 7549 7550 req->fid = cpu_to_le16(0xffff); 7551 resp = hwrm_req_hold(bp, req); 7552 rc = hwrm_req_send(bp, req); 7553 if (rc) { 7554 hwrm_req_drop(bp, req); 7555 return rc; 7556 } 7557 7558 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7559 if (BNXT_NEW_RM(bp)) { 7560 u16 cp, stats; 7561 7562 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 7563 hw_resc->resv_hw_ring_grps = 7564 le32_to_cpu(resp->alloc_hw_ring_grps); 7565 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7566 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7567 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7568 stats = le16_to_cpu(resp->alloc_stat_ctx); 7569 hw_resc->resv_irqs = cp; 7570 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7571 int rx = hw_resc->resv_rx_rings; 7572 int tx = hw_resc->resv_tx_rings; 7573 7574 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7575 rx >>= 1; 7576 if (cp < (rx + tx)) { 7577 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 7578 if (rc) 7579 goto get_rings_exit; 7580 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7581 rx <<= 1; 7582 hw_resc->resv_rx_rings = rx; 7583 hw_resc->resv_tx_rings = tx; 7584 } 7585 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 7586 hw_resc->resv_hw_ring_grps = rx; 7587 } 7588 hw_resc->resv_cp_rings = cp; 7589 hw_resc->resv_stat_ctxs = stats; 7590 } 7591 get_rings_exit: 7592 hwrm_req_drop(bp, req); 7593 return rc; 7594 } 7595 7596 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 7597 { 7598 struct hwrm_func_qcfg_output *resp; 7599 struct hwrm_func_qcfg_input *req; 7600 int rc; 7601 7602 if (bp->hwrm_spec_code < 0x10601) 7603 return 0; 7604 7605 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7606 if (rc) 7607 return rc; 7608 7609 req->fid = cpu_to_le16(fid); 7610 resp = hwrm_req_hold(bp, req); 7611 rc = hwrm_req_send(bp, req); 7612 if (!rc) 7613 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7614 7615 hwrm_req_drop(bp, req); 7616 return rc; 7617 } 7618 7619 static bool bnxt_rfs_supported(struct bnxt *bp); 7620 7621 static struct hwrm_func_cfg_input * 7622 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7623 { 7624 struct hwrm_func_cfg_input *req; 7625 u32 enables = 0; 7626 7627 if 
(bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 7628 return NULL; 7629 7630 req->fid = cpu_to_le16(0xffff); 7631 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7632 req->num_tx_rings = cpu_to_le16(hwr->tx); 7633 if (BNXT_NEW_RM(bp)) { 7634 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7635 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7636 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7637 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7638 enables |= hwr->cp_p5 ? 7639 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7640 } else { 7641 enables |= hwr->cp ? 7642 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7643 enables |= hwr->grp ? 7644 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7645 } 7646 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7647 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7648 0; 7649 req->num_rx_rings = cpu_to_le16(hwr->rx); 7650 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7651 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7652 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7653 req->num_msix = cpu_to_le16(hwr->cp); 7654 } else { 7655 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7656 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7657 } 7658 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7659 req->num_vnics = cpu_to_le16(hwr->vnic); 7660 } 7661 req->enables = cpu_to_le32(enables); 7662 return req; 7663 } 7664 7665 static struct hwrm_func_vf_cfg_input * 7666 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7667 { 7668 struct hwrm_func_vf_cfg_input *req; 7669 u32 enables = 0; 7670 7671 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7672 return NULL; 7673 7674 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7675 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7676 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7677 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7678 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7679 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7680 enables |= hwr->cp_p5 ? 7681 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7682 } else { 7683 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7684 enables |= hwr->grp ? 7685 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7686 } 7687 enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7688 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7689 7690 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7691 req->num_tx_rings = cpu_to_le16(hwr->tx); 7692 req->num_rx_rings = cpu_to_le16(hwr->rx); 7693 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7694 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7695 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7696 } else { 7697 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7698 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7699 } 7700 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7701 req->num_vnics = cpu_to_le16(hwr->vnic); 7702 7703 req->enables = cpu_to_le32(enables); 7704 return req; 7705 } 7706 7707 static int 7708 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7709 { 7710 struct hwrm_func_cfg_input *req; 7711 int rc; 7712 7713 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7714 if (!req) 7715 return -ENOMEM; 7716 7717 if (!req->enables) { 7718 hwrm_req_drop(bp, req); 7719 return 0; 7720 } 7721 7722 rc = hwrm_req_send(bp, req); 7723 if (rc) 7724 return rc; 7725 7726 if (bp->hwrm_spec_code < 0x10601) 7727 bp->hw_resc.resv_tx_rings = hwr->tx; 7728 7729 return bnxt_hwrm_get_rings(bp); 7730 } 7731 7732 static int 7733 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7734 { 7735 struct hwrm_func_vf_cfg_input *req; 7736 int rc; 7737 7738 if (!BNXT_NEW_RM(bp)) { 7739 bp->hw_resc.resv_tx_rings = hwr->tx; 7740 return 0; 7741 } 7742 7743 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7744 if (!req) 7745 return -ENOMEM; 7746 7747 rc = hwrm_req_send(bp, req); 7748 if (rc) 7749 return rc; 7750 7751 return bnxt_hwrm_get_rings(bp); 7752 } 7753 7754 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7755 { 7756 if (BNXT_PF(bp)) 7757 return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7758 else 7759 return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7760 } 7761 7762 int bnxt_nq_rings_in_use(struct bnxt *bp) 7763 { 7764 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); 7765 } 7766 7767 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7768 { 7769 int cp; 7770 7771 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7772 return bnxt_nq_rings_in_use(bp); 7773 7774 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7775 return cp; 7776 } 7777 7778 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7779 { 7780 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); 7781 } 7782 7783 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7784 { 7785 if (!hwr->grp) 7786 return 0; 7787 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7788 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7789 7790 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7791 rss_ctx *= hwr->vnic; 7792 return rss_ctx; 7793 } 7794 if (BNXT_VF(bp)) 7795 return BNXT_VF_MAX_RSS_CTX; 7796 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7797 return hwr->grp + 1; 7798 return 1; 7799 } 7800 7801 /* Check if a default RSS map needs to be setup. This function is only 7802 * used on older firmware that does not require reserving RX rings. 
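 * With resource-manager firmware, the default map is instead refreshed in
 * __bnxt_reserve_rings() after the RX ring reservation has completed.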
7803 */ 7804 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7805 { 7806 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7807 7808 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7809 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7810 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7811 if (!netif_is_rxfh_configured(bp->dev)) 7812 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7813 } 7814 } 7815 7816 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7817 { 7818 if (bp->flags & BNXT_FLAG_RFS) { 7819 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7820 return 2 + bp->num_rss_ctx; 7821 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7822 return rx_rings + 1; 7823 } 7824 return 1; 7825 } 7826 7827 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7828 { 7829 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7830 int cp = bnxt_cp_rings_in_use(bp); 7831 int nq = bnxt_nq_rings_in_use(bp); 7832 int rx = bp->rx_nr_rings, stat; 7833 int vnic, grp = rx; 7834 7835 /* Old firmware does not need RX ring reservations but we still 7836 * need to setup a default RSS map when needed. With new firmware 7837 * we go through RX ring reservations first and then set up the 7838 * RSS map for the successfully reserved RX rings when needed. 7839 */ 7840 if (!BNXT_NEW_RM(bp)) 7841 bnxt_check_rss_tbl_no_rmgr(bp); 7842 7843 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7844 bp->hwrm_spec_code >= 0x10601) 7845 return true; 7846 7847 if (!BNXT_NEW_RM(bp)) 7848 return false; 7849 7850 vnic = bnxt_get_total_vnics(bp, rx); 7851 7852 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7853 rx <<= 1; 7854 stat = bnxt_get_func_stat_ctxs(bp); 7855 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7856 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7857 (hw_resc->resv_hw_ring_grps != grp && 7858 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7859 return true; 7860 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7861 hw_resc->resv_irqs != nq) 7862 return true; 7863 return false; 7864 } 7865 7866 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7867 { 7868 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7869 7870 hwr->tx = hw_resc->resv_tx_rings; 7871 if (BNXT_NEW_RM(bp)) { 7872 hwr->rx = hw_resc->resv_rx_rings; 7873 hwr->cp = hw_resc->resv_irqs; 7874 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7875 hwr->cp_p5 = hw_resc->resv_cp_rings; 7876 hwr->grp = hw_resc->resv_hw_ring_grps; 7877 hwr->vnic = hw_resc->resv_vnics; 7878 hwr->stat = hw_resc->resv_stat_ctxs; 7879 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7880 } 7881 } 7882 7883 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7884 { 7885 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7886 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7887 } 7888 7889 static int bnxt_get_avail_msix(struct bnxt *bp, int num); 7890 7891 static int __bnxt_reserve_rings(struct bnxt *bp) 7892 { 7893 struct bnxt_hw_rings hwr = {0}; 7894 int rx_rings, old_rx_rings, rc; 7895 int cp = bp->cp_nr_rings; 7896 int ulp_msix = 0; 7897 bool sh = false; 7898 int tx_cp; 7899 7900 if (!bnxt_need_reserve_rings(bp)) 7901 return 0; 7902 7903 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 7904 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 7905 if (!ulp_msix) 7906 bnxt_set_ulp_stat_ctxs(bp, 0); 7907 7908 if (ulp_msix > bp->ulp_num_msix_want) 7909 ulp_msix = bp->ulp_num_msix_want; 7910 hwr.cp = cp + ulp_msix; 7911 } else { 7912 hwr.cp = bnxt_nq_rings_in_use(bp); 7913 } 7914 7915 hwr.tx 
= bp->tx_nr_rings; 7916 hwr.rx = bp->rx_nr_rings; 7917 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7918 sh = true; 7919 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7920 hwr.cp_p5 = hwr.rx + hwr.tx; 7921 7922 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7923 7924 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7925 hwr.rx <<= 1; 7926 hwr.grp = bp->rx_nr_rings; 7927 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7928 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7929 old_rx_rings = bp->hw_resc.resv_rx_rings; 7930 7931 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7932 if (rc) 7933 return rc; 7934 7935 bnxt_copy_reserved_rings(bp, &hwr); 7936 7937 rx_rings = hwr.rx; 7938 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7939 if (hwr.rx >= 2) { 7940 rx_rings = hwr.rx >> 1; 7941 } else { 7942 if (netif_running(bp->dev)) 7943 return -ENOMEM; 7944 7945 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7946 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7947 bp->dev->hw_features &= ~NETIF_F_LRO; 7948 bp->dev->features &= ~NETIF_F_LRO; 7949 bnxt_set_ring_params(bp); 7950 } 7951 } 7952 rx_rings = min_t(int, rx_rings, hwr.grp); 7953 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 7954 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 7955 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 7956 hwr.cp = min_t(int, hwr.cp, hwr.stat); 7957 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 7958 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7959 hwr.rx = rx_rings << 1; 7960 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 7961 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7962 bp->tx_nr_rings = hwr.tx; 7963 7964 /* If we cannot reserve all the RX rings, reset the RSS map only 7965 * if absolutely necessary 7966 */ 7967 if (rx_rings != bp->rx_nr_rings) { 7968 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 7969 rx_rings, bp->rx_nr_rings); 7970 if (netif_is_rxfh_configured(bp->dev) && 7971 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 7972 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 7973 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 7974 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 7975 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 7976 } 7977 } 7978 bp->rx_nr_rings = rx_rings; 7979 bp->cp_nr_rings = hwr.cp; 7980 7981 if (!bnxt_rings_ok(bp, &hwr)) 7982 return -ENOMEM; 7983 7984 if (old_rx_rings != bp->hw_resc.resv_rx_rings && 7985 !netif_is_rxfh_configured(bp->dev)) 7986 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7987 7988 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { 7989 int resv_msix, resv_ctx, ulp_ctxs; 7990 struct bnxt_hw_resc *hw_resc; 7991 7992 hw_resc = &bp->hw_resc; 7993 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; 7994 ulp_msix = min_t(int, resv_msix, ulp_msix); 7995 bnxt_set_ulp_msix_num(bp, ulp_msix); 7996 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; 7997 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); 7998 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); 7999 } 8000 8001 return rc; 8002 } 8003 8004 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8005 { 8006 struct hwrm_func_vf_cfg_input *req; 8007 u32 flags; 8008 8009 if (!BNXT_NEW_RM(bp)) 8010 return 0; 8011 8012 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 8013 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 8014 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8015 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8016 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8017 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 8018 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 8019 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8020 flags |= 
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8021 8022 req->flags = cpu_to_le32(flags); 8023 return hwrm_req_send_silent(bp, req); 8024 } 8025 8026 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8027 { 8028 struct hwrm_func_cfg_input *req; 8029 u32 flags; 8030 8031 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 8032 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 8033 if (BNXT_NEW_RM(bp)) { 8034 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8035 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8036 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8037 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 8038 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8039 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 8040 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 8041 else 8042 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8043 } 8044 8045 req->flags = cpu_to_le32(flags); 8046 return hwrm_req_send_silent(bp, req); 8047 } 8048 8049 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8050 { 8051 if (bp->hwrm_spec_code < 0x10801) 8052 return 0; 8053 8054 if (BNXT_PF(bp)) 8055 return bnxt_hwrm_check_pf_rings(bp, hwr); 8056 8057 return bnxt_hwrm_check_vf_rings(bp, hwr); 8058 } 8059 8060 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 8061 { 8062 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8063 struct hwrm_ring_aggint_qcaps_output *resp; 8064 struct hwrm_ring_aggint_qcaps_input *req; 8065 int rc; 8066 8067 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 8068 coal_cap->num_cmpl_dma_aggr_max = 63; 8069 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 8070 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 8071 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 8072 coal_cap->int_lat_tmr_min_max = 65535; 8073 coal_cap->int_lat_tmr_max_max = 65535; 8074 coal_cap->num_cmpl_aggr_int_max = 65535; 8075 coal_cap->timer_units = 80; 8076 8077 if (bp->hwrm_spec_code < 0x10902) 8078 return; 8079 8080 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 8081 return; 8082 8083 resp = hwrm_req_hold(bp, req); 8084 rc = hwrm_req_send_silent(bp, req); 8085 if (!rc) { 8086 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 8087 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 8088 coal_cap->num_cmpl_dma_aggr_max = 8089 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 8090 coal_cap->num_cmpl_dma_aggr_during_int_max = 8091 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 8092 coal_cap->cmpl_aggr_dma_tmr_max = 8093 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 8094 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 8095 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 8096 coal_cap->int_lat_tmr_min_max = 8097 le16_to_cpu(resp->int_lat_tmr_min_max); 8098 coal_cap->int_lat_tmr_max_max = 8099 le16_to_cpu(resp->int_lat_tmr_max_max); 8100 coal_cap->num_cmpl_aggr_int_max = 8101 le16_to_cpu(resp->num_cmpl_aggr_int_max); 8102 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 8103 } 8104 hwrm_req_drop(bp, req); 8105 } 8106 8107 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 8108 { 8109 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8110 8111 return usec * 1000 / coal_cap->timer_units; 8112 } 8113 8114 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 8115 struct bnxt_coal *hw_coal, 8116 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8117 { 8118 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8119 u16 val, tmr, max, flags = hw_coal->flags; 8120 u32 cmpl_params = coal_cap->cmpl_params; 8121 8122 max = hw_coal->bufs_per_record * 128; 8123 if (hw_coal->budget) 8124 max = 
hw_coal->bufs_per_record * hw_coal->budget; 8125 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 8126 8127 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 8128 req->num_cmpl_aggr_int = cpu_to_le16(val); 8129 8130 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 8131 req->num_cmpl_dma_aggr = cpu_to_le16(val); 8132 8133 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 8134 coal_cap->num_cmpl_dma_aggr_during_int_max); 8135 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 8136 8137 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 8138 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 8139 req->int_lat_tmr_max = cpu_to_le16(tmr); 8140 8141 /* min timer set to 1/2 of interrupt timer */ 8142 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 8143 val = tmr / 2; 8144 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 8145 req->int_lat_tmr_min = cpu_to_le16(val); 8146 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8147 } 8148 8149 /* buf timer set to 1/4 of interrupt timer */ 8150 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 8151 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 8152 8153 if (cmpl_params & 8154 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 8155 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 8156 val = clamp_t(u16, tmr, 1, 8157 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 8158 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 8159 req->enables |= 8160 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 8161 } 8162 8163 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 8164 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 8165 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 8166 req->flags = cpu_to_le16(flags); 8167 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 8168 } 8169 8170 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 8171 struct bnxt_coal *hw_coal) 8172 { 8173 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 8174 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8175 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8176 u32 nq_params = coal_cap->nq_params; 8177 u16 tmr; 8178 int rc; 8179 8180 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 8181 return 0; 8182 8183 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8184 if (rc) 8185 return rc; 8186 8187 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 8188 req->flags = 8189 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 8190 8191 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 8192 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 8193 req->int_lat_tmr_min = cpu_to_le16(tmr); 8194 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8195 return hwrm_req_send(bp, req); 8196 } 8197 8198 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 8199 { 8200 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 8201 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8202 struct bnxt_coal coal; 8203 int rc; 8204 8205 /* Tick values in micro seconds. 8206 * 1 coal_buf x bufs_per_record = 1 completion record. 
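 * The ring starts from the global bp->rx_coal template; only coal_ticks
 * and coal_bufs are then overridden with this ring's per-queue values below.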
8207 */ 8208 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 8209 8210 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 8211 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 8212 8213 if (!bnapi->rx_ring) 8214 return -ENODEV; 8215 8216 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8217 if (rc) 8218 return rc; 8219 8220 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 8221 8222 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 8223 8224 return hwrm_req_send(bp, req_rx); 8225 } 8226 8227 static int 8228 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8229 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8230 { 8231 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 8232 8233 req->ring_id = cpu_to_le16(ring_id); 8234 return hwrm_req_send(bp, req); 8235 } 8236 8237 static int 8238 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8239 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8240 { 8241 struct bnxt_tx_ring_info *txr; 8242 int i, rc; 8243 8244 bnxt_for_each_napi_tx(i, bnapi, txr) { 8245 u16 ring_id; 8246 8247 ring_id = bnxt_cp_ring_for_tx(bp, txr); 8248 req->ring_id = cpu_to_le16(ring_id); 8249 rc = hwrm_req_send(bp, req); 8250 if (rc) 8251 return rc; 8252 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8253 return 0; 8254 } 8255 return 0; 8256 } 8257 8258 int bnxt_hwrm_set_coal(struct bnxt *bp) 8259 { 8260 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 8261 int i, rc; 8262 8263 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8264 if (rc) 8265 return rc; 8266 8267 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8268 if (rc) { 8269 hwrm_req_drop(bp, req_rx); 8270 return rc; 8271 } 8272 8273 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 8274 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 8275 8276 hwrm_req_hold(bp, req_rx); 8277 hwrm_req_hold(bp, req_tx); 8278 for (i = 0; i < bp->cp_nr_rings; i++) { 8279 struct bnxt_napi *bnapi = bp->bnapi[i]; 8280 struct bnxt_coal *hw_coal; 8281 8282 if (!bnapi->rx_ring) 8283 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8284 else 8285 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 8286 if (rc) 8287 break; 8288 8289 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8290 continue; 8291 8292 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 8293 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8294 if (rc) 8295 break; 8296 } 8297 if (bnapi->rx_ring) 8298 hw_coal = &bp->rx_coal; 8299 else 8300 hw_coal = &bp->tx_coal; 8301 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 8302 } 8303 hwrm_req_drop(bp, req_rx); 8304 hwrm_req_drop(bp, req_tx); 8305 return rc; 8306 } 8307 8308 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 8309 { 8310 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 8311 struct hwrm_stat_ctx_free_input *req; 8312 int i; 8313 8314 if (!bp->bnapi) 8315 return; 8316 8317 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8318 return; 8319 8320 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 8321 return; 8322 if (BNXT_FW_MAJ(bp) <= 20) { 8323 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 8324 hwrm_req_drop(bp, req); 8325 return; 8326 } 8327 hwrm_req_hold(bp, req0); 8328 } 8329 hwrm_req_hold(bp, req); 8330 for (i = 0; i < bp->cp_nr_rings; i++) { 8331 struct bnxt_napi *bnapi = bp->bnapi[i]; 8332 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8333 8334 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 8335 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 8336 if (req0) { 8337 
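			/* req0 was only initialized above for older firmware
			 * (major version <= 20): send HWRM_STAT_CTX_CLR_STATS
			 * so the counters are cleared before the context is
			 * freed.
			 */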
req0->stat_ctx_id = req->stat_ctx_id; 8338 hwrm_req_send(bp, req0); 8339 } 8340 hwrm_req_send(bp, req); 8341 8342 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 8343 } 8344 } 8345 hwrm_req_drop(bp, req); 8346 if (req0) 8347 hwrm_req_drop(bp, req0); 8348 } 8349 8350 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 8351 { 8352 struct hwrm_stat_ctx_alloc_output *resp; 8353 struct hwrm_stat_ctx_alloc_input *req; 8354 int rc, i; 8355 8356 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8357 return 0; 8358 8359 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 8360 if (rc) 8361 return rc; 8362 8363 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 8364 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 8365 8366 resp = hwrm_req_hold(bp, req); 8367 for (i = 0; i < bp->cp_nr_rings; i++) { 8368 struct bnxt_napi *bnapi = bp->bnapi[i]; 8369 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8370 8371 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 8372 8373 rc = hwrm_req_send(bp, req); 8374 if (rc) 8375 break; 8376 8377 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 8378 8379 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 8380 } 8381 hwrm_req_drop(bp, req); 8382 return rc; 8383 } 8384 8385 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 8386 { 8387 struct hwrm_func_qcfg_output *resp; 8388 struct hwrm_func_qcfg_input *req; 8389 u16 flags; 8390 int rc; 8391 8392 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 8393 if (rc) 8394 return rc; 8395 8396 req->fid = cpu_to_le16(0xffff); 8397 resp = hwrm_req_hold(bp, req); 8398 rc = hwrm_req_send(bp, req); 8399 if (rc) 8400 goto func_qcfg_exit; 8401 8402 flags = le16_to_cpu(resp->flags); 8403 #ifdef CONFIG_BNXT_SRIOV 8404 if (BNXT_VF(bp)) { 8405 struct bnxt_vf_info *vf = &bp->vf; 8406 8407 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 8408 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF) 8409 vf->flags |= BNXT_VF_TRUST; 8410 else 8411 vf->flags &= ~BNXT_VF_TRUST; 8412 } else { 8413 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 8414 } 8415 #endif 8416 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 8417 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 8418 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 8419 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 8420 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 8421 } 8422 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 8423 bp->flags |= BNXT_FLAG_MULTI_HOST; 8424 8425 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 8426 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 8427 8428 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) 8429 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; 8430 8431 switch (resp->port_partition_type) { 8432 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 8433 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2: 8434 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 8435 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 8436 bp->port_partition_type = resp->port_partition_type; 8437 break; 8438 } 8439 if (bp->hwrm_spec_code < 0x10707 || 8440 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 8441 bp->br_mode = BRIDGE_MODE_VEB; 8442 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 8443 bp->br_mode = BRIDGE_MODE_VEPA; 8444 else 8445 bp->br_mode = BRIDGE_MODE_UNDEF; 8446 8447 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 8448 if (!bp->max_mtu) 8449 bp->max_mtu = BNXT_MAX_MTU; 8450 8451 if (bp->db_size) 8452 goto func_qcfg_exit; 8453 8454 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 8455 if (BNXT_CHIP_P5(bp)) { 8456 if 
(BNXT_PF(bp)) 8457 bp->db_offset = DB_PF_OFFSET_P5; 8458 else 8459 bp->db_offset = DB_VF_OFFSET_P5; 8460 } 8461 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 8462 1024); 8463 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 8464 bp->db_size <= bp->db_offset) 8465 bp->db_size = pci_resource_len(bp->pdev, 2); 8466 8467 func_qcfg_exit: 8468 hwrm_req_drop(bp, req); 8469 return rc; 8470 } 8471 8472 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 8473 u8 init_val, u8 init_offset, 8474 bool init_mask_set) 8475 { 8476 ctxm->init_value = init_val; 8477 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 8478 if (init_mask_set) 8479 ctxm->init_offset = init_offset * 4; 8480 else 8481 ctxm->init_value = 0; 8482 } 8483 8484 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 8485 { 8486 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8487 u16 type; 8488 8489 for (type = 0; type < ctx_max; type++) { 8490 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8491 int n = 1; 8492 8493 if (!ctxm->max_entries || ctxm->pg_info) 8494 continue; 8495 8496 if (ctxm->instance_bmap) 8497 n = hweight32(ctxm->instance_bmap); 8498 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 8499 if (!ctxm->pg_info) 8500 return -ENOMEM; 8501 } 8502 return 0; 8503 } 8504 8505 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 8506 struct bnxt_ctx_mem_type *ctxm, bool force); 8507 8508 #define BNXT_CTX_INIT_VALID(flags) \ 8509 (!!((flags) & \ 8510 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 8511 8512 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 8513 { 8514 struct hwrm_func_backing_store_qcaps_v2_output *resp; 8515 struct hwrm_func_backing_store_qcaps_v2_input *req; 8516 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8517 u16 type; 8518 int rc; 8519 8520 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 8521 if (rc) 8522 return rc; 8523 8524 if (!ctx) { 8525 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8526 if (!ctx) 8527 return -ENOMEM; 8528 bp->ctx = ctx; 8529 } 8530 8531 resp = hwrm_req_hold(bp, req); 8532 8533 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 8534 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8535 u8 init_val, init_off, i; 8536 u32 max_entries; 8537 u16 entry_size; 8538 __le32 *p; 8539 u32 flags; 8540 8541 req->type = cpu_to_le16(type); 8542 rc = hwrm_req_send(bp, req); 8543 if (rc) 8544 goto ctx_done; 8545 flags = le32_to_cpu(resp->flags); 8546 type = le16_to_cpu(resp->next_valid_type); 8547 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) { 8548 bnxt_free_one_ctx_mem(bp, ctxm, true); 8549 continue; 8550 } 8551 entry_size = le16_to_cpu(resp->entry_size); 8552 max_entries = le32_to_cpu(resp->max_num_entries); 8553 if (ctxm->mem_valid) { 8554 if (!(flags & BNXT_CTX_MEM_PERSIST) || 8555 ctxm->entry_size != entry_size || 8556 ctxm->max_entries != max_entries) 8557 bnxt_free_one_ctx_mem(bp, ctxm, true); 8558 else 8559 continue; 8560 } 8561 ctxm->type = le16_to_cpu(resp->type); 8562 ctxm->entry_size = entry_size; 8563 ctxm->flags = flags; 8564 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 8565 ctxm->entry_multiple = resp->entry_multiple; 8566 ctxm->max_entries = max_entries; 8567 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 8568 init_val = resp->ctx_init_value; 8569 init_off = resp->ctx_init_offset; 8570 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 8571 BNXT_CTX_INIT_VALID(flags)); 8572 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 8573 
BNXT_MAX_SPLIT_ENTRY); 8574 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 8575 i++, p++) 8576 ctxm->split[i] = le32_to_cpu(*p); 8577 } 8578 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 8579 8580 ctx_done: 8581 hwrm_req_drop(bp, req); 8582 return rc; 8583 } 8584 8585 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 8586 { 8587 struct hwrm_func_backing_store_qcaps_output *resp; 8588 struct hwrm_func_backing_store_qcaps_input *req; 8589 int rc; 8590 8591 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || 8592 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 8593 return 0; 8594 8595 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8596 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 8597 8598 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 8599 if (rc) 8600 return rc; 8601 8602 resp = hwrm_req_hold(bp, req); 8603 rc = hwrm_req_send_silent(bp, req); 8604 if (!rc) { 8605 struct bnxt_ctx_mem_type *ctxm; 8606 struct bnxt_ctx_mem_info *ctx; 8607 u8 init_val, init_idx = 0; 8608 u16 init_mask; 8609 8610 ctx = bp->ctx; 8611 if (!ctx) { 8612 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8613 if (!ctx) { 8614 rc = -ENOMEM; 8615 goto ctx_err; 8616 } 8617 bp->ctx = ctx; 8618 } 8619 init_val = resp->ctx_kind_initializer; 8620 init_mask = le16_to_cpu(resp->ctx_init_mask); 8621 8622 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8623 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 8624 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 8625 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 8626 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 8627 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 8628 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 8629 (init_mask & (1 << init_idx++)) != 0); 8630 8631 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8632 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 8633 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 8634 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 8635 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 8636 (init_mask & (1 << init_idx++)) != 0); 8637 8638 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8639 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 8640 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 8641 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 8642 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 8643 (init_mask & (1 << init_idx++)) != 0); 8644 8645 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8646 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 8647 ctxm->max_entries = ctxm->vnic_entries + 8648 le16_to_cpu(resp->vnic_max_ring_table_entries); 8649 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 8650 bnxt_init_ctx_initializer(ctxm, init_val, 8651 resp->vnic_init_offset, 8652 (init_mask & (1 << init_idx++)) != 0); 8653 8654 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8655 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 8656 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 8657 bnxt_init_ctx_initializer(ctxm, init_val, 8658 resp->stat_init_offset, 8659 (init_mask & (1 << init_idx++)) != 0); 8660 8661 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8662 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 8663 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 8664 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 8665 ctxm->entry_multiple = resp->tqm_entries_multiple; 8666 if (!ctxm->entry_multiple) 8667 ctxm->entry_multiple = 1; 8668 
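		/* Seed the fast-path TQM type with the slow-path TQM sizing;
		 * its per-ring instance bitmap is filled in further below once
		 * tqm_fp_rings_count is known.
		 */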
8669 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 8670 8671 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8672 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 8673 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 8674 ctxm->mrav_num_entries_units = 8675 le16_to_cpu(resp->mrav_num_entries_units); 8676 bnxt_init_ctx_initializer(ctxm, init_val, 8677 resp->mrav_init_offset, 8678 (init_mask & (1 << init_idx++)) != 0); 8679 8680 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8681 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 8682 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 8683 8684 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 8685 if (!ctx->tqm_fp_rings_count) 8686 ctx->tqm_fp_rings_count = bp->max_q; 8687 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 8688 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 8689 8690 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8691 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 8692 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 8693 8694 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 8695 } else { 8696 rc = 0; 8697 } 8698 ctx_err: 8699 hwrm_req_drop(bp, req); 8700 return rc; 8701 } 8702 8703 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 8704 __le64 *pg_dir) 8705 { 8706 if (!rmem->nr_pages) 8707 return; 8708 8709 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8710 if (rmem->depth >= 1) { 8711 if (rmem->depth == 2) 8712 *pg_attr |= 2; 8713 else 8714 *pg_attr |= 1; 8715 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8716 } else { 8717 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8718 } 8719 } 8720 8721 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8722 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8723 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8724 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8725 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8726 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8727 8728 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8729 { 8730 struct hwrm_func_backing_store_cfg_input *req; 8731 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8732 struct bnxt_ctx_pg_info *ctx_pg; 8733 struct bnxt_ctx_mem_type *ctxm; 8734 void **__req = (void **)&req; 8735 u32 req_len = sizeof(*req); 8736 __le32 *num_entries; 8737 __le64 *pg_dir; 8738 u32 flags = 0; 8739 u8 *pg_attr; 8740 u32 ena; 8741 int rc; 8742 int i; 8743 8744 if (!ctx) 8745 return 0; 8746 8747 if (req_len > bp->hwrm_max_ext_req_len) 8748 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8749 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8750 if (rc) 8751 return rc; 8752 8753 req->enables = cpu_to_le32(enables); 8754 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8755 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8756 ctx_pg = ctxm->pg_info; 8757 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8758 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8759 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8760 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8761 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8762 &req->qpc_pg_size_qpc_lvl, 8763 &req->qpc_page_dir); 8764 8765 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8766 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8767 } 8768 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8769 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8770 ctx_pg = ctxm->pg_info; 8771 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8772 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 
8773 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8774 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8775 &req->srq_pg_size_srq_lvl, 8776 &req->srq_page_dir); 8777 } 8778 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8779 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8780 ctx_pg = ctxm->pg_info; 8781 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8782 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8783 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8784 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8785 &req->cq_pg_size_cq_lvl, 8786 &req->cq_page_dir); 8787 } 8788 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8789 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8790 ctx_pg = ctxm->pg_info; 8791 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8792 req->vnic_num_ring_table_entries = 8793 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); 8794 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8795 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8796 &req->vnic_pg_size_vnic_lvl, 8797 &req->vnic_page_dir); 8798 } 8799 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8800 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8801 ctx_pg = ctxm->pg_info; 8802 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8803 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8804 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8805 &req->stat_pg_size_stat_lvl, 8806 &req->stat_page_dir); 8807 } 8808 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8809 u32 units; 8810 8811 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8812 ctx_pg = ctxm->pg_info; 8813 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8814 units = ctxm->mrav_num_entries_units; 8815 if (units) { 8816 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8817 u32 entries; 8818 8819 num_mr = ctx_pg->entries - num_ah; 8820 entries = ((num_mr / units) << 16) | (num_ah / units); 8821 req->mrav_num_entries = cpu_to_le32(entries); 8822 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8823 } 8824 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8825 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8826 &req->mrav_pg_size_mrav_lvl, 8827 &req->mrav_page_dir); 8828 } 8829 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8830 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8831 ctx_pg = ctxm->pg_info; 8832 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8833 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8834 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8835 &req->tim_pg_size_tim_lvl, 8836 &req->tim_page_dir); 8837 } 8838 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8839 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8840 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8841 pg_dir = &req->tqm_sp_page_dir, 8842 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8843 ctx_pg = ctxm->pg_info; 8844 i < BNXT_MAX_TQM_RINGS; 8845 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8846 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8847 if (!(enables & ena)) 8848 continue; 8849 8850 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8851 *num_entries = cpu_to_le32(ctx_pg->entries); 8852 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8853 } 8854 req->flags = cpu_to_le32(flags); 8855 return hwrm_req_send(bp, req); 8856 } 8857 8858 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8859 struct bnxt_ctx_pg_info *ctx_pg) 8860 { 8861 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8862 8863 rmem->page_size = BNXT_PAGE_SIZE; 8864 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8865 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8866 rmem->flags = 
BNXT_RMEM_VALID_PTE_FLAG; 8867 if (rmem->depth >= 1) 8868 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8869 return bnxt_alloc_ring(bp, rmem); 8870 } 8871 8872 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8873 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8874 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8875 { 8876 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8877 int rc; 8878 8879 if (!mem_size) 8880 return -EINVAL; 8881 8882 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8883 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8884 ctx_pg->nr_pages = 0; 8885 return -EINVAL; 8886 } 8887 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8888 int nr_tbls, i; 8889 8890 rmem->depth = 2; 8891 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8892 GFP_KERNEL); 8893 if (!ctx_pg->ctx_pg_tbl) 8894 return -ENOMEM; 8895 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8896 rmem->nr_pages = nr_tbls; 8897 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8898 if (rc) 8899 return rc; 8900 for (i = 0; i < nr_tbls; i++) { 8901 struct bnxt_ctx_pg_info *pg_tbl; 8902 8903 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8904 if (!pg_tbl) 8905 return -ENOMEM; 8906 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8907 rmem = &pg_tbl->ring_mem; 8908 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8909 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8910 rmem->depth = 1; 8911 rmem->nr_pages = MAX_CTX_PAGES; 8912 rmem->ctx_mem = ctxm; 8913 if (i == (nr_tbls - 1)) { 8914 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8915 8916 if (rem) 8917 rmem->nr_pages = rem; 8918 } 8919 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8920 if (rc) 8921 break; 8922 } 8923 } else { 8924 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8925 if (rmem->nr_pages > 1 || depth) 8926 rmem->depth = 1; 8927 rmem->ctx_mem = ctxm; 8928 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8929 } 8930 return rc; 8931 } 8932 8933 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp, 8934 struct bnxt_ctx_pg_info *ctx_pg, 8935 void *buf, size_t offset, size_t head, 8936 size_t tail) 8937 { 8938 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8939 size_t nr_pages = ctx_pg->nr_pages; 8940 int page_size = rmem->page_size; 8941 size_t len = 0, total_len = 0; 8942 u16 depth = rmem->depth; 8943 8944 tail %= nr_pages * page_size; 8945 do { 8946 if (depth > 1) { 8947 int i = head / (page_size * MAX_CTX_PAGES); 8948 struct bnxt_ctx_pg_info *pg_tbl; 8949 8950 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8951 rmem = &pg_tbl->ring_mem; 8952 } 8953 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail); 8954 head += len; 8955 offset += len; 8956 total_len += len; 8957 if (head >= nr_pages * page_size) 8958 head = 0; 8959 } while (head != tail); 8960 return total_len; 8961 } 8962 8963 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 8964 struct bnxt_ctx_pg_info *ctx_pg) 8965 { 8966 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8967 8968 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 8969 ctx_pg->ctx_pg_tbl) { 8970 int i, nr_tbls = rmem->nr_pages; 8971 8972 for (i = 0; i < nr_tbls; i++) { 8973 struct bnxt_ctx_pg_info *pg_tbl; 8974 struct bnxt_ring_mem_info *rmem2; 8975 8976 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8977 if (!pg_tbl) 8978 continue; 8979 rmem2 = &pg_tbl->ring_mem; 8980 bnxt_free_ring(bp, rmem2); 8981 ctx_pg->ctx_pg_arr[i] = NULL; 8982 kfree(pg_tbl); 8983 ctx_pg->ctx_pg_tbl[i] = NULL; 8984 } 8985 kfree(ctx_pg->ctx_pg_tbl); 8986 ctx_pg->ctx_pg_tbl = NULL; 8987 } 8988 bnxt_free_ring(bp, rmem); 8989 ctx_pg->nr_pages = 0; 8990 } 8991 8992 static int 
bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 8993 struct bnxt_ctx_mem_type *ctxm, u32 entries, 8994 u8 pg_lvl) 8995 { 8996 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8997 int i, rc = 0, n = 1; 8998 u32 mem_size; 8999 9000 if (!ctxm->entry_size || !ctx_pg) 9001 return -EINVAL; 9002 if (ctxm->instance_bmap) 9003 n = hweight32(ctxm->instance_bmap); 9004 if (ctxm->entry_multiple) 9005 entries = roundup(entries, ctxm->entry_multiple); 9006 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 9007 mem_size = entries * ctxm->entry_size; 9008 for (i = 0; i < n && !rc; i++) { 9009 ctx_pg[i].entries = entries; 9010 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 9011 ctxm->init_value ? ctxm : NULL); 9012 } 9013 if (!rc) 9014 ctxm->mem_valid = 1; 9015 return rc; 9016 } 9017 9018 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 9019 struct bnxt_ctx_mem_type *ctxm, 9020 bool last) 9021 { 9022 struct hwrm_func_backing_store_cfg_v2_input *req; 9023 u32 instance_bmap = ctxm->instance_bmap; 9024 int i, j, rc = 0, n = 1; 9025 __le32 *p; 9026 9027 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 9028 return 0; 9029 9030 if (instance_bmap) 9031 n = hweight32(ctxm->instance_bmap); 9032 else 9033 instance_bmap = 1; 9034 9035 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 9036 if (rc) 9037 return rc; 9038 hwrm_req_hold(bp, req); 9039 req->type = cpu_to_le16(ctxm->type); 9040 req->entry_size = cpu_to_le16(ctxm->entry_size); 9041 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) && 9042 bnxt_bs_trace_avail(bp, ctxm->type)) { 9043 struct bnxt_bs_trace_info *bs_trace; 9044 u32 enables; 9045 9046 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET; 9047 req->enables = cpu_to_le32(enables); 9048 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]]; 9049 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset); 9050 } 9051 req->subtype_valid_cnt = ctxm->split_entry_cnt; 9052 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 9053 p[i] = cpu_to_le32(ctxm->split[i]); 9054 for (i = 0, j = 0; j < n && !rc; i++) { 9055 struct bnxt_ctx_pg_info *ctx_pg; 9056 9057 if (!(instance_bmap & (1 << i))) 9058 continue; 9059 req->instance = cpu_to_le16(i); 9060 ctx_pg = &ctxm->pg_info[j++]; 9061 if (!ctx_pg->entries) 9062 continue; 9063 req->num_entries = cpu_to_le32(ctx_pg->entries); 9064 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 9065 &req->page_size_pbl_level, 9066 &req->page_dir); 9067 if (last && j == n) 9068 req->flags = 9069 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 9070 rc = hwrm_req_send(bp, req); 9071 } 9072 hwrm_req_drop(bp, req); 9073 return rc; 9074 } 9075 9076 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 9077 { 9078 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9079 struct bnxt_ctx_mem_type *ctxm; 9080 u16 last_type = BNXT_CTX_INV; 9081 int rc = 0; 9082 u16 type; 9083 9084 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) { 9085 ctxm = &ctx->ctx_arr[type]; 9086 if (!bnxt_bs_trace_avail(bp, type)) 9087 continue; 9088 if (!ctxm->mem_valid) { 9089 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, 9090 ctxm->max_entries, 1); 9091 if (rc) { 9092 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", 9093 type); 9094 continue; 9095 } 9096 bnxt_bs_trace_init(bp, ctxm); 9097 } 9098 last_type = type; 9099 } 9100 9101 if (last_type == BNXT_CTX_INV) { 9102 if (!ena) 9103 return 0; 9104 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 9105 last_type = BNXT_CTX_MAX - 1; 9106 else 9107 
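			/* No TIM (RoCE) region was enabled, so the last L2
			 * context type carries the BS_CFG_ALL_DONE flag.
			 */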
last_type = BNXT_CTX_L2_MAX - 1; 9108 } 9109 ctx->ctx_arr[last_type].last = 1; 9110 9111 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 9112 ctxm = &ctx->ctx_arr[type]; 9113 9114 if (!ctxm->mem_valid) 9115 continue; 9116 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 9117 if (rc) 9118 return rc; 9119 } 9120 return 0; 9121 } 9122 9123 /** 9124 * __bnxt_copy_ctx_mem - copy host context memory 9125 * @bp: The driver context 9126 * @ctxm: The pointer to the context memory type 9127 * @buf: The destination buffer or NULL to just obtain the length 9128 * @offset: The buffer offset to copy the data to 9129 * @head: The head offset of context memory to copy from 9130 * @tail: The tail offset (last byte + 1) of context memory to end the copy 9131 * 9132 * This function is called for debugging purposes to dump the host context 9133 * used by the chip. 9134 * 9135 * Return: Length of memory copied 9136 */ 9137 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp, 9138 struct bnxt_ctx_mem_type *ctxm, void *buf, 9139 size_t offset, size_t head, size_t tail) 9140 { 9141 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 9142 size_t len = 0, total_len = 0; 9143 int i, n = 1; 9144 9145 if (!ctx_pg) 9146 return 0; 9147 9148 if (ctxm->instance_bmap) 9149 n = hweight32(ctxm->instance_bmap); 9150 for (i = 0; i < n; i++) { 9151 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head, 9152 tail); 9153 offset += len; 9154 total_len += len; 9155 } 9156 return total_len; 9157 } 9158 9159 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, 9160 void *buf, size_t offset) 9161 { 9162 size_t tail = ctxm->max_entries * ctxm->entry_size; 9163 9164 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail); 9165 } 9166 9167 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 9168 struct bnxt_ctx_mem_type *ctxm, bool force) 9169 { 9170 struct bnxt_ctx_pg_info *ctx_pg; 9171 int i, n = 1; 9172 9173 ctxm->last = 0; 9174 9175 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST)) 9176 return; 9177 9178 ctx_pg = ctxm->pg_info; 9179 if (ctx_pg) { 9180 if (ctxm->instance_bmap) 9181 n = hweight32(ctxm->instance_bmap); 9182 for (i = 0; i < n; i++) 9183 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 9184 9185 kfree(ctx_pg); 9186 ctxm->pg_info = NULL; 9187 ctxm->mem_valid = 0; 9188 } 9189 memset(ctxm, 0, sizeof(*ctxm)); 9190 } 9191 9192 void bnxt_free_ctx_mem(struct bnxt *bp, bool force) 9193 { 9194 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9195 u16 type; 9196 9197 if (!ctx) 9198 return; 9199 9200 for (type = 0; type < BNXT_CTX_V2_MAX; type++) 9201 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force); 9202 9203 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 9204 if (force) { 9205 kfree(ctx); 9206 bp->ctx = NULL; 9207 } 9208 } 9209 9210 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 9211 { 9212 struct bnxt_ctx_mem_type *ctxm; 9213 struct bnxt_ctx_mem_info *ctx; 9214 u32 l2_qps, qp1_qps, max_qps; 9215 u32 ena, entries_sp, entries; 9216 u32 srqs, max_srqs, min; 9217 u32 num_mr, num_ah; 9218 u32 extra_srqs = 0; 9219 u32 extra_qps = 0; 9220 u32 fast_qpmd_qps; 9221 u8 pg_lvl = 1; 9222 int i, rc; 9223 9224 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 9225 if (rc) { 9226 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 9227 rc); 9228 return rc; 9229 } 9230 ctx = bp->ctx; 9231 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 9232 return 0; 9233 9234 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9235 l2_qps = ctxm->qp_l2_entries; 9236 qp1_qps = ctxm->qp_qp1_entries; 9237 fast_qpmd_qps = 
ctxm->qp_fast_qpmd_entries; 9238 max_qps = ctxm->max_entries; 9239 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9240 srqs = ctxm->srq_l2_entries; 9241 max_srqs = ctxm->max_entries; 9242 ena = 0; 9243 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 9244 pg_lvl = 2; 9245 if (BNXT_SW_RES_LMT(bp)) { 9246 extra_qps = max_qps - l2_qps - qp1_qps; 9247 extra_srqs = max_srqs - srqs; 9248 } else { 9249 extra_qps = min_t(u32, 65536, 9250 max_qps - l2_qps - qp1_qps); 9251 /* allocate extra qps if fw supports RoCE fast qp 9252 * destroy feature 9253 */ 9254 extra_qps += fast_qpmd_qps; 9255 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 9256 } 9257 if (fast_qpmd_qps) 9258 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 9259 } 9260 9261 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9262 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 9263 pg_lvl); 9264 if (rc) 9265 return rc; 9266 9267 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9268 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 9269 if (rc) 9270 return rc; 9271 9272 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 9273 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 9274 extra_qps * 2, pg_lvl); 9275 if (rc) 9276 return rc; 9277 9278 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 9279 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9280 if (rc) 9281 return rc; 9282 9283 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 9284 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9285 if (rc) 9286 return rc; 9287 9288 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 9289 goto skip_rdma; 9290 9291 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 9292 if (BNXT_SW_RES_LMT(bp) && 9293 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) { 9294 num_ah = ctxm->mrav_av_entries; 9295 num_mr = ctxm->max_entries - num_ah; 9296 } else { 9297 /* 128K extra is needed to accommodate static AH context 9298 * allocation by f/w. 
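		 * num_mr is capped at 256K entries and num_ah at 128K; the AH
		 * share is recorded in ctxm->mrav_av_entries so firmware can
		 * split the MRAV reservation.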
9299 */ 9300 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 9301 num_ah = min_t(u32, num_mr, 1024 * 128); 9302 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 9303 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 9304 ctxm->mrav_av_entries = num_ah; 9305 } 9306 9307 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 9308 if (rc) 9309 return rc; 9310 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 9311 9312 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 9313 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 9314 if (rc) 9315 return rc; 9316 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 9317 9318 skip_rdma: 9319 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 9320 min = ctxm->min_entries; 9321 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 9322 2 * (extra_qps + qp1_qps) + min; 9323 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 9324 if (rc) 9325 return rc; 9326 9327 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 9328 entries = l2_qps + 2 * (extra_qps + qp1_qps); 9329 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 9330 if (rc) 9331 return rc; 9332 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 9333 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 9334 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 9335 9336 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 9337 rc = bnxt_backing_store_cfg_v2(bp, ena); 9338 else 9339 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 9340 if (rc) { 9341 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 9342 rc); 9343 return rc; 9344 } 9345 ctx->flags |= BNXT_CTX_FLAG_INITED; 9346 return 0; 9347 } 9348 9349 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) 9350 { 9351 struct hwrm_dbg_crashdump_medium_cfg_input *req; 9352 u16 page_attr; 9353 int rc; 9354 9355 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9356 return 0; 9357 9358 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); 9359 if (rc) 9360 return rc; 9361 9362 if (BNXT_PAGE_SIZE == 0x2000) 9363 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; 9364 else if (BNXT_PAGE_SIZE == 0x10000) 9365 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; 9366 else 9367 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; 9368 req->pg_size_lvl = cpu_to_le16(page_attr | 9369 bp->fw_crash_mem->ring_mem.depth); 9370 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); 9371 req->size = cpu_to_le32(bp->fw_crash_len); 9372 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); 9373 return hwrm_req_send(bp, req); 9374 } 9375 9376 static void bnxt_free_crash_dump_mem(struct bnxt *bp) 9377 { 9378 if (bp->fw_crash_mem) { 9379 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9380 kfree(bp->fw_crash_mem); 9381 bp->fw_crash_mem = NULL; 9382 } 9383 } 9384 9385 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) 9386 { 9387 u32 mem_size = 0; 9388 int rc; 9389 9390 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9391 return 0; 9392 9393 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); 9394 if (rc) 9395 return rc; 9396 9397 mem_size = round_up(mem_size, 4); 9398 9399 /* keep and use the existing pages */ 9400 if (bp->fw_crash_mem && 9401 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) 9402 goto alloc_done; 9403 9404 if (bp->fw_crash_mem) 9405 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9406 else 9407 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), 9408 GFP_KERNEL); 9409 if (!bp->fw_crash_mem) 9410 return -ENOMEM; 9411 9412 rc = 
bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); 9413 if (rc) { 9414 bnxt_free_crash_dump_mem(bp); 9415 return rc; 9416 } 9417 9418 alloc_done: 9419 bp->fw_crash_len = mem_size; 9420 return 0; 9421 } 9422 9423 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 9424 { 9425 struct hwrm_func_resource_qcaps_output *resp; 9426 struct hwrm_func_resource_qcaps_input *req; 9427 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9428 int rc; 9429 9430 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 9431 if (rc) 9432 return rc; 9433 9434 req->fid = cpu_to_le16(0xffff); 9435 resp = hwrm_req_hold(bp, req); 9436 rc = hwrm_req_send_silent(bp, req); 9437 if (rc) 9438 goto hwrm_func_resc_qcaps_exit; 9439 9440 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 9441 if (!all) 9442 goto hwrm_func_resc_qcaps_exit; 9443 9444 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 9445 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9446 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 9447 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9448 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 9449 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9450 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 9451 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9452 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 9453 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 9454 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 9455 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9456 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 9457 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9458 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 9459 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9460 9461 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 9462 u16 max_msix = le16_to_cpu(resp->max_msix); 9463 9464 hw_resc->max_nqs = max_msix; 9465 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 9466 } 9467 9468 if (BNXT_PF(bp)) { 9469 struct bnxt_pf_info *pf = &bp->pf; 9470 9471 pf->vf_resv_strategy = 9472 le16_to_cpu(resp->vf_reservation_strategy); 9473 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 9474 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 9475 } 9476 hwrm_func_resc_qcaps_exit: 9477 hwrm_req_drop(bp, req); 9478 return rc; 9479 } 9480 9481 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 9482 { 9483 struct hwrm_port_mac_ptp_qcfg_output *resp; 9484 struct hwrm_port_mac_ptp_qcfg_input *req; 9485 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 9486 u8 flags; 9487 int rc; 9488 9489 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { 9490 rc = -ENODEV; 9491 goto no_ptp; 9492 } 9493 9494 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 9495 if (rc) 9496 goto no_ptp; 9497 9498 req->port_id = cpu_to_le16(bp->pf.port_id); 9499 resp = hwrm_req_hold(bp, req); 9500 rc = hwrm_req_send(bp, req); 9501 if (rc) 9502 goto exit; 9503 9504 flags = resp->flags; 9505 if (BNXT_CHIP_P5_AND_MINUS(bp) && 9506 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 9507 rc = -ENODEV; 9508 goto exit; 9509 } 9510 if (!ptp) { 9511 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 9512 if (!ptp) { 9513 rc = -ENOMEM; 9514 goto exit; 9515 } 9516 ptp->bp = bp; 9517 bp->ptp_cfg = ptp; 9518 } 9519 9520 if (flags & 9521 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | 9522 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { 9523 ptp->refclk_regs[0] = 
le32_to_cpu(resp->ts_ref_clock_reg_lower); 9524 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 9525 } else if (BNXT_CHIP_P5(bp)) { 9526 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 9527 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 9528 } else { 9529 rc = -ENODEV; 9530 goto exit; 9531 } 9532 ptp->rtc_configured = 9533 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 9534 rc = bnxt_ptp_init(bp); 9535 if (rc) 9536 netdev_warn(bp->dev, "PTP initialization failed.\n"); 9537 exit: 9538 hwrm_req_drop(bp, req); 9539 if (!rc) 9540 return 0; 9541 9542 no_ptp: 9543 bnxt_ptp_clear(bp); 9544 kfree(ptp); 9545 bp->ptp_cfg = NULL; 9546 return rc; 9547 } 9548 9549 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 9550 { 9551 struct hwrm_func_qcaps_output *resp; 9552 struct hwrm_func_qcaps_input *req; 9553 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9554 u32 flags, flags_ext, flags_ext2; 9555 int rc; 9556 9557 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 9558 if (rc) 9559 return rc; 9560 9561 req->fid = cpu_to_le16(0xffff); 9562 resp = hwrm_req_hold(bp, req); 9563 rc = hwrm_req_send(bp, req); 9564 if (rc) 9565 goto hwrm_func_qcaps_exit; 9566 9567 flags = le32_to_cpu(resp->flags); 9568 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 9569 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 9570 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 9571 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 9572 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 9573 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 9574 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 9575 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 9576 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 9577 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 9578 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 9579 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 9580 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 9581 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 9582 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 9583 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 9584 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 9585 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 9586 9587 flags_ext = le32_to_cpu(resp->flags_ext); 9588 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 9589 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 9590 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 9591 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 9592 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 9593 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 9594 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 9595 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 9596 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 9597 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 9598 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED) 9599 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; 9600 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) 9601 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; 9602 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 9603 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 9604 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 9605 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 9606 9607 flags_ext2 = le32_to_cpu(resp->flags_ext2); 9608 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 9609 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 9610 if (flags_ext2 & 
FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 9611 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 9612 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) 9613 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; 9614 if (flags_ext2 & 9615 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED) 9616 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; 9617 if (BNXT_PF(bp) && 9618 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) 9619 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; 9620 9621 bp->tx_push_thresh = 0; 9622 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 9623 BNXT_FW_MAJ(bp) > 217) 9624 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 9625 9626 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9627 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9628 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9629 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9630 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 9631 if (!hw_resc->max_hw_ring_grps) 9632 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 9633 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9634 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9635 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9636 9637 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); 9638 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); 9639 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 9640 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 9641 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 9642 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 9643 9644 if (BNXT_PF(bp)) { 9645 struct bnxt_pf_info *pf = &bp->pf; 9646 9647 pf->fw_fid = le16_to_cpu(resp->fid); 9648 pf->port_id = le16_to_cpu(resp->port_id); 9649 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 9650 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 9651 pf->max_vfs = le16_to_cpu(resp->max_vfs); 9652 bp->flags &= ~BNXT_FLAG_WOL_CAP; 9653 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 9654 bp->flags |= BNXT_FLAG_WOL_CAP; 9655 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 9656 bp->fw_cap |= BNXT_FW_CAP_PTP; 9657 } else { 9658 bnxt_ptp_clear(bp); 9659 kfree(bp->ptp_cfg); 9660 bp->ptp_cfg = NULL; 9661 } 9662 } else { 9663 #ifdef CONFIG_BNXT_SRIOV 9664 struct bnxt_vf_info *vf = &bp->vf; 9665 9666 vf->fw_fid = le16_to_cpu(resp->fid); 9667 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 9668 #endif 9669 } 9670 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); 9671 9672 hwrm_func_qcaps_exit: 9673 hwrm_req_drop(bp, req); 9674 return rc; 9675 } 9676 9677 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 9678 { 9679 struct hwrm_dbg_qcaps_output *resp; 9680 struct hwrm_dbg_qcaps_input *req; 9681 int rc; 9682 9683 bp->fw_dbg_cap = 0; 9684 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 9685 return; 9686 9687 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 9688 if (rc) 9689 return; 9690 9691 req->fid = cpu_to_le16(0xffff); 9692 resp = hwrm_req_hold(bp, req); 9693 rc = hwrm_req_send(bp, req); 9694 if (rc) 9695 goto hwrm_dbg_qcaps_exit; 9696 9697 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 9698 9699 hwrm_dbg_qcaps_exit: 9700 hwrm_req_drop(bp, req); 9701 } 9702 9703 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 9704 9705 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 9706 { 9707 int rc; 9708 9709 rc = __bnxt_hwrm_func_qcaps(bp); 9710 if (rc) 9711 return rc; 9712 9713 bnxt_hwrm_dbg_qcaps(bp); 
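/* Basic and debug capabilities are cached at this point.  Query the
 * CoS queue configuration next; on HWRM spec 1.8.3 or newer, also
 * allocate backing store context memory and, if FUNC_RESOURCE_QCAPS
 * succeeds, enable the new resource manager (NEW_RM).
 */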
9714 9715 rc = bnxt_hwrm_queue_qportcfg(bp); 9716 if (rc) { 9717 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 9718 return rc; 9719 } 9720 if (bp->hwrm_spec_code >= 0x10803) { 9721 rc = bnxt_alloc_ctx_mem(bp); 9722 if (rc) 9723 return rc; 9724 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9725 if (!rc) 9726 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 9727 } 9728 return 0; 9729 } 9730 9731 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 9732 { 9733 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 9734 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 9735 u32 flags; 9736 int rc; 9737 9738 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 9739 return 0; 9740 9741 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 9742 if (rc) 9743 return rc; 9744 9745 resp = hwrm_req_hold(bp, req); 9746 rc = hwrm_req_send(bp, req); 9747 if (rc) 9748 goto hwrm_cfa_adv_qcaps_exit; 9749 9750 flags = le32_to_cpu(resp->flags); 9751 if (flags & 9752 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 9753 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 9754 9755 if (flags & 9756 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) 9757 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; 9758 9759 if (flags & 9760 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) 9761 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; 9762 9763 hwrm_cfa_adv_qcaps_exit: 9764 hwrm_req_drop(bp, req); 9765 return rc; 9766 } 9767 9768 static int __bnxt_alloc_fw_health(struct bnxt *bp) 9769 { 9770 if (bp->fw_health) 9771 return 0; 9772 9773 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 9774 if (!bp->fw_health) 9775 return -ENOMEM; 9776 9777 mutex_init(&bp->fw_health->lock); 9778 return 0; 9779 } 9780 9781 static int bnxt_alloc_fw_health(struct bnxt *bp) 9782 { 9783 int rc; 9784 9785 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 9786 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9787 return 0; 9788 9789 rc = __bnxt_alloc_fw_health(bp); 9790 if (rc) { 9791 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 9792 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9793 return rc; 9794 } 9795 9796 return 0; 9797 } 9798 9799 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 9800 { 9801 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 9802 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 9803 BNXT_FW_HEALTH_WIN_MAP_OFF); 9804 } 9805 9806 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 9807 { 9808 struct bnxt_fw_health *fw_health = bp->fw_health; 9809 u32 reg_type; 9810 9811 if (!fw_health) 9812 return; 9813 9814 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 9815 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9816 fw_health->status_reliable = false; 9817 9818 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 9819 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9820 fw_health->resets_reliable = false; 9821 } 9822 9823 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 9824 { 9825 void __iomem *hs; 9826 u32 status_loc; 9827 u32 reg_type; 9828 u32 sig; 9829 9830 if (bp->fw_health) 9831 bp->fw_health->status_reliable = false; 9832 9833 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 9834 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 9835 9836 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 9837 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 9838 if (!bp->chip_num) { 9839 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 9840 bp->chip_num = readl(bp->bar0 + 9841 
BNXT_FW_HEALTH_WIN_BASE + 9842 BNXT_GRC_REG_CHIP_NUM); 9843 } 9844 if (!BNXT_CHIP_P5_PLUS(bp)) 9845 return; 9846 9847 status_loc = BNXT_GRC_REG_STATUS_P5 | 9848 BNXT_FW_HEALTH_REG_TYPE_BAR0; 9849 } else { 9850 status_loc = readl(hs + offsetof(struct hcomm_status, 9851 fw_status_loc)); 9852 } 9853 9854 if (__bnxt_alloc_fw_health(bp)) { 9855 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 9856 return; 9857 } 9858 9859 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 9860 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 9861 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 9862 __bnxt_map_fw_health_reg(bp, status_loc); 9863 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 9864 BNXT_FW_HEALTH_WIN_OFF(status_loc); 9865 } 9866 9867 bp->fw_health->status_reliable = true; 9868 } 9869 9870 static int bnxt_map_fw_health_regs(struct bnxt *bp) 9871 { 9872 struct bnxt_fw_health *fw_health = bp->fw_health; 9873 u32 reg_base = 0xffffffff; 9874 int i; 9875 9876 bp->fw_health->status_reliable = false; 9877 bp->fw_health->resets_reliable = false; 9878 /* Only pre-map the monitoring GRC registers using window 3 */ 9879 for (i = 0; i < 4; i++) { 9880 u32 reg = fw_health->regs[i]; 9881 9882 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 9883 continue; 9884 if (reg_base == 0xffffffff) 9885 reg_base = reg & BNXT_GRC_BASE_MASK; 9886 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 9887 return -ERANGE; 9888 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 9889 } 9890 bp->fw_health->status_reliable = true; 9891 bp->fw_health->resets_reliable = true; 9892 if (reg_base == 0xffffffff) 9893 return 0; 9894 9895 __bnxt_map_fw_health_reg(bp, reg_base); 9896 return 0; 9897 } 9898 9899 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 9900 { 9901 if (!bp->fw_health) 9902 return; 9903 9904 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 9905 bp->fw_health->status_reliable = true; 9906 bp->fw_health->resets_reliable = true; 9907 } else { 9908 bnxt_try_map_fw_health_reg(bp); 9909 } 9910 } 9911 9912 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 9913 { 9914 struct bnxt_fw_health *fw_health = bp->fw_health; 9915 struct hwrm_error_recovery_qcfg_output *resp; 9916 struct hwrm_error_recovery_qcfg_input *req; 9917 int rc, i; 9918 9919 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9920 return 0; 9921 9922 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 9923 if (rc) 9924 return rc; 9925 9926 resp = hwrm_req_hold(bp, req); 9927 rc = hwrm_req_send(bp, req); 9928 if (rc) 9929 goto err_recovery_out; 9930 fw_health->flags = le32_to_cpu(resp->flags); 9931 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 9932 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 9933 rc = -EINVAL; 9934 goto err_recovery_out; 9935 } 9936 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 9937 fw_health->master_func_wait_dsecs = 9938 le32_to_cpu(resp->master_func_wait_period); 9939 fw_health->normal_func_wait_dsecs = 9940 le32_to_cpu(resp->normal_func_wait_period); 9941 fw_health->post_reset_wait_dsecs = 9942 le32_to_cpu(resp->master_func_wait_period_after_reset); 9943 fw_health->post_reset_max_wait_dsecs = 9944 le32_to_cpu(resp->max_bailout_time_after_reset); 9945 fw_health->regs[BNXT_FW_HEALTH_REG] = 9946 le32_to_cpu(resp->fw_health_status_reg); 9947 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 9948 le32_to_cpu(resp->fw_heartbeat_reg); 9949 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 9950 le32_to_cpu(resp->fw_reset_cnt_reg); 9951 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 9952 
le32_to_cpu(resp->reset_inprogress_reg); 9953 fw_health->fw_reset_inprog_reg_mask = 9954 le32_to_cpu(resp->reset_inprogress_reg_mask); 9955 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 9956 if (fw_health->fw_reset_seq_cnt >= 16) { 9957 rc = -EINVAL; 9958 goto err_recovery_out; 9959 } 9960 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 9961 fw_health->fw_reset_seq_regs[i] = 9962 le32_to_cpu(resp->reset_reg[i]); 9963 fw_health->fw_reset_seq_vals[i] = 9964 le32_to_cpu(resp->reset_reg_val[i]); 9965 fw_health->fw_reset_seq_delay_msec[i] = 9966 resp->delay_after_reset[i]; 9967 } 9968 err_recovery_out: 9969 hwrm_req_drop(bp, req); 9970 if (!rc) 9971 rc = bnxt_map_fw_health_regs(bp); 9972 if (rc) 9973 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9974 return rc; 9975 } 9976 9977 static int bnxt_hwrm_func_reset(struct bnxt *bp) 9978 { 9979 struct hwrm_func_reset_input *req; 9980 int rc; 9981 9982 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 9983 if (rc) 9984 return rc; 9985 9986 req->enables = 0; 9987 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 9988 return hwrm_req_send(bp, req); 9989 } 9990 9991 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 9992 { 9993 struct hwrm_nvm_get_dev_info_output nvm_info; 9994 9995 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 9996 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 9997 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 9998 nvm_info.nvm_cfg_ver_upd); 9999 } 10000 10001 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 10002 { 10003 struct hwrm_queue_qportcfg_output *resp; 10004 struct hwrm_queue_qportcfg_input *req; 10005 u8 i, j, *qptr; 10006 bool no_rdma; 10007 int rc = 0; 10008 10009 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 10010 if (rc) 10011 return rc; 10012 10013 resp = hwrm_req_hold(bp, req); 10014 rc = hwrm_req_send(bp, req); 10015 if (rc) 10016 goto qportcfg_exit; 10017 10018 if (!resp->max_configurable_queues) { 10019 rc = -EINVAL; 10020 goto qportcfg_exit; 10021 } 10022 bp->max_tc = resp->max_configurable_queues; 10023 bp->max_lltc = resp->max_configurable_lossless_queues; 10024 if (bp->max_tc > BNXT_MAX_QUEUE) 10025 bp->max_tc = BNXT_MAX_QUEUE; 10026 10027 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 10028 qptr = &resp->queue_id0; 10029 for (i = 0, j = 0; i < bp->max_tc; i++) { 10030 bp->q_info[j].queue_id = *qptr; 10031 bp->q_ids[i] = *qptr++; 10032 bp->q_info[j].queue_profile = *qptr++; 10033 bp->tc_to_qidx[j] = j; 10034 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 10035 (no_rdma && BNXT_PF(bp))) 10036 j++; 10037 } 10038 bp->max_q = bp->max_tc; 10039 bp->max_tc = max_t(u8, j, 1); 10040 10041 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 10042 bp->max_tc = 1; 10043 10044 if (bp->max_lltc > bp->max_tc) 10045 bp->max_lltc = bp->max_tc; 10046 10047 qportcfg_exit: 10048 hwrm_req_drop(bp, req); 10049 return rc; 10050 } 10051 10052 static int bnxt_hwrm_poll(struct bnxt *bp) 10053 { 10054 struct hwrm_ver_get_input *req; 10055 int rc; 10056 10057 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10058 if (rc) 10059 return rc; 10060 10061 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10062 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10063 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10064 10065 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 10066 rc = hwrm_req_send(bp, req); 10067 return rc; 10068 } 10069 10070 static int bnxt_hwrm_ver_get(struct bnxt *bp) 10071 { 10072 struct hwrm_ver_get_output *resp; 10073 struct hwrm_ver_get_input *req; 10074 u16 fw_maj, fw_min, fw_bld, fw_rsv; 
10075 u32 dev_caps_cfg, hwrm_ver; 10076 int rc, len; 10077 10078 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10079 if (rc) 10080 return rc; 10081 10082 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 10083 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 10084 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10085 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10086 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10087 10088 resp = hwrm_req_hold(bp, req); 10089 rc = hwrm_req_send(bp, req); 10090 if (rc) 10091 goto hwrm_ver_get_exit; 10092 10093 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 10094 10095 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 10096 resp->hwrm_intf_min_8b << 8 | 10097 resp->hwrm_intf_upd_8b; 10098 if (resp->hwrm_intf_maj_8b < 1) { 10099 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 10100 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10101 resp->hwrm_intf_upd_8b); 10102 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 10103 } 10104 10105 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 10106 HWRM_VERSION_UPDATE; 10107 10108 if (bp->hwrm_spec_code > hwrm_ver) 10109 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10110 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 10111 HWRM_VERSION_UPDATE); 10112 else 10113 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10114 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10115 resp->hwrm_intf_upd_8b); 10116 10117 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 10118 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 10119 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 10120 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 10121 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 10122 len = FW_VER_STR_LEN; 10123 } else { 10124 fw_maj = resp->hwrm_fw_maj_8b; 10125 fw_min = resp->hwrm_fw_min_8b; 10126 fw_bld = resp->hwrm_fw_bld_8b; 10127 fw_rsv = resp->hwrm_fw_rsvd_8b; 10128 len = BC_HWRM_STR_LEN; 10129 } 10130 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 10131 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 10132 fw_rsv); 10133 10134 if (strlen(resp->active_pkg_name)) { 10135 int fw_ver_len = strlen(bp->fw_ver_str); 10136 10137 snprintf(bp->fw_ver_str + fw_ver_len, 10138 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 10139 resp->active_pkg_name); 10140 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 10141 } 10142 10143 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 10144 if (!bp->hwrm_cmd_timeout) 10145 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10146 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 10147 if (!bp->hwrm_cmd_max_timeout) 10148 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 10149 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 10150 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 10151 bp->hwrm_cmd_max_timeout / 1000); 10152 10153 if (resp->hwrm_intf_maj_8b >= 1) { 10154 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 10155 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 10156 } 10157 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 10158 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 10159 10160 bp->chip_num = le16_to_cpu(resp->chip_num); 10161 bp->chip_rev = resp->chip_rev; 10162 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 10163 !resp->chip_metal) 10164 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 10165 10166 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 10167 if ((dev_caps_cfg & 
VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 10168 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 10169 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 10170 10171 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 10172 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 10173 10174 if (dev_caps_cfg & 10175 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 10176 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 10177 10178 if (dev_caps_cfg & 10179 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 10180 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 10181 10182 if (dev_caps_cfg & 10183 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 10184 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 10185 10186 hwrm_ver_get_exit: 10187 hwrm_req_drop(bp, req); 10188 return rc; 10189 } 10190 10191 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 10192 { 10193 struct hwrm_fw_set_time_input *req; 10194 struct tm tm; 10195 time64_t now = ktime_get_real_seconds(); 10196 int rc; 10197 10198 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 10199 bp->hwrm_spec_code < 0x10400) 10200 return -EOPNOTSUPP; 10201 10202 time64_to_tm(now, 0, &tm); 10203 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 10204 if (rc) 10205 return rc; 10206 10207 req->year = cpu_to_le16(1900 + tm.tm_year); 10208 req->month = 1 + tm.tm_mon; 10209 req->day = tm.tm_mday; 10210 req->hour = tm.tm_hour; 10211 req->minute = tm.tm_min; 10212 req->second = tm.tm_sec; 10213 return hwrm_req_send(bp, req); 10214 } 10215 10216 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 10217 { 10218 u64 sw_tmp; 10219 10220 hw &= mask; 10221 sw_tmp = (*sw & ~mask) | hw; 10222 if (hw < (*sw & mask)) 10223 sw_tmp += mask + 1; 10224 WRITE_ONCE(*sw, sw_tmp); 10225 } 10226 10227 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 10228 int count, bool ignore_zero) 10229 { 10230 int i; 10231 10232 for (i = 0; i < count; i++) { 10233 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 10234 10235 if (ignore_zero && !hw) 10236 continue; 10237 10238 if (masks[i] == -1ULL) 10239 sw_stats[i] = hw; 10240 else 10241 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 10242 } 10243 } 10244 10245 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 10246 { 10247 if (!stats->hw_stats) 10248 return; 10249 10250 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10251 stats->hw_masks, stats->len / 8, false); 10252 } 10253 10254 static void bnxt_accumulate_all_stats(struct bnxt *bp) 10255 { 10256 struct bnxt_stats_mem *ring0_stats; 10257 bool ignore_zero = false; 10258 int i; 10259 10260 /* Chip bug. Counter intermittently becomes 0. 
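* Skip zero values when accumulating the per-ring counters on
 * P5_PLUS chips so that a transient zero read is not mistaken for a
 * counter wrap by bnxt_add_one_ctr().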
*/ 10261 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10262 ignore_zero = true; 10263 10264 for (i = 0; i < bp->cp_nr_rings; i++) { 10265 struct bnxt_napi *bnapi = bp->bnapi[i]; 10266 struct bnxt_cp_ring_info *cpr; 10267 struct bnxt_stats_mem *stats; 10268 10269 cpr = &bnapi->cp_ring; 10270 stats = &cpr->stats; 10271 if (!i) 10272 ring0_stats = stats; 10273 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10274 ring0_stats->hw_masks, 10275 ring0_stats->len / 8, ignore_zero); 10276 } 10277 if (bp->flags & BNXT_FLAG_PORT_STATS) { 10278 struct bnxt_stats_mem *stats = &bp->port_stats; 10279 __le64 *hw_stats = stats->hw_stats; 10280 u64 *sw_stats = stats->sw_stats; 10281 u64 *masks = stats->hw_masks; 10282 int cnt; 10283 10284 cnt = sizeof(struct rx_port_stats) / 8; 10285 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10286 10287 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10288 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10289 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10290 cnt = sizeof(struct tx_port_stats) / 8; 10291 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10292 } 10293 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 10294 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 10295 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 10296 } 10297 } 10298 10299 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 10300 { 10301 struct hwrm_port_qstats_input *req; 10302 struct bnxt_pf_info *pf = &bp->pf; 10303 int rc; 10304 10305 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 10306 return 0; 10307 10308 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10309 return -EOPNOTSUPP; 10310 10311 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 10312 if (rc) 10313 return rc; 10314 10315 req->flags = flags; 10316 req->port_id = cpu_to_le16(pf->port_id); 10317 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 10318 BNXT_TX_PORT_STATS_BYTE_OFFSET); 10319 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 10320 return hwrm_req_send(bp, req); 10321 } 10322 10323 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 10324 { 10325 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 10326 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 10327 struct hwrm_port_qstats_ext_output *resp_qs; 10328 struct hwrm_port_qstats_ext_input *req_qs; 10329 struct bnxt_pf_info *pf = &bp->pf; 10330 u32 tx_stat_size; 10331 int rc; 10332 10333 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 10334 return 0; 10335 10336 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10337 return -EOPNOTSUPP; 10338 10339 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 10340 if (rc) 10341 return rc; 10342 10343 req_qs->flags = flags; 10344 req_qs->port_id = cpu_to_le16(pf->port_id); 10345 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 10346 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 10347 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
10348 sizeof(struct tx_port_stats_ext) : 0; 10349 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 10350 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 10351 resp_qs = hwrm_req_hold(bp, req_qs); 10352 rc = hwrm_req_send(bp, req_qs); 10353 if (!rc) { 10354 bp->fw_rx_stats_ext_size = 10355 le16_to_cpu(resp_qs->rx_stat_size) / 8; 10356 if (BNXT_FW_MAJ(bp) < 220 && 10357 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 10358 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 10359 10360 bp->fw_tx_stats_ext_size = tx_stat_size ? 10361 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 10362 } else { 10363 bp->fw_rx_stats_ext_size = 0; 10364 bp->fw_tx_stats_ext_size = 0; 10365 } 10366 hwrm_req_drop(bp, req_qs); 10367 10368 if (flags) 10369 return rc; 10370 10371 if (bp->fw_tx_stats_ext_size <= 10372 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 10373 bp->pri2cos_valid = 0; 10374 return rc; 10375 } 10376 10377 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 10378 if (rc) 10379 return rc; 10380 10381 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 10382 10383 resp_qc = hwrm_req_hold(bp, req_qc); 10384 rc = hwrm_req_send(bp, req_qc); 10385 if (!rc) { 10386 u8 *pri2cos; 10387 int i, j; 10388 10389 pri2cos = &resp_qc->pri0_cos_queue_id; 10390 for (i = 0; i < 8; i++) { 10391 u8 queue_id = pri2cos[i]; 10392 u8 queue_idx; 10393 10394 /* Per-port queue IDs start from 0, 10, 20, etc. */ 10395 queue_idx = queue_id % 10; 10396 if (queue_idx > BNXT_MAX_QUEUE) { 10397 bp->pri2cos_valid = false; 10398 hwrm_req_drop(bp, req_qc); 10399 return rc; 10400 } 10401 for (j = 0; j < bp->max_q; j++) { 10402 if (bp->q_ids[j] == queue_id) 10403 bp->pri2cos_idx[i] = queue_idx; 10404 } 10405 } 10406 bp->pri2cos_valid = true; 10407 } 10408 hwrm_req_drop(bp, req_qc); 10409 10410 return rc; 10411 } 10412 10413 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 10414 { 10415 bnxt_hwrm_tunnel_dst_port_free(bp, 10416 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10417 bnxt_hwrm_tunnel_dst_port_free(bp, 10418 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10419 } 10420 10421 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 10422 { 10423 int rc, i; 10424 u32 tpa_flags = 0; 10425 10426 if (set_tpa) 10427 tpa_flags = bp->flags & BNXT_FLAG_TPA; 10428 else if (BNXT_NO_FW_ACCESS(bp)) 10429 return 0; 10430 for (i = 0; i < bp->nr_vnics; i++) { 10431 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); 10432 if (rc) { 10433 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 10434 i, rc); 10435 return rc; 10436 } 10437 } 10438 return 0; 10439 } 10440 10441 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 10442 { 10443 int i; 10444 10445 for (i = 0; i < bp->nr_vnics; i++) 10446 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); 10447 } 10448 10449 static void bnxt_clear_vnic(struct bnxt *bp) 10450 { 10451 if (!bp->vnic_info) 10452 return; 10453 10454 bnxt_hwrm_clear_vnic_filter(bp); 10455 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 10456 /* clear all RSS settings before freeing the vnic ctx */ 10457 bnxt_hwrm_clear_vnic_rss(bp); 10458 bnxt_hwrm_vnic_ctx_free(bp); 10459 } 10460 /* before freeing the vnic, undo the vnic TPA settings */ 10461 if (bp->flags & BNXT_FLAG_TPA) 10462 bnxt_set_tpa(bp, false); 10463 bnxt_hwrm_vnic_free(bp); 10464 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10465 bnxt_hwrm_vnic_ctx_free(bp); 10466 } 10467 10468 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 10469 bool irq_re_init)
10470 { 10471 bnxt_clear_vnic(bp); 10472 bnxt_hwrm_ring_free(bp, close_path); 10473 bnxt_hwrm_ring_grp_free(bp); 10474 if (irq_re_init) { 10475 bnxt_hwrm_stat_ctx_free(bp); 10476 bnxt_hwrm_free_tunnel_ports(bp); 10477 } 10478 } 10479 10480 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 10481 { 10482 struct hwrm_func_cfg_input *req; 10483 u8 evb_mode; 10484 int rc; 10485 10486 if (br_mode == BRIDGE_MODE_VEB) 10487 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 10488 else if (br_mode == BRIDGE_MODE_VEPA) 10489 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 10490 else 10491 return -EINVAL; 10492 10493 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10494 if (rc) 10495 return rc; 10496 10497 req->fid = cpu_to_le16(0xffff); 10498 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 10499 req->evb_mode = evb_mode; 10500 return hwrm_req_send(bp, req); 10501 } 10502 10503 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 10504 { 10505 struct hwrm_func_cfg_input *req; 10506 int rc; 10507 10508 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 10509 return 0; 10510 10511 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10512 if (rc) 10513 return rc; 10514 10515 req->fid = cpu_to_le16(0xffff); 10516 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 10517 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 10518 if (size == 128) 10519 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 10520 10521 return hwrm_req_send(bp, req); 10522 } 10523 10524 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10525 { 10526 int rc; 10527 10528 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 10529 goto skip_rss_ctx; 10530 10531 /* allocate context for vnic */ 10532 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 10533 if (rc) { 10534 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10535 vnic->vnic_id, rc); 10536 goto vnic_setup_err; 10537 } 10538 bp->rsscos_nr_ctxs++; 10539 10540 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10541 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); 10542 if (rc) { 10543 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 10544 vnic->vnic_id, rc); 10545 goto vnic_setup_err; 10546 } 10547 bp->rsscos_nr_ctxs++; 10548 } 10549 10550 skip_rss_ctx: 10551 /* configure default vnic, ring grp */ 10552 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10553 if (rc) { 10554 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10555 vnic->vnic_id, rc); 10556 goto vnic_setup_err; 10557 } 10558 10559 /* Enable RSS hashing on vnic */ 10560 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); 10561 if (rc) { 10562 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 10563 vnic->vnic_id, rc); 10564 goto vnic_setup_err; 10565 } 10566 10567 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10568 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10569 if (rc) { 10570 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10571 vnic->vnic_id, rc); 10572 } 10573 } 10574 10575 vnic_setup_err: 10576 return rc; 10577 } 10578 10579 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10580 u8 valid) 10581 { 10582 struct hwrm_vnic_update_input *req; 10583 int rc; 10584 10585 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); 10586 if (rc) 10587 return rc; 10588 10589 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 10590 10591 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) 10592 req->mru = cpu_to_le16(vnic->mru); 10593 10594 req->enables = cpu_to_le32(valid); 10595 10596 return hwrm_req_send(bp, req); 10597 } 10598 10599 int 
bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10600 { 10601 int rc; 10602 10603 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 10604 if (rc) { 10605 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 10606 vnic->vnic_id, rc); 10607 return rc; 10608 } 10609 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10610 if (rc) 10611 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10612 vnic->vnic_id, rc); 10613 return rc; 10614 } 10615 10616 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10617 { 10618 int rc, i, nr_ctxs; 10619 10620 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 10621 for (i = 0; i < nr_ctxs; i++) { 10622 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); 10623 if (rc) { 10624 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 10625 vnic->vnic_id, i, rc); 10626 break; 10627 } 10628 bp->rsscos_nr_ctxs++; 10629 } 10630 if (i < nr_ctxs) 10631 return -ENOMEM; 10632 10633 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); 10634 if (rc) 10635 return rc; 10636 10637 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10638 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10639 if (rc) { 10640 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10641 vnic->vnic_id, rc); 10642 } 10643 } 10644 return rc; 10645 } 10646 10647 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10648 { 10649 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10650 return __bnxt_setup_vnic_p5(bp, vnic); 10651 else 10652 return __bnxt_setup_vnic(bp, vnic); 10653 } 10654 10655 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, 10656 struct bnxt_vnic_info *vnic, 10657 u16 start_rx_ring_idx, int rx_rings) 10658 { 10659 int rc; 10660 10661 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); 10662 if (rc) { 10663 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10664 vnic->vnic_id, rc); 10665 return rc; 10666 } 10667 return bnxt_setup_vnic(bp, vnic); 10668 } 10669 10670 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 10671 { 10672 struct bnxt_vnic_info *vnic; 10673 int i, rc = 0; 10674 10675 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 10676 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 10677 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); 10678 } 10679 10680 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10681 return 0; 10682 10683 for (i = 0; i < bp->rx_nr_rings; i++) { 10684 u16 vnic_id = i + 1; 10685 u16 ring_id = i; 10686 10687 if (vnic_id >= bp->nr_vnics) 10688 break; 10689 10690 vnic = &bp->vnic_info[vnic_id]; 10691 vnic->flags |= BNXT_VNIC_RFS_FLAG; 10692 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 10693 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 10694 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) 10695 break; 10696 } 10697 return rc; 10698 } 10699 10700 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, 10701 bool all) 10702 { 10703 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10704 struct bnxt_filter_base *usr_fltr, *tmp; 10705 struct bnxt_ntuple_filter *ntp_fltr; 10706 int i; 10707 10708 if (netif_running(bp->dev)) { 10709 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10710 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10711 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10712 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10713 } 10714 } 10715 if (!all) 10716 return; 10717 10718 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 10719 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && 10720 usr_fltr->fw_vnic_id == rss_ctx->index) { 10721 ntp_fltr = 
container_of(usr_fltr, 10722 struct bnxt_ntuple_filter, 10723 base); 10724 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); 10725 bnxt_del_ntp_filter(bp, ntp_fltr); 10726 bnxt_del_one_usr_fltr(bp, usr_fltr); 10727 } 10728 } 10729 10730 if (vnic->rss_table) 10731 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, 10732 vnic->rss_table, 10733 vnic->rss_table_dma_addr); 10734 bp->num_rss_ctx--; 10735 } 10736 10737 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) 10738 { 10739 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); 10740 struct ethtool_rxfh_context *ctx; 10741 unsigned long context; 10742 10743 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10744 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10745 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10746 10747 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || 10748 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || 10749 __bnxt_setup_vnic_p5(bp, vnic)) { 10750 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", 10751 rss_ctx->index); 10752 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 10753 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); 10754 } 10755 } 10756 } 10757 10758 static void bnxt_clear_rss_ctxs(struct bnxt *bp) 10759 { 10760 struct ethtool_rxfh_context *ctx; 10761 unsigned long context; 10762 10763 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10764 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10765 10766 bnxt_del_one_rss_ctx(bp, rss_ctx, false); 10767 } 10768 } 10769 10770 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 10771 static bool bnxt_promisc_ok(struct bnxt *bp) 10772 { 10773 #ifdef CONFIG_BNXT_SRIOV 10774 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 10775 return false; 10776 #endif 10777 return true; 10778 } 10779 10780 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 10781 { 10782 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; 10783 unsigned int rc = 0; 10784 10785 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); 10786 if (rc) { 10787 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10788 rc); 10789 return rc; 10790 } 10791 10792 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10793 if (rc) { 10794 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10795 rc); 10796 return rc; 10797 } 10798 return rc; 10799 } 10800 10801 static int bnxt_cfg_rx_mode(struct bnxt *); 10802 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 10803 10804 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 10805 { 10806 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 10807 int rc = 0; 10808 unsigned int rx_nr_rings = bp->rx_nr_rings; 10809 10810 if (irq_re_init) { 10811 rc = bnxt_hwrm_stat_ctx_alloc(bp); 10812 if (rc) { 10813 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 10814 rc); 10815 goto err_out; 10816 } 10817 } 10818 10819 rc = bnxt_hwrm_ring_alloc(bp); 10820 if (rc) { 10821 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 10822 goto err_out; 10823 } 10824 10825 rc = bnxt_hwrm_ring_grp_alloc(bp); 10826 if (rc) { 10827 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 10828 goto err_out; 10829 } 10830 10831 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10832 rx_nr_rings--; 10833 10834 /* default vnic 0 */ 10835 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); 10836 if (rc) { 10837 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 10838 goto err_out; 10839 } 10840 10841 if (BNXT_VF(bp)) 10842 
bnxt_hwrm_func_qcfg(bp); 10843 10844 rc = bnxt_setup_vnic(bp, vnic); 10845 if (rc) 10846 goto err_out; 10847 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 10848 bnxt_hwrm_update_rss_hash_cfg(bp); 10849 10850 if (bp->flags & BNXT_FLAG_RFS) { 10851 rc = bnxt_alloc_rfs_vnics(bp); 10852 if (rc) 10853 goto err_out; 10854 } 10855 10856 if (bp->flags & BNXT_FLAG_TPA) { 10857 rc = bnxt_set_tpa(bp, true); 10858 if (rc) 10859 goto err_out; 10860 } 10861 10862 if (BNXT_VF(bp)) 10863 bnxt_update_vf_mac(bp); 10864 10865 /* Filter for default vnic 0 */ 10866 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 10867 if (rc) { 10868 if (BNXT_VF(bp) && rc == -ENODEV) 10869 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 10870 else 10871 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 10872 goto err_out; 10873 } 10874 vnic->uc_filter_count = 1; 10875 10876 vnic->rx_mask = 0; 10877 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 10878 goto skip_rx_mask; 10879 10880 if (bp->dev->flags & IFF_BROADCAST) 10881 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10882 10883 if (bp->dev->flags & IFF_PROMISC) 10884 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10885 10886 if (bp->dev->flags & IFF_ALLMULTI) { 10887 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10888 vnic->mc_list_count = 0; 10889 } else if (bp->dev->flags & IFF_MULTICAST) { 10890 u32 mask = 0; 10891 10892 bnxt_mc_list_updated(bp, &mask); 10893 vnic->rx_mask |= mask; 10894 } 10895 10896 rc = bnxt_cfg_rx_mode(bp); 10897 if (rc) 10898 goto err_out; 10899 10900 skip_rx_mask: 10901 rc = bnxt_hwrm_set_coal(bp); 10902 if (rc) 10903 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 10904 rc); 10905 10906 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10907 rc = bnxt_setup_nitroa0_vnic(bp); 10908 if (rc) 10909 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 10910 rc); 10911 } 10912 10913 if (BNXT_VF(bp)) { 10914 bnxt_hwrm_func_qcfg(bp); 10915 netdev_update_features(bp->dev); 10916 } 10917 10918 return 0; 10919 10920 err_out: 10921 bnxt_hwrm_resource_free(bp, 0, true); 10922 10923 return rc; 10924 } 10925 10926 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 10927 { 10928 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 10929 return 0; 10930 } 10931 10932 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 10933 { 10934 bnxt_init_cp_rings(bp); 10935 bnxt_init_rx_rings(bp); 10936 bnxt_init_tx_rings(bp); 10937 bnxt_init_ring_grps(bp, irq_re_init); 10938 bnxt_init_vnics(bp); 10939 10940 return bnxt_init_chip(bp, irq_re_init); 10941 } 10942 10943 static int bnxt_set_real_num_queues(struct bnxt *bp) 10944 { 10945 int rc; 10946 struct net_device *dev = bp->dev; 10947 10948 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 10949 bp->tx_nr_rings_xdp); 10950 if (rc) 10951 return rc; 10952 10953 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 10954 if (rc) 10955 return rc; 10956 10957 #ifdef CONFIG_RFS_ACCEL 10958 if (bp->flags & BNXT_FLAG_RFS) 10959 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 10960 #endif 10961 10962 return rc; 10963 } 10964 10965 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10966 bool shared) 10967 { 10968 int _rx = *rx, _tx = *tx; 10969 10970 if (shared) { 10971 *rx = min_t(int, _rx, max); 10972 *tx = min_t(int, _tx, max); 10973 } else { 10974 if (max < 2) 10975 return -ENOMEM; 10976 10977 while (_rx + _tx > max) { 10978 if (_rx > _tx && _rx > 1) 10979 _rx--; 10980 else if 
(_tx > 1) 10981 _tx--; 10982 } 10983 *rx = _rx; 10984 *tx = _tx; 10985 } 10986 return 0; 10987 } 10988 10989 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 10990 { 10991 return (tx - tx_xdp) / tx_sets + tx_xdp; 10992 } 10993 10994 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 10995 { 10996 int tcs = bp->num_tc; 10997 10998 if (!tcs) 10999 tcs = 1; 11000 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 11001 } 11002 11003 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 11004 { 11005 int tcs = bp->num_tc; 11006 11007 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 11008 bp->tx_nr_rings_xdp; 11009 } 11010 11011 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 11012 bool sh) 11013 { 11014 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 11015 11016 if (tx_cp != *tx) { 11017 int tx_saved = tx_cp, rc; 11018 11019 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 11020 if (rc) 11021 return rc; 11022 if (tx_cp != tx_saved) 11023 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 11024 return 0; 11025 } 11026 return __bnxt_trim_rings(bp, rx, tx, max, sh); 11027 } 11028 11029 static void bnxt_setup_msix(struct bnxt *bp) 11030 { 11031 const int len = sizeof(bp->irq_tbl[0].name); 11032 struct net_device *dev = bp->dev; 11033 int tcs, i; 11034 11035 tcs = bp->num_tc; 11036 if (tcs) { 11037 int i, off, count; 11038 11039 for (i = 0; i < tcs; i++) { 11040 count = bp->tx_nr_rings_per_tc; 11041 off = BNXT_TC_TO_RING_BASE(bp, i); 11042 netdev_set_tc_queue(dev, i, count, off); 11043 } 11044 } 11045 11046 for (i = 0; i < bp->cp_nr_rings; i++) { 11047 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11048 char *attr; 11049 11050 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11051 attr = "TxRx"; 11052 else if (i < bp->rx_nr_rings) 11053 attr = "rx"; 11054 else 11055 attr = "tx"; 11056 11057 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 11058 attr, i); 11059 bp->irq_tbl[map_idx].handler = bnxt_msix; 11060 } 11061 } 11062 11063 static int bnxt_init_int_mode(struct bnxt *bp); 11064 11065 static int bnxt_change_msix(struct bnxt *bp, int total) 11066 { 11067 struct msi_map map; 11068 int i; 11069 11070 /* add MSIX to the end if needed */ 11071 for (i = bp->total_irqs; i < total; i++) { 11072 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); 11073 if (map.index < 0) 11074 return bp->total_irqs; 11075 bp->irq_tbl[i].vector = map.virq; 11076 bp->total_irqs++; 11077 } 11078 11079 /* trim MSIX from the end if needed */ 11080 for (i = bp->total_irqs; i > total; i--) { 11081 map.index = i - 1; 11082 map.virq = bp->irq_tbl[i - 1].vector; 11083 pci_msix_free_irq(bp->pdev, map); 11084 bp->total_irqs--; 11085 } 11086 return bp->total_irqs; 11087 } 11088 11089 static int bnxt_setup_int_mode(struct bnxt *bp) 11090 { 11091 int rc; 11092 11093 if (!bp->irq_tbl) { 11094 rc = bnxt_init_int_mode(bp); 11095 if (rc || !bp->irq_tbl) 11096 return rc ?: -ENODEV; 11097 } 11098 11099 bnxt_setup_msix(bp); 11100 11101 rc = bnxt_set_real_num_queues(bp); 11102 return rc; 11103 } 11104 11105 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 11106 { 11107 return bp->hw_resc.max_rsscos_ctxs; 11108 } 11109 11110 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 11111 { 11112 return bp->hw_resc.max_vnics; 11113 } 11114 11115 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 11116 { 11117 return bp->hw_resc.max_stat_ctxs; 11118 } 11119 11120 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 11121 { 11122 return bp->hw_resc.max_cp_rings; 11123 } 11124 
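/* Number of completion rings usable by the L2 driver.  On chips older
 * than P5, the MSI-X vectors reserved for the ULP (RoCE) driver also
 * consume completion rings, so they are subtracted from the hardware
 * maximum.
 */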
11125 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 11126 { 11127 unsigned int cp = bp->hw_resc.max_cp_rings; 11128 11129 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 11130 cp -= bnxt_get_ulp_msix_num(bp); 11131 11132 return cp; 11133 } 11134 11135 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 11136 { 11137 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11138 11139 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11140 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 11141 11142 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 11143 } 11144 11145 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 11146 { 11147 bp->hw_resc.max_irqs = max_irqs; 11148 } 11149 11150 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 11151 { 11152 unsigned int cp; 11153 11154 cp = bnxt_get_max_func_cp_rings_for_en(bp); 11155 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11156 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 11157 else 11158 return cp - bp->cp_nr_rings; 11159 } 11160 11161 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 11162 { 11163 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 11164 } 11165 11166 static int bnxt_get_avail_msix(struct bnxt *bp, int num) 11167 { 11168 int max_irq = bnxt_get_max_func_irqs(bp); 11169 int total_req = bp->cp_nr_rings + num; 11170 11171 if (max_irq < total_req) { 11172 num = max_irq - bp->cp_nr_rings; 11173 if (num <= 0) 11174 return 0; 11175 } 11176 return num; 11177 } 11178 11179 static int bnxt_get_num_msix(struct bnxt *bp) 11180 { 11181 if (!BNXT_NEW_RM(bp)) 11182 return bnxt_get_max_func_irqs(bp); 11183 11184 return bnxt_nq_rings_in_use(bp); 11185 } 11186 11187 static int bnxt_init_int_mode(struct bnxt *bp) 11188 { 11189 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; 11190 11191 total_vecs = bnxt_get_num_msix(bp); 11192 max = bnxt_get_max_func_irqs(bp); 11193 if (total_vecs > max) 11194 total_vecs = max; 11195 11196 if (!total_vecs) 11197 return 0; 11198 11199 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 11200 min = 2; 11201 11202 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs, 11203 PCI_IRQ_MSIX); 11204 ulp_msix = bnxt_get_ulp_msix_num(bp); 11205 if (total_vecs < 0 || total_vecs < ulp_msix) { 11206 rc = -ENODEV; 11207 goto msix_setup_exit; 11208 } 11209 11210 tbl_size = total_vecs; 11211 if (pci_msix_can_alloc_dyn(bp->pdev)) 11212 tbl_size = max; 11213 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); 11214 if (bp->irq_tbl) { 11215 for (i = 0; i < total_vecs; i++) 11216 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); 11217 11218 bp->total_irqs = total_vecs; 11219 /* Trim rings based upon num of vectors allocated */ 11220 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 11221 total_vecs - ulp_msix, min == 1); 11222 if (rc) 11223 goto msix_setup_exit; 11224 11225 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 11226 bp->cp_nr_rings = (min == 1) ? 
11227 max_t(int, tx_cp, bp->rx_nr_rings) : 11228 tx_cp + bp->rx_nr_rings; 11229 11230 } else { 11231 rc = -ENOMEM; 11232 goto msix_setup_exit; 11233 } 11234 return 0; 11235 11236 msix_setup_exit: 11237 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); 11238 kfree(bp->irq_tbl); 11239 bp->irq_tbl = NULL; 11240 pci_free_irq_vectors(bp->pdev); 11241 return rc; 11242 } 11243 11244 static void bnxt_clear_int_mode(struct bnxt *bp) 11245 { 11246 pci_free_irq_vectors(bp->pdev); 11247 11248 kfree(bp->irq_tbl); 11249 bp->irq_tbl = NULL; 11250 } 11251 11252 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 11253 { 11254 bool irq_cleared = false; 11255 bool irq_change = false; 11256 int tcs = bp->num_tc; 11257 int irqs_required; 11258 int rc; 11259 11260 if (!bnxt_need_reserve_rings(bp)) 11261 return 0; 11262 11263 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 11264 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 11265 11266 if (ulp_msix > bp->ulp_num_msix_want) 11267 ulp_msix = bp->ulp_num_msix_want; 11268 irqs_required = ulp_msix + bp->cp_nr_rings; 11269 } else { 11270 irqs_required = bnxt_get_num_msix(bp); 11271 } 11272 11273 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { 11274 irq_change = true; 11275 if (!pci_msix_can_alloc_dyn(bp->pdev)) { 11276 bnxt_ulp_irq_stop(bp); 11277 bnxt_clear_int_mode(bp); 11278 irq_cleared = true; 11279 } 11280 } 11281 rc = __bnxt_reserve_rings(bp); 11282 if (irq_cleared) { 11283 if (!rc) 11284 rc = bnxt_init_int_mode(bp); 11285 bnxt_ulp_irq_restart(bp, rc); 11286 } else if (irq_change && !rc) { 11287 if (bnxt_change_msix(bp, irqs_required) != irqs_required) 11288 rc = -ENOSPC; 11289 } 11290 if (rc) { 11291 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 11292 return rc; 11293 } 11294 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 11295 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 11296 netdev_err(bp->dev, "tx ring reservation failure\n"); 11297 netdev_reset_tc(bp->dev); 11298 bp->num_tc = 0; 11299 if (bp->tx_nr_rings_xdp) 11300 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 11301 else 11302 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 11303 return -ENOMEM; 11304 } 11305 return 0; 11306 } 11307 11308 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx) 11309 { 11310 struct bnxt_tx_ring_info *txr; 11311 struct netdev_queue *txq; 11312 struct bnxt_napi *bnapi; 11313 int i; 11314 11315 bnapi = bp->bnapi[idx]; 11316 bnxt_for_each_napi_tx(i, bnapi, txr) { 11317 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11318 synchronize_net(); 11319 11320 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) { 11321 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11322 if (txq) { 11323 __netif_tx_lock_bh(txq); 11324 netif_tx_stop_queue(txq); 11325 __netif_tx_unlock_bh(txq); 11326 } 11327 } 11328 11329 if (!bp->tph_mode) 11330 continue; 11331 11332 bnxt_hwrm_tx_ring_free(bp, txr, true); 11333 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr); 11334 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index); 11335 bnxt_clear_one_cp_ring(bp, txr->tx_cpr); 11336 } 11337 } 11338 11339 static int bnxt_tx_queue_start(struct bnxt *bp, int idx) 11340 { 11341 struct bnxt_tx_ring_info *txr; 11342 struct netdev_queue *txq; 11343 struct bnxt_napi *bnapi; 11344 int rc, i; 11345 11346 bnapi = bp->bnapi[idx]; 11347 /* All rings have been reserved and previously allocated. 11348 * Reallocating with the same parameters should never fail. 
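* The TX and completion rings are freed and re-created here only when
 * TPH steering tags are in use (bp->tph_mode); otherwise we just clear
 * dev_state and restart the queue.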
11349 */ 11350 bnxt_for_each_napi_tx(i, bnapi, txr) { 11351 if (!bp->tph_mode) 11352 goto start_tx; 11353 11354 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 11355 if (rc) 11356 return rc; 11357 11358 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false); 11359 if (rc) 11360 return rc; 11361 11362 txr->tx_prod = 0; 11363 txr->tx_cons = 0; 11364 txr->tx_hw_cons = 0; 11365 start_tx: 11366 WRITE_ONCE(txr->dev_state, 0); 11367 synchronize_net(); 11368 11369 if (bnapi->flags & BNXT_NAPI_FLAG_XDP) 11370 continue; 11371 11372 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11373 if (txq) 11374 netif_tx_start_queue(txq); 11375 } 11376 11377 return 0; 11378 } 11379 11380 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify, 11381 const cpumask_t *mask) 11382 { 11383 struct bnxt_irq *irq; 11384 u16 tag; 11385 int err; 11386 11387 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11388 11389 if (!irq->bp->tph_mode) 11390 return; 11391 11392 cpumask_copy(irq->cpu_mask, mask); 11393 11394 if (irq->ring_nr >= irq->bp->rx_nr_rings) 11395 return; 11396 11397 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11398 cpumask_first(irq->cpu_mask), &tag)) 11399 return; 11400 11401 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) 11402 return; 11403 11404 netdev_lock(irq->bp->dev); 11405 if (netif_running(irq->bp->dev)) { 11406 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); 11407 if (err) 11408 netdev_err(irq->bp->dev, 11409 "RX queue restart failed: err=%d\n", err); 11410 } 11411 netdev_unlock(irq->bp->dev); 11412 } 11413 11414 static void bnxt_irq_affinity_release(struct kref *ref) 11415 { 11416 struct irq_affinity_notify *notify = 11417 container_of(ref, struct irq_affinity_notify, kref); 11418 struct bnxt_irq *irq; 11419 11420 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11421 11422 if (!irq->bp->tph_mode) 11423 return; 11424 11425 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) { 11426 netdev_err(irq->bp->dev, 11427 "Setting ST=0 for MSIX entry %d failed\n", 11428 irq->msix_nr); 11429 return; 11430 } 11431 } 11432 11433 static void bnxt_release_irq_notifier(struct bnxt_irq *irq) 11434 { 11435 irq_set_affinity_notifier(irq->vector, NULL); 11436 } 11437 11438 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq) 11439 { 11440 struct irq_affinity_notify *notify; 11441 11442 irq->bp = bp; 11443 11444 /* Nothing to do if TPH is not enabled */ 11445 if (!bp->tph_mode) 11446 return; 11447 11448 /* Register IRQ affinity notifier */ 11449 notify = &irq->affinity_notify; 11450 notify->irq = irq->vector; 11451 notify->notify = bnxt_irq_affinity_notify; 11452 notify->release = bnxt_irq_affinity_release; 11453 11454 irq_set_affinity_notifier(irq->vector, notify); 11455 } 11456 11457 static void bnxt_free_irq(struct bnxt *bp) 11458 { 11459 struct bnxt_irq *irq; 11460 int i; 11461 11462 #ifdef CONFIG_RFS_ACCEL 11463 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 11464 bp->dev->rx_cpu_rmap = NULL; 11465 #endif 11466 if (!bp->irq_tbl || !bp->bnapi) 11467 return; 11468 11469 for (i = 0; i < bp->cp_nr_rings; i++) { 11470 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11471 11472 irq = &bp->irq_tbl[map_idx]; 11473 if (irq->requested) { 11474 if (irq->have_cpumask) { 11475 irq_update_affinity_hint(irq->vector, NULL); 11476 free_cpumask_var(irq->cpu_mask); 11477 irq->have_cpumask = 0; 11478 } 11479 11480 bnxt_release_irq_notifier(irq); 11481 11482 free_irq(irq->vector, bp->bnapi[i]); 11483 } 11484 11485 irq->requested = 0; 
11486 } 11487 11488 /* Disable TPH support */ 11489 pcie_disable_tph(bp->pdev); 11490 bp->tph_mode = 0; 11491 } 11492 11493 static int bnxt_request_irq(struct bnxt *bp) 11494 { 11495 int i, j, rc = 0; 11496 unsigned long flags = 0; 11497 #ifdef CONFIG_RFS_ACCEL 11498 struct cpu_rmap *rmap; 11499 #endif 11500 11501 rc = bnxt_setup_int_mode(bp); 11502 if (rc) { 11503 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 11504 rc); 11505 return rc; 11506 } 11507 #ifdef CONFIG_RFS_ACCEL 11508 rmap = bp->dev->rx_cpu_rmap; 11509 #endif 11510 11511 /* Enable TPH support as part of IRQ request */ 11512 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE); 11513 if (!rc) 11514 bp->tph_mode = PCI_TPH_ST_IV_MODE; 11515 11516 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 11517 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11518 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 11519 11520 #ifdef CONFIG_RFS_ACCEL 11521 if (rmap && bp->bnapi[i]->rx_ring) { 11522 rc = irq_cpu_rmap_add(rmap, irq->vector); 11523 if (rc) 11524 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 11525 j); 11526 j++; 11527 } 11528 #endif 11529 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 11530 bp->bnapi[i]); 11531 if (rc) 11532 break; 11533 11534 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector); 11535 irq->requested = 1; 11536 11537 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 11538 int numa_node = dev_to_node(&bp->pdev->dev); 11539 u16 tag; 11540 11541 irq->have_cpumask = 1; 11542 irq->msix_nr = map_idx; 11543 irq->ring_nr = i; 11544 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 11545 irq->cpu_mask); 11546 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask); 11547 if (rc) { 11548 netdev_warn(bp->dev, 11549 "Update affinity hint failed, IRQ = %d\n", 11550 irq->vector); 11551 break; 11552 } 11553 11554 bnxt_register_irq_notifier(bp, irq); 11555 11556 /* Init ST table entry */ 11557 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11558 cpumask_first(irq->cpu_mask), 11559 &tag)) 11560 continue; 11561 11562 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag); 11563 } 11564 } 11565 return rc; 11566 } 11567 11568 static void bnxt_del_napi(struct bnxt *bp) 11569 { 11570 int i; 11571 11572 if (!bp->bnapi) 11573 return; 11574 11575 for (i = 0; i < bp->rx_nr_rings; i++) 11576 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 11577 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 11578 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 11579 11580 for (i = 0; i < bp->cp_nr_rings; i++) { 11581 struct bnxt_napi *bnapi = bp->bnapi[i]; 11582 11583 __netif_napi_del_locked(&bnapi->napi); 11584 } 11585 /* We called __netif_napi_del_locked(), we need 11586 * to respect an RCU grace period before freeing napi structures. 
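 * The synchronize_net() call below provides that grace period for all of the NAPI instances deleted above.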
11587 */ 11588 synchronize_net(); 11589 } 11590 11591 static void bnxt_init_napi(struct bnxt *bp) 11592 { 11593 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 11594 unsigned int cp_nr_rings = bp->cp_nr_rings; 11595 struct bnxt_napi *bnapi; 11596 int i; 11597 11598 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11599 poll_fn = bnxt_poll_p5; 11600 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 11601 cp_nr_rings--; 11602 for (i = 0; i < cp_nr_rings; i++) { 11603 bnapi = bp->bnapi[i]; 11604 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, 11605 bnapi->index); 11606 } 11607 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 11608 bnapi = bp->bnapi[cp_nr_rings]; 11609 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); 11610 } 11611 } 11612 11613 static void bnxt_disable_napi(struct bnxt *bp) 11614 { 11615 int i; 11616 11617 if (!bp->bnapi || 11618 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 11619 return; 11620 11621 for (i = 0; i < bp->cp_nr_rings; i++) { 11622 struct bnxt_napi *bnapi = bp->bnapi[i]; 11623 struct bnxt_cp_ring_info *cpr; 11624 11625 cpr = &bnapi->cp_ring; 11626 if (bnapi->tx_fault) 11627 cpr->sw_stats->tx.tx_resets++; 11628 if (bnapi->in_reset) 11629 cpr->sw_stats->rx.rx_resets++; 11630 napi_disable_locked(&bnapi->napi); 11631 } 11632 } 11633 11634 static void bnxt_enable_napi(struct bnxt *bp) 11635 { 11636 int i; 11637 11638 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11639 for (i = 0; i < bp->cp_nr_rings; i++) { 11640 struct bnxt_napi *bnapi = bp->bnapi[i]; 11641 struct bnxt_cp_ring_info *cpr; 11642 11643 bnapi->tx_fault = 0; 11644 11645 cpr = &bnapi->cp_ring; 11646 bnapi->in_reset = false; 11647 11648 if (bnapi->rx_ring) { 11649 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 11650 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 11651 } 11652 napi_enable_locked(&bnapi->napi); 11653 } 11654 } 11655 11656 void bnxt_tx_disable(struct bnxt *bp) 11657 { 11658 int i; 11659 struct bnxt_tx_ring_info *txr; 11660 11661 if (bp->tx_ring) { 11662 for (i = 0; i < bp->tx_nr_rings; i++) { 11663 txr = &bp->tx_ring[i]; 11664 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11665 } 11666 } 11667 /* Make sure napi polls see @dev_state change */ 11668 synchronize_net(); 11669 /* Drop carrier first to prevent TX timeout */ 11670 netif_carrier_off(bp->dev); 11671 /* Stop all TX queues */ 11672 netif_tx_disable(bp->dev); 11673 } 11674 11675 void bnxt_tx_enable(struct bnxt *bp) 11676 { 11677 int i; 11678 struct bnxt_tx_ring_info *txr; 11679 11680 for (i = 0; i < bp->tx_nr_rings; i++) { 11681 txr = &bp->tx_ring[i]; 11682 WRITE_ONCE(txr->dev_state, 0); 11683 } 11684 /* Make sure napi polls see @dev_state change */ 11685 synchronize_net(); 11686 netif_tx_wake_all_queues(bp->dev); 11687 if (BNXT_LINK_IS_UP(bp)) 11688 netif_carrier_on(bp->dev); 11689 } 11690 11691 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 11692 { 11693 u8 active_fec = link_info->active_fec_sig_mode & 11694 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 11695 11696 switch (active_fec) { 11697 default: 11698 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 11699 return "None"; 11700 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 11701 return "Clause 74 BaseR"; 11702 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 11703 return "Clause 91 RS(528,514)"; 11704 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 11705 return "Clause 91 RS544_1XN"; 11706 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 11707 return "Clause 91 RS(544,514)"; 11708 case 
PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 11709 return "Clause 91 RS272_1XN"; 11710 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 11711 return "Clause 91 RS(272,257)"; 11712 } 11713 } 11714 11715 void bnxt_report_link(struct bnxt *bp) 11716 { 11717 if (BNXT_LINK_IS_UP(bp)) { 11718 const char *signal = ""; 11719 const char *flow_ctrl; 11720 const char *duplex; 11721 u32 speed; 11722 u16 fec; 11723 11724 netif_carrier_on(bp->dev); 11725 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 11726 if (speed == SPEED_UNKNOWN) { 11727 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 11728 return; 11729 } 11730 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 11731 duplex = "full"; 11732 else 11733 duplex = "half"; 11734 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 11735 flow_ctrl = "ON - receive & transmit"; 11736 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 11737 flow_ctrl = "ON - transmit"; 11738 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 11739 flow_ctrl = "ON - receive"; 11740 else 11741 flow_ctrl = "none"; 11742 if (bp->link_info.phy_qcfg_resp.option_flags & 11743 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 11744 u8 sig_mode = bp->link_info.active_fec_sig_mode & 11745 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 11746 switch (sig_mode) { 11747 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 11748 signal = "(NRZ) "; 11749 break; 11750 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 11751 signal = "(PAM4 56Gbps) "; 11752 break; 11753 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 11754 signal = "(PAM4 112Gbps) "; 11755 break; 11756 default: 11757 break; 11758 } 11759 } 11760 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 11761 speed, signal, duplex, flow_ctrl); 11762 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 11763 netdev_info(bp->dev, "EEE is %s\n", 11764 bp->eee.eee_active ? "active" : 11765 "not active"); 11766 fec = bp->link_info.fec_cfg; 11767 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 11768 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 11769 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 11770 bnxt_report_fec(&bp->link_info)); 11771 } else { 11772 netif_carrier_off(bp->dev); 11773 netdev_err(bp->dev, "NIC Link is Down\n"); 11774 } 11775 } 11776 11777 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 11778 { 11779 if (!resp->supported_speeds_auto_mode && 11780 !resp->supported_speeds_force_mode && 11781 !resp->supported_pam4_speeds_auto_mode && 11782 !resp->supported_pam4_speeds_force_mode && 11783 !resp->supported_speeds2_auto_mode && 11784 !resp->supported_speeds2_force_mode) 11785 return true; 11786 return false; 11787 } 11788 11789 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 11790 { 11791 struct bnxt_link_info *link_info = &bp->link_info; 11792 struct hwrm_port_phy_qcaps_output *resp; 11793 struct hwrm_port_phy_qcaps_input *req; 11794 int rc = 0; 11795 11796 if (bp->hwrm_spec_code < 0x10201) 11797 return 0; 11798 11799 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 11800 if (rc) 11801 return rc; 11802 11803 resp = hwrm_req_hold(bp, req); 11804 rc = hwrm_req_send(bp, req); 11805 if (rc) 11806 goto hwrm_phy_qcaps_exit; 11807 11808 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 11809 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 11810 struct ethtool_keee *eee = &bp->eee; 11811 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 11812 11813 _bnxt_fw_to_linkmode(eee->supported, fw_speeds); 11814 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 11815 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 11816 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 11817 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 11818 } 11819 11820 if (bp->hwrm_spec_code >= 0x10a01) { 11821 if (bnxt_phy_qcaps_no_speed(resp)) { 11822 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 11823 netdev_warn(bp->dev, "Ethernet link disabled\n"); 11824 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 11825 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 11826 netdev_info(bp->dev, "Ethernet link enabled\n"); 11827 /* Phy re-enabled, reprobe the speeds */ 11828 link_info->support_auto_speeds = 0; 11829 link_info->support_pam4_auto_speeds = 0; 11830 link_info->support_auto_speeds2 = 0; 11831 } 11832 } 11833 if (resp->supported_speeds_auto_mode) 11834 link_info->support_auto_speeds = 11835 le16_to_cpu(resp->supported_speeds_auto_mode); 11836 if (resp->supported_pam4_speeds_auto_mode) 11837 link_info->support_pam4_auto_speeds = 11838 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 11839 if (resp->supported_speeds2_auto_mode) 11840 link_info->support_auto_speeds2 = 11841 le16_to_cpu(resp->supported_speeds2_auto_mode); 11842 11843 bp->port_count = resp->port_cnt; 11844 11845 hwrm_phy_qcaps_exit: 11846 hwrm_req_drop(bp, req); 11847 return rc; 11848 } 11849 11850 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp) 11851 { 11852 struct hwrm_port_mac_qcaps_output *resp; 11853 struct hwrm_port_mac_qcaps_input *req; 11854 int rc; 11855 11856 if (bp->hwrm_spec_code < 0x10a03) 11857 return; 11858 11859 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); 11860 if (rc) 11861 return; 11862 11863 resp = hwrm_req_hold(bp, req); 11864 rc = hwrm_req_send_silent(bp, req); 11865 if (!rc) 11866 bp->mac_flags = resp->flags; 11867 hwrm_req_drop(bp, req); 11868 } 11869 11870 static bool bnxt_support_dropped(u16 advertising, u16 supported) 11871 { 11872 u16 diff = advertising ^ supported; 11873 11874 return ((supported | diff) != supported); 11875 } 11876 11877 static bool bnxt_support_speed_dropped(struct bnxt_link_info 
*link_info) 11878 { 11879 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 11880 11881 /* Check if any advertised speeds are no longer supported. The caller 11882 * holds the link_lock mutex, so we can modify link_info settings. 11883 */ 11884 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11885 if (bnxt_support_dropped(link_info->advertising, 11886 link_info->support_auto_speeds2)) { 11887 link_info->advertising = link_info->support_auto_speeds2; 11888 return true; 11889 } 11890 return false; 11891 } 11892 if (bnxt_support_dropped(link_info->advertising, 11893 link_info->support_auto_speeds)) { 11894 link_info->advertising = link_info->support_auto_speeds; 11895 return true; 11896 } 11897 if (bnxt_support_dropped(link_info->advertising_pam4, 11898 link_info->support_pam4_auto_speeds)) { 11899 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 11900 return true; 11901 } 11902 return false; 11903 } 11904 11905 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 11906 { 11907 struct bnxt_link_info *link_info = &bp->link_info; 11908 struct hwrm_port_phy_qcfg_output *resp; 11909 struct hwrm_port_phy_qcfg_input *req; 11910 u8 link_state = link_info->link_state; 11911 bool support_changed; 11912 int rc; 11913 11914 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 11915 if (rc) 11916 return rc; 11917 11918 resp = hwrm_req_hold(bp, req); 11919 rc = hwrm_req_send(bp, req); 11920 if (rc) { 11921 hwrm_req_drop(bp, req); 11922 if (BNXT_VF(bp) && rc == -ENODEV) { 11923 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 11924 rc = 0; 11925 } 11926 return rc; 11927 } 11928 11929 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 11930 link_info->phy_link_status = resp->link; 11931 link_info->duplex = resp->duplex_cfg; 11932 if (bp->hwrm_spec_code >= 0x10800) 11933 link_info->duplex = resp->duplex_state; 11934 link_info->pause = resp->pause; 11935 link_info->auto_mode = resp->auto_mode; 11936 link_info->auto_pause_setting = resp->auto_pause; 11937 link_info->lp_pause = resp->link_partner_adv_pause; 11938 link_info->force_pause_setting = resp->force_pause; 11939 link_info->duplex_setting = resp->duplex_cfg; 11940 if (link_info->phy_link_status == BNXT_LINK_LINK) { 11941 link_info->link_speed = le16_to_cpu(resp->link_speed); 11942 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 11943 link_info->active_lanes = resp->active_lanes; 11944 } else { 11945 link_info->link_speed = 0; 11946 link_info->active_lanes = 0; 11947 } 11948 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 11949 link_info->force_pam4_link_speed = 11950 le16_to_cpu(resp->force_pam4_link_speed); 11951 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 11952 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 11953 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 11954 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 11955 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 11956 link_info->auto_pam4_link_speeds = 11957 le16_to_cpu(resp->auto_pam4_link_speed_mask); 11958 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 11959 link_info->lp_auto_link_speeds = 11960 le16_to_cpu(resp->link_partner_adv_speeds); 11961 link_info->lp_auto_pam4_link_speeds = 11962 resp->link_partner_pam4_adv_speeds; 11963 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 11964 link_info->phy_ver[0] = resp->phy_maj; 11965 link_info->phy_ver[1] = resp->phy_min; 11966 
link_info->phy_ver[2] = resp->phy_bld; 11967 link_info->media_type = resp->media_type; 11968 link_info->phy_type = resp->phy_type; 11969 link_info->transceiver = resp->xcvr_pkg_type; 11970 link_info->phy_addr = resp->eee_config_phy_addr & 11971 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 11972 link_info->module_status = resp->module_status; 11973 11974 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 11975 struct ethtool_keee *eee = &bp->eee; 11976 u16 fw_speeds; 11977 11978 eee->eee_active = 0; 11979 if (resp->eee_config_phy_addr & 11980 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 11981 eee->eee_active = 1; 11982 fw_speeds = le16_to_cpu( 11983 resp->link_partner_adv_eee_link_speed_mask); 11984 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); 11985 } 11986 11987 /* Pull initial EEE config */ 11988 if (!chng_link_state) { 11989 if (resp->eee_config_phy_addr & 11990 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 11991 eee->eee_enabled = 1; 11992 11993 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 11994 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); 11995 11996 if (resp->eee_config_phy_addr & 11997 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 11998 __le32 tmr; 11999 12000 eee->tx_lpi_enabled = 1; 12001 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 12002 eee->tx_lpi_timer = le32_to_cpu(tmr) & 12003 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 12004 } 12005 } 12006 } 12007 12008 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 12009 if (bp->hwrm_spec_code >= 0x10504) { 12010 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 12011 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 12012 } 12013 /* TODO: need to add more logic to report VF link */ 12014 if (chng_link_state) { 12015 if (link_info->phy_link_status == BNXT_LINK_LINK) 12016 link_info->link_state = BNXT_LINK_STATE_UP; 12017 else 12018 link_info->link_state = BNXT_LINK_STATE_DOWN; 12019 if (link_state != link_info->link_state) 12020 bnxt_report_link(bp); 12021 } else { 12022 /* always link down if not require to update link state */ 12023 link_info->link_state = BNXT_LINK_STATE_DOWN; 12024 } 12025 hwrm_req_drop(bp, req); 12026 12027 if (!BNXT_PHY_CFG_ABLE(bp)) 12028 return 0; 12029 12030 support_changed = bnxt_support_speed_dropped(link_info); 12031 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 12032 bnxt_hwrm_set_link_setting(bp, true, false); 12033 return 0; 12034 } 12035 12036 static void bnxt_get_port_module_status(struct bnxt *bp) 12037 { 12038 struct bnxt_link_info *link_info = &bp->link_info; 12039 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 12040 u8 module_status; 12041 12042 if (bnxt_update_link(bp, true)) 12043 return; 12044 12045 module_status = link_info->module_status; 12046 switch (module_status) { 12047 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 12048 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 12049 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 12050 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 12051 bp->pf.port_id); 12052 if (bp->hwrm_spec_code >= 0x10201) { 12053 netdev_warn(bp->dev, "Module part number %s\n", 12054 resp->phy_vendor_partnumber); 12055 } 12056 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 12057 netdev_warn(bp->dev, "TX is disabled\n"); 12058 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 12059 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 12060 } 12061 } 12062 12063 static void 12064 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 
12065 { 12066 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 12067 if (bp->hwrm_spec_code >= 0x10201) 12068 req->auto_pause = 12069 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 12070 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12071 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 12072 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12073 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 12074 req->enables |= 12075 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12076 } else { 12077 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12078 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 12079 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12080 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 12081 req->enables |= 12082 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 12083 if (bp->hwrm_spec_code >= 0x10201) { 12084 req->auto_pause = req->force_pause; 12085 req->enables |= cpu_to_le32( 12086 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12087 } 12088 } 12089 } 12090 12091 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 12092 { 12093 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 12094 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 12095 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12096 req->enables |= 12097 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 12098 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 12099 } else if (bp->link_info.advertising) { 12100 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 12101 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 12102 } 12103 if (bp->link_info.advertising_pam4) { 12104 req->enables |= 12105 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 12106 req->auto_link_pam4_speed_mask = 12107 cpu_to_le16(bp->link_info.advertising_pam4); 12108 } 12109 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 12110 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 12111 } else { 12112 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 12113 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12114 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 12115 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 12116 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 12117 (u32)bp->link_info.req_link_speed); 12118 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 12119 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12120 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 12121 } else { 12122 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12123 } 12124 } 12125 12126 /* tell chimp that the setting takes effect immediately */ 12127 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 12128 } 12129 12130 int bnxt_hwrm_set_pause(struct bnxt *bp) 12131 { 12132 struct hwrm_port_phy_cfg_input *req; 12133 int rc; 12134 12135 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12136 if (rc) 12137 return rc; 12138 12139 bnxt_hwrm_set_pause_common(bp, req); 12140 12141 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 12142 bp->link_info.force_link_chng) 12143 bnxt_hwrm_set_link_common(bp, req); 12144 12145 rc = hwrm_req_send(bp, req); 12146 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 12147 /* since changing of pause setting doesn't trigger any link 12148 * change event, the driver needs to update the 
current pause 12149 * result upon successful return of the phy_cfg command 12150 */ 12151 bp->link_info.pause = 12152 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 12153 bp->link_info.auto_pause_setting = 0; 12154 if (!bp->link_info.force_link_chng) 12155 bnxt_report_link(bp); 12156 } 12157 bp->link_info.force_link_chng = false; 12158 return rc; 12159 } 12160 12161 static void bnxt_hwrm_set_eee(struct bnxt *bp, 12162 struct hwrm_port_phy_cfg_input *req) 12163 { 12164 struct ethtool_keee *eee = &bp->eee; 12165 12166 if (eee->eee_enabled) { 12167 u16 eee_speeds; 12168 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 12169 12170 if (eee->tx_lpi_enabled) 12171 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 12172 else 12173 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 12174 12175 req->flags |= cpu_to_le32(flags); 12176 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 12177 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 12178 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 12179 } else { 12180 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 12181 } 12182 } 12183 12184 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 12185 { 12186 struct hwrm_port_phy_cfg_input *req; 12187 int rc; 12188 12189 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12190 if (rc) 12191 return rc; 12192 12193 if (set_pause) 12194 bnxt_hwrm_set_pause_common(bp, req); 12195 12196 bnxt_hwrm_set_link_common(bp, req); 12197 12198 if (set_eee) 12199 bnxt_hwrm_set_eee(bp, req); 12200 return hwrm_req_send(bp, req); 12201 } 12202 12203 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 12204 { 12205 struct hwrm_port_phy_cfg_input *req; 12206 int rc; 12207 12208 if (!BNXT_SINGLE_PF(bp)) 12209 return 0; 12210 12211 if (pci_num_vf(bp->pdev) && 12212 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 12213 return 0; 12214 12215 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12216 if (rc) 12217 return rc; 12218 12219 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 12220 rc = hwrm_req_send(bp, req); 12221 if (!rc) { 12222 mutex_lock(&bp->link_lock); 12223 /* Device is not obliged to bring the link down in certain scenarios, even 12224 * when forced. Setting the state unknown is consistent with 12225 * driver startup and will force link state to be reported 12226 * during subsequent open based on PORT_PHY_QCFG.
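 * The update below is done under bp->link_lock so it does not race with other paths that modify link_info under the same lock.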
12227 */ 12228 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 12229 mutex_unlock(&bp->link_lock); 12230 } 12231 return rc; 12232 } 12233 12234 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 12235 { 12236 #ifdef CONFIG_TEE_BNXT_FW 12237 int rc = tee_bnxt_fw_load(); 12238 12239 if (rc) 12240 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 12241 12242 return rc; 12243 #else 12244 netdev_err(bp->dev, "OP-TEE not supported\n"); 12245 return -ENODEV; 12246 #endif 12247 } 12248 12249 static int bnxt_try_recover_fw(struct bnxt *bp) 12250 { 12251 if (bp->fw_health && bp->fw_health->status_reliable) { 12252 int retry = 0, rc; 12253 u32 sts; 12254 12255 do { 12256 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 12257 rc = bnxt_hwrm_poll(bp); 12258 if (!BNXT_FW_IS_BOOTING(sts) && 12259 !BNXT_FW_IS_RECOVERING(sts)) 12260 break; 12261 retry++; 12262 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 12263 12264 if (!BNXT_FW_IS_HEALTHY(sts)) { 12265 netdev_err(bp->dev, 12266 "Firmware not responding, status: 0x%x\n", 12267 sts); 12268 rc = -ENODEV; 12269 } 12270 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 12271 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 12272 return bnxt_fw_reset_via_optee(bp); 12273 } 12274 return rc; 12275 } 12276 12277 return -ENODEV; 12278 } 12279 12280 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 12281 { 12282 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12283 12284 if (!BNXT_NEW_RM(bp)) 12285 return; /* no resource reservations required */ 12286 12287 hw_resc->resv_cp_rings = 0; 12288 hw_resc->resv_stat_ctxs = 0; 12289 hw_resc->resv_irqs = 0; 12290 hw_resc->resv_tx_rings = 0; 12291 hw_resc->resv_rx_rings = 0; 12292 hw_resc->resv_hw_ring_grps = 0; 12293 hw_resc->resv_vnics = 0; 12294 hw_resc->resv_rsscos_ctxs = 0; 12295 if (!fw_reset) { 12296 bp->tx_nr_rings = 0; 12297 bp->rx_nr_rings = 0; 12298 } 12299 } 12300 12301 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 12302 { 12303 int rc; 12304 12305 if (!BNXT_NEW_RM(bp)) 12306 return 0; /* no resource reservations required */ 12307 12308 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 12309 if (rc) 12310 netdev_err(bp->dev, "resc_qcaps failed\n"); 12311 12312 bnxt_clear_reservations(bp, fw_reset); 12313 12314 return rc; 12315 } 12316 12317 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 12318 { 12319 struct hwrm_func_drv_if_change_output *resp; 12320 struct hwrm_func_drv_if_change_input *req; 12321 bool fw_reset = !bp->irq_tbl; 12322 bool resc_reinit = false; 12323 bool caps_change = false; 12324 int rc, retry = 0; 12325 u32 flags = 0; 12326 12327 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 12328 return 0; 12329 12330 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 12331 if (rc) 12332 return rc; 12333 12334 if (up) 12335 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 12336 resp = hwrm_req_hold(bp, req); 12337 12338 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 12339 while (retry < BNXT_FW_IF_RETRY) { 12340 rc = hwrm_req_send(bp, req); 12341 if (rc != -EAGAIN) 12342 break; 12343 12344 msleep(50); 12345 retry++; 12346 } 12347 12348 if (rc == -EAGAIN) { 12349 hwrm_req_drop(bp, req); 12350 return rc; 12351 } else if (!rc) { 12352 flags = le32_to_cpu(resp->flags); 12353 } else if (up) { 12354 rc = bnxt_try_recover_fw(bp); 12355 fw_reset = true; 12356 } 12357 hwrm_req_drop(bp, req); 12358 if (rc) 12359 return rc; 12360 12361 if (!up) { 12362 bnxt_inv_fw_health_reg(bp); 12363 return 0; 12364 } 12365 12366 if (flags & 
FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 12367 resc_reinit = true; 12368 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 12369 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 12370 fw_reset = true; 12371 else 12372 bnxt_remap_fw_health_regs(bp); 12373 12374 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 12375 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 12376 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12377 return -ENODEV; 12378 } 12379 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) 12380 caps_change = true; 12381 12382 if (resc_reinit || fw_reset || caps_change) { 12383 if (fw_reset || caps_change) { 12384 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12385 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12386 bnxt_ulp_irq_stop(bp); 12387 bnxt_free_ctx_mem(bp, false); 12388 bnxt_dcb_free(bp); 12389 rc = bnxt_fw_init_one(bp); 12390 if (rc) { 12391 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12392 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12393 return rc; 12394 } 12395 bnxt_clear_int_mode(bp); 12396 rc = bnxt_init_int_mode(bp); 12397 if (rc) { 12398 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12399 netdev_err(bp->dev, "init int mode failed\n"); 12400 return rc; 12401 } 12402 } 12403 rc = bnxt_cancel_reservations(bp, fw_reset); 12404 } 12405 return rc; 12406 } 12407 12408 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 12409 { 12410 struct hwrm_port_led_qcaps_output *resp; 12411 struct hwrm_port_led_qcaps_input *req; 12412 struct bnxt_pf_info *pf = &bp->pf; 12413 int rc; 12414 12415 bp->num_leds = 0; 12416 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 12417 return 0; 12418 12419 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 12420 if (rc) 12421 return rc; 12422 12423 req->port_id = cpu_to_le16(pf->port_id); 12424 resp = hwrm_req_hold(bp, req); 12425 rc = hwrm_req_send(bp, req); 12426 if (rc) { 12427 hwrm_req_drop(bp, req); 12428 return rc; 12429 } 12430 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 12431 int i; 12432 12433 bp->num_leds = resp->num_leds; 12434 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 12435 bp->num_leds); 12436 for (i = 0; i < bp->num_leds; i++) { 12437 struct bnxt_led_info *led = &bp->leds[i]; 12438 __le16 caps = led->led_state_caps; 12439 12440 if (!led->led_group_id || 12441 !BNXT_LED_ALT_BLINK_CAP(caps)) { 12442 bp->num_leds = 0; 12443 break; 12444 } 12445 } 12446 } 12447 hwrm_req_drop(bp, req); 12448 return 0; 12449 } 12450 12451 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 12452 { 12453 struct hwrm_wol_filter_alloc_output *resp; 12454 struct hwrm_wol_filter_alloc_input *req; 12455 int rc; 12456 12457 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 12458 if (rc) 12459 return rc; 12460 12461 req->port_id = cpu_to_le16(bp->pf.port_id); 12462 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 12463 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 12464 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 12465 12466 resp = hwrm_req_hold(bp, req); 12467 rc = hwrm_req_send(bp, req); 12468 if (!rc) 12469 bp->wol_filter_id = resp->wol_filter_id; 12470 hwrm_req_drop(bp, req); 12471 return rc; 12472 } 12473 12474 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 12475 { 12476 struct hwrm_wol_filter_free_input *req; 12477 int rc; 12478 12479 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 12480 if (rc) 12481 return rc; 12482 12483 req->port_id = cpu_to_le16(bp->pf.port_id); 12484 req->enables = 
cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 12485 req->wol_filter_id = bp->wol_filter_id; 12486 12487 return hwrm_req_send(bp, req); 12488 } 12489 12490 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 12491 { 12492 struct hwrm_wol_filter_qcfg_output *resp; 12493 struct hwrm_wol_filter_qcfg_input *req; 12494 u16 next_handle = 0; 12495 int rc; 12496 12497 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 12498 if (rc) 12499 return rc; 12500 12501 req->port_id = cpu_to_le16(bp->pf.port_id); 12502 req->handle = cpu_to_le16(handle); 12503 resp = hwrm_req_hold(bp, req); 12504 rc = hwrm_req_send(bp, req); 12505 if (!rc) { 12506 next_handle = le16_to_cpu(resp->next_handle); 12507 if (next_handle != 0) { 12508 if (resp->wol_type == 12509 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 12510 bp->wol = 1; 12511 bp->wol_filter_id = resp->wol_filter_id; 12512 } 12513 } 12514 } 12515 hwrm_req_drop(bp, req); 12516 return next_handle; 12517 } 12518 12519 static void bnxt_get_wol_settings(struct bnxt *bp) 12520 { 12521 u16 handle = 0; 12522 12523 bp->wol = 0; 12524 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 12525 return; 12526 12527 do { 12528 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 12529 } while (handle && handle != 0xffff); 12530 } 12531 12532 static bool bnxt_eee_config_ok(struct bnxt *bp) 12533 { 12534 struct ethtool_keee *eee = &bp->eee; 12535 struct bnxt_link_info *link_info = &bp->link_info; 12536 12537 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 12538 return true; 12539 12540 if (eee->eee_enabled) { 12541 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 12542 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 12543 12544 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 12545 12546 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12547 eee->eee_enabled = 0; 12548 return false; 12549 } 12550 if (linkmode_andnot(tmp, eee->advertised, advertising)) { 12551 linkmode_and(eee->advertised, advertising, 12552 eee->supported); 12553 return false; 12554 } 12555 } 12556 return true; 12557 } 12558 12559 static int bnxt_update_phy_setting(struct bnxt *bp) 12560 { 12561 int rc; 12562 bool update_link = false; 12563 bool update_pause = false; 12564 bool update_eee = false; 12565 struct bnxt_link_info *link_info = &bp->link_info; 12566 12567 rc = bnxt_update_link(bp, true); 12568 if (rc) { 12569 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 12570 rc); 12571 return rc; 12572 } 12573 if (!BNXT_SINGLE_PF(bp)) 12574 return 0; 12575 12576 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12577 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 12578 link_info->req_flow_ctrl) 12579 update_pause = true; 12580 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12581 link_info->force_pause_setting != link_info->req_flow_ctrl) 12582 update_pause = true; 12583 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12584 if (BNXT_AUTO_MODE(link_info->auto_mode)) 12585 update_link = true; 12586 if (bnxt_force_speed_updated(link_info)) 12587 update_link = true; 12588 if (link_info->req_duplex != link_info->duplex_setting) 12589 update_link = true; 12590 } else { 12591 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 12592 update_link = true; 12593 if (bnxt_auto_speed_updated(link_info)) 12594 update_link = true; 12595 } 12596 12597 /* The last close may have shutdown the link, so need to call 12598 * PHY_CFG to bring it back up. 
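 * (bnxt_hwrm_shutdown_link() above forces the link down on close for a single PF, so the next open needs a fresh PORT_PHY_CFG.)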
12599 */ 12600 if (!BNXT_LINK_IS_UP(bp)) 12601 update_link = true; 12602 12603 if (!bnxt_eee_config_ok(bp)) 12604 update_eee = true; 12605 12606 if (update_link) 12607 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 12608 else if (update_pause) 12609 rc = bnxt_hwrm_set_pause(bp); 12610 if (rc) { 12611 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 12612 rc); 12613 return rc; 12614 } 12615 12616 return rc; 12617 } 12618 12619 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 12620 12621 static int bnxt_reinit_after_abort(struct bnxt *bp) 12622 { 12623 int rc; 12624 12625 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12626 return -EBUSY; 12627 12628 if (bp->dev->reg_state == NETREG_UNREGISTERED) 12629 return -ENODEV; 12630 12631 rc = bnxt_fw_init_one(bp); 12632 if (!rc) { 12633 bnxt_clear_int_mode(bp); 12634 rc = bnxt_init_int_mode(bp); 12635 if (!rc) { 12636 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12637 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12638 } 12639 } 12640 return rc; 12641 } 12642 12643 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 12644 { 12645 struct bnxt_ntuple_filter *ntp_fltr; 12646 struct bnxt_l2_filter *l2_fltr; 12647 12648 if (list_empty(&fltr->list)) 12649 return; 12650 12651 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 12652 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 12653 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 12654 atomic_inc(&l2_fltr->refcnt); 12655 ntp_fltr->l2_fltr = l2_fltr; 12656 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { 12657 bnxt_del_ntp_filter(bp, ntp_fltr); 12658 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", 12659 fltr->sw_id); 12660 } 12661 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { 12662 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); 12663 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { 12664 bnxt_del_l2_filter(bp, l2_fltr); 12665 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", 12666 fltr->sw_id); 12667 } 12668 } 12669 } 12670 12671 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) 12672 { 12673 struct bnxt_filter_base *usr_fltr, *tmp; 12674 12675 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) 12676 bnxt_cfg_one_usr_fltr(bp, usr_fltr); 12677 } 12678 12679 static int bnxt_set_xps_mapping(struct bnxt *bp) 12680 { 12681 int numa_node = dev_to_node(&bp->pdev->dev); 12682 unsigned int q_idx, map_idx, cpu, i; 12683 const struct cpumask *cpu_mask_ptr; 12684 int nr_cpus = num_online_cpus(); 12685 cpumask_t *q_map; 12686 int rc = 0; 12687 12688 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); 12689 if (!q_map) 12690 return -ENOMEM; 12691 12692 /* Create CPU mask for all TX queues across MQPRIO traffic classes. 12693 * Each TC has the same number of TX queues. The nth TX queue for each 12694 * TC will have the same CPU mask. 
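 * For example, with 2 TCs and 4 rings per TC, TX queues 0 and 4 get the same mask, queues 1 and 5 get the next one, and so on, since the mask index below is q_idx % tx_nr_rings_per_tc.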
12695 */ 12696 for (i = 0; i < nr_cpus; i++) { 12697 map_idx = i % bp->tx_nr_rings_per_tc; 12698 cpu = cpumask_local_spread(i, numa_node); 12699 cpu_mask_ptr = get_cpu_mask(cpu); 12700 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); 12701 } 12702 12703 /* Register CPU mask for each TX queue except the ones marked for XDP */ 12704 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { 12705 map_idx = q_idx % bp->tx_nr_rings_per_tc; 12706 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); 12707 if (rc) { 12708 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", 12709 q_idx); 12710 break; 12711 } 12712 } 12713 12714 kfree(q_map); 12715 12716 return rc; 12717 } 12718 12719 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12720 { 12721 int rc = 0; 12722 12723 netif_carrier_off(bp->dev); 12724 if (irq_re_init) { 12725 /* Reserve rings now if none were reserved at driver probe. */ 12726 rc = bnxt_init_dflt_ring_mode(bp); 12727 if (rc) { 12728 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 12729 return rc; 12730 } 12731 } 12732 rc = bnxt_reserve_rings(bp, irq_re_init); 12733 if (rc) 12734 return rc; 12735 12736 rc = bnxt_alloc_mem(bp, irq_re_init); 12737 if (rc) { 12738 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12739 goto open_err_free_mem; 12740 } 12741 12742 if (irq_re_init) { 12743 bnxt_init_napi(bp); 12744 rc = bnxt_request_irq(bp); 12745 if (rc) { 12746 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 12747 goto open_err_irq; 12748 } 12749 } 12750 12751 rc = bnxt_init_nic(bp, irq_re_init); 12752 if (rc) { 12753 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12754 goto open_err_irq; 12755 } 12756 12757 bnxt_enable_napi(bp); 12758 bnxt_debug_dev_init(bp); 12759 12760 if (link_re_init) { 12761 mutex_lock(&bp->link_lock); 12762 rc = bnxt_update_phy_setting(bp); 12763 mutex_unlock(&bp->link_lock); 12764 if (rc) { 12765 netdev_warn(bp->dev, "failed to update phy settings\n"); 12766 if (BNXT_SINGLE_PF(bp)) { 12767 bp->link_info.phy_retry = true; 12768 bp->link_info.phy_retry_expires = 12769 jiffies + 5 * HZ; 12770 } 12771 } 12772 } 12773 12774 if (irq_re_init) { 12775 udp_tunnel_nic_reset_ntf(bp->dev); 12776 rc = bnxt_set_xps_mapping(bp); 12777 if (rc) 12778 netdev_warn(bp->dev, "failed to set xps mapping\n"); 12779 } 12780 12781 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 12782 if (!static_key_enabled(&bnxt_xdp_locking_key)) 12783 static_branch_enable(&bnxt_xdp_locking_key); 12784 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 12785 static_branch_disable(&bnxt_xdp_locking_key); 12786 } 12787 set_bit(BNXT_STATE_OPEN, &bp->state); 12788 bnxt_enable_int(bp); 12789 /* Enable TX queues */ 12790 bnxt_tx_enable(bp); 12791 mod_timer(&bp->timer, jiffies + bp->current_interval); 12792 /* Poll link status and check for SFP+ module status */ 12793 mutex_lock(&bp->link_lock); 12794 bnxt_get_port_module_status(bp); 12795 mutex_unlock(&bp->link_lock); 12796 12797 /* VF-reps may need to be re-opened after the PF is re-opened */ 12798 if (BNXT_PF(bp)) 12799 bnxt_vf_reps_open(bp); 12800 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 12801 WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); 12802 bnxt_ptp_init_rtc(bp, true); 12803 bnxt_ptp_cfg_tstamp_filters(bp); 12804 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12805 bnxt_hwrm_realloc_rss_ctx_vnic(bp); 12806 bnxt_cfg_usr_fltrs(bp); 12807 return 0; 12808 12809 open_err_irq: 12810 bnxt_del_napi(bp); 12811 12812 open_err_free_mem: 12813 
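	/* Error unwind: free any buffers posted to the rings, release the IRQs, then free ring memory. */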
bnxt_free_skbs(bp); 12814 bnxt_free_irq(bp); 12815 bnxt_free_mem(bp, true); 12816 return rc; 12817 } 12818 12819 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12820 { 12821 int rc = 0; 12822 12823 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 12824 rc = -EIO; 12825 if (!rc) 12826 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 12827 if (rc) { 12828 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 12829 netif_close(bp->dev); 12830 } 12831 return rc; 12832 } 12833 12834 /* netdev instance lock held, open the NIC half way by allocating all 12835 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used 12836 * for offline self tests. 12837 */ 12838 int bnxt_half_open_nic(struct bnxt *bp) 12839 { 12840 int rc = 0; 12841 12842 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12843 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 12844 rc = -ENODEV; 12845 goto half_open_err; 12846 } 12847 12848 rc = bnxt_alloc_mem(bp, true); 12849 if (rc) { 12850 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12851 goto half_open_err; 12852 } 12853 bnxt_init_napi(bp); 12854 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12855 rc = bnxt_init_nic(bp, true); 12856 if (rc) { 12857 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12858 bnxt_del_napi(bp); 12859 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12860 goto half_open_err; 12861 } 12862 return 0; 12863 12864 half_open_err: 12865 bnxt_free_skbs(bp); 12866 bnxt_free_mem(bp, true); 12867 netif_close(bp->dev); 12868 return rc; 12869 } 12870 12871 /* netdev instance lock held, this call can only be made after a previous 12872 * successful call to bnxt_half_open_nic(). 12873 */ 12874 void bnxt_half_close_nic(struct bnxt *bp) 12875 { 12876 bnxt_hwrm_resource_free(bp, false, true); 12877 bnxt_del_napi(bp); 12878 bnxt_free_skbs(bp); 12879 bnxt_free_mem(bp, true); 12880 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12881 } 12882 12883 void bnxt_reenable_sriov(struct bnxt *bp) 12884 { 12885 if (BNXT_PF(bp)) { 12886 struct bnxt_pf_info *pf = &bp->pf; 12887 int n = pf->active_vfs; 12888 12889 if (n) 12890 bnxt_cfg_hw_sriov(bp, &n, true); 12891 } 12892 } 12893 12894 static int bnxt_open(struct net_device *dev) 12895 { 12896 struct bnxt *bp = netdev_priv(dev); 12897 int rc; 12898 12899 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12900 rc = bnxt_reinit_after_abort(bp); 12901 if (rc) { 12902 if (rc == -EBUSY) 12903 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 12904 else 12905 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 12906 return -ENODEV; 12907 } 12908 } 12909 12910 rc = bnxt_hwrm_if_change(bp, true); 12911 if (rc) 12912 return rc; 12913 12914 rc = __bnxt_open_nic(bp, true, true); 12915 if (rc) { 12916 bnxt_hwrm_if_change(bp, false); 12917 } else { 12918 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 12919 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12920 bnxt_queue_sp_work(bp, 12921 BNXT_RESTART_ULP_SP_EVENT); 12922 } 12923 } 12924 12925 return rc; 12926 } 12927 12928 static bool bnxt_drv_busy(struct bnxt *bp) 12929 { 12930 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 12931 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 12932 } 12933 12934 static void bnxt_get_ring_stats(struct bnxt *bp, 12935 struct rtnl_link_stats64 *stats); 12936 12937 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 12938 bool link_re_init) 12939 { 12940 /* Close the VF-reps before 
closing PF */ 12941 if (BNXT_PF(bp)) 12942 bnxt_vf_reps_close(bp); 12943 12944 /* Change device state to avoid TX queue wake up's */ 12945 bnxt_tx_disable(bp); 12946 12947 clear_bit(BNXT_STATE_OPEN, &bp->state); 12948 smp_mb__after_atomic(); 12949 while (bnxt_drv_busy(bp)) 12950 msleep(20); 12951 12952 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12953 bnxt_clear_rss_ctxs(bp); 12954 /* Flush rings and disable interrupts */ 12955 bnxt_shutdown_nic(bp, irq_re_init); 12956 12957 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 12958 12959 bnxt_debug_dev_exit(bp); 12960 bnxt_disable_napi(bp); 12961 timer_delete_sync(&bp->timer); 12962 bnxt_free_skbs(bp); 12963 12964 /* Save ring stats before shutdown */ 12965 if (bp->bnapi && irq_re_init) { 12966 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 12967 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); 12968 } 12969 if (irq_re_init) { 12970 bnxt_free_irq(bp); 12971 bnxt_del_napi(bp); 12972 } 12973 bnxt_free_mem(bp, irq_re_init); 12974 } 12975 12976 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12977 { 12978 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12979 /* If we get here, it means firmware reset is in progress 12980 * while we are trying to close. We can safely proceed with 12981 * the close because we are holding netdev instance lock. 12982 * Some firmware messages may fail as we proceed to close. 12983 * We set the ABORT_ERR flag here so that the FW reset thread 12984 * will later abort when it gets the netdev instance lock 12985 * and sees the flag. 12986 */ 12987 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 12988 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12989 } 12990 12991 #ifdef CONFIG_BNXT_SRIOV 12992 if (bp->sriov_cfg) { 12993 int rc; 12994 12995 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 12996 !bp->sriov_cfg, 12997 BNXT_SRIOV_CFG_WAIT_TMO); 12998 if (!rc) 12999 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); 13000 else if (rc < 0) 13001 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); 13002 } 13003 #endif 13004 __bnxt_close_nic(bp, irq_re_init, link_re_init); 13005 } 13006 13007 static int bnxt_close(struct net_device *dev) 13008 { 13009 struct bnxt *bp = netdev_priv(dev); 13010 13011 bnxt_close_nic(bp, true, true); 13012 bnxt_hwrm_shutdown_link(bp); 13013 bnxt_hwrm_if_change(bp, false); 13014 return 0; 13015 } 13016 13017 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 13018 u16 *val) 13019 { 13020 struct hwrm_port_phy_mdio_read_output *resp; 13021 struct hwrm_port_phy_mdio_read_input *req; 13022 int rc; 13023 13024 if (bp->hwrm_spec_code < 0x10a00) 13025 return -EOPNOTSUPP; 13026 13027 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 13028 if (rc) 13029 return rc; 13030 13031 req->port_id = cpu_to_le16(bp->pf.port_id); 13032 req->phy_addr = phy_addr; 13033 req->reg_addr = cpu_to_le16(reg & 0x1f); 13034 if (mdio_phy_id_is_c45(phy_addr)) { 13035 req->cl45_mdio = 1; 13036 req->phy_addr = mdio_phy_id_prtad(phy_addr); 13037 req->dev_addr = mdio_phy_id_devad(phy_addr); 13038 req->reg_addr = cpu_to_le16(reg); 13039 } 13040 13041 resp = hwrm_req_hold(bp, req); 13042 rc = hwrm_req_send(bp, req); 13043 if (!rc) 13044 *val = le16_to_cpu(resp->reg_data); 13045 hwrm_req_drop(bp, req); 13046 return rc; 13047 } 13048 13049 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 13050 u16 val) 13051 { 13052 
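	/* Like bnxt_hwrm_port_phy_read() above: a Clause 22 access uses only the low 5 bits of the register address, while a Clause 45 phy_addr (packed port/device address, e.g. built with mdio_phy_id_c45() from <linux/mdio.h>) carries the full 16-bit register address. The SIOCSMIIREG handler below passes the user-supplied phy_id straight through. */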
struct hwrm_port_phy_mdio_write_input *req; 13053 int rc; 13054 13055 if (bp->hwrm_spec_code < 0x10a00) 13056 return -EOPNOTSUPP; 13057 13058 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 13059 if (rc) 13060 return rc; 13061 13062 req->port_id = cpu_to_le16(bp->pf.port_id); 13063 req->phy_addr = phy_addr; 13064 req->reg_addr = cpu_to_le16(reg & 0x1f); 13065 if (mdio_phy_id_is_c45(phy_addr)) { 13066 req->cl45_mdio = 1; 13067 req->phy_addr = mdio_phy_id_prtad(phy_addr); 13068 req->dev_addr = mdio_phy_id_devad(phy_addr); 13069 req->reg_addr = cpu_to_le16(reg); 13070 } 13071 req->reg_data = cpu_to_le16(val); 13072 13073 return hwrm_req_send(bp, req); 13074 } 13075 13076 /* netdev instance lock held */ 13077 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13078 { 13079 struct mii_ioctl_data *mdio = if_mii(ifr); 13080 struct bnxt *bp = netdev_priv(dev); 13081 int rc; 13082 13083 switch (cmd) { 13084 case SIOCGMIIPHY: 13085 mdio->phy_id = bp->link_info.phy_addr; 13086 13087 fallthrough; 13088 case SIOCGMIIREG: { 13089 u16 mii_regval = 0; 13090 13091 if (!netif_running(dev)) 13092 return -EAGAIN; 13093 13094 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 13095 &mii_regval); 13096 mdio->val_out = mii_regval; 13097 return rc; 13098 } 13099 13100 case SIOCSMIIREG: 13101 if (!netif_running(dev)) 13102 return -EAGAIN; 13103 13104 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 13105 mdio->val_in); 13106 13107 case SIOCSHWTSTAMP: 13108 return bnxt_hwtstamp_set(dev, ifr); 13109 13110 case SIOCGHWTSTAMP: 13111 return bnxt_hwtstamp_get(dev, ifr); 13112 13113 default: 13114 /* do nothing */ 13115 break; 13116 } 13117 return -EOPNOTSUPP; 13118 } 13119 13120 static void bnxt_get_ring_stats(struct bnxt *bp, 13121 struct rtnl_link_stats64 *stats) 13122 { 13123 int i; 13124 13125 for (i = 0; i < bp->cp_nr_rings; i++) { 13126 struct bnxt_napi *bnapi = bp->bnapi[i]; 13127 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13128 u64 *sw = cpr->stats.sw_stats; 13129 13130 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 13131 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13132 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 13133 13134 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 13135 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 13136 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 13137 13138 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 13139 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 13140 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 13141 13142 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 13143 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 13144 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 13145 13146 stats->rx_missed_errors += 13147 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 13148 13149 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13150 13151 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 13152 13153 stats->rx_dropped += 13154 cpr->sw_stats->rx.rx_netpoll_discards + 13155 cpr->sw_stats->rx.rx_oom_discards; 13156 } 13157 } 13158 13159 static void bnxt_add_prev_stats(struct bnxt *bp, 13160 struct rtnl_link_stats64 *stats) 13161 { 13162 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 13163 13164 stats->rx_packets += prev_stats->rx_packets; 13165 stats->tx_packets += prev_stats->tx_packets; 13166 
stats->rx_bytes += prev_stats->rx_bytes; 13167 stats->tx_bytes += prev_stats->tx_bytes; 13168 stats->rx_missed_errors += prev_stats->rx_missed_errors; 13169 stats->multicast += prev_stats->multicast; 13170 stats->rx_dropped += prev_stats->rx_dropped; 13171 stats->tx_dropped += prev_stats->tx_dropped; 13172 } 13173 13174 static void 13175 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 13176 { 13177 struct bnxt *bp = netdev_priv(dev); 13178 13179 set_bit(BNXT_STATE_READ_STATS, &bp->state); 13180 /* Make sure bnxt_close_nic() sees that we are reading stats before 13181 * we check the BNXT_STATE_OPEN flag. 13182 */ 13183 smp_mb__after_atomic(); 13184 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13185 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13186 *stats = bp->net_stats_prev; 13187 return; 13188 } 13189 13190 bnxt_get_ring_stats(bp, stats); 13191 bnxt_add_prev_stats(bp, stats); 13192 13193 if (bp->flags & BNXT_FLAG_PORT_STATS) { 13194 u64 *rx = bp->port_stats.sw_stats; 13195 u64 *tx = bp->port_stats.sw_stats + 13196 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 13197 13198 stats->rx_crc_errors = 13199 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 13200 stats->rx_frame_errors = 13201 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 13202 stats->rx_length_errors = 13203 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 13204 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 13205 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 13206 stats->rx_errors = 13207 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 13208 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 13209 stats->collisions = 13210 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 13211 stats->tx_fifo_errors = 13212 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 13213 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 13214 } 13215 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13216 } 13217 13218 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 13219 struct bnxt_total_ring_err_stats *stats, 13220 struct bnxt_cp_ring_info *cpr) 13221 { 13222 struct bnxt_sw_stats *sw_stats = cpr->sw_stats; 13223 u64 *hw_stats = cpr->stats.sw_stats; 13224 13225 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 13226 stats->rx_total_resets += sw_stats->rx.rx_resets; 13227 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 13228 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 13229 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 13230 stats->rx_total_ring_discards += 13231 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 13232 stats->tx_total_resets += sw_stats->tx.tx_resets; 13233 stats->tx_total_ring_discards += 13234 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 13235 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 13236 } 13237 13238 void bnxt_get_ring_err_stats(struct bnxt *bp, 13239 struct bnxt_total_ring_err_stats *stats) 13240 { 13241 int i; 13242 13243 for (i = 0; i < bp->cp_nr_rings; i++) 13244 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 13245 } 13246 13247 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 13248 { 13249 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13250 struct net_device *dev = bp->dev; 13251 struct netdev_hw_addr *ha; 13252 u8 *haddr; 13253 int mc_count = 0; 13254 bool update = false; 13255 int off = 0; 13256 13257 netdev_for_each_mc_addr(ha, dev) { 13258 if (mc_count >= BNXT_MAX_MC_ADDRS) { 13259 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13260 
vnic->mc_list_count = 0; 13261 return false; 13262 } 13263 haddr = ha->addr; 13264 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 13265 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 13266 update = true; 13267 } 13268 off += ETH_ALEN; 13269 mc_count++; 13270 } 13271 if (mc_count) 13272 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 13273 13274 if (mc_count != vnic->mc_list_count) { 13275 vnic->mc_list_count = mc_count; 13276 update = true; 13277 } 13278 return update; 13279 } 13280 13281 static bool bnxt_uc_list_updated(struct bnxt *bp) 13282 { 13283 struct net_device *dev = bp->dev; 13284 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13285 struct netdev_hw_addr *ha; 13286 int off = 0; 13287 13288 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 13289 return true; 13290 13291 netdev_for_each_uc_addr(ha, dev) { 13292 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 13293 return true; 13294 13295 off += ETH_ALEN; 13296 } 13297 return false; 13298 } 13299 13300 static void bnxt_set_rx_mode(struct net_device *dev) 13301 { 13302 struct bnxt *bp = netdev_priv(dev); 13303 struct bnxt_vnic_info *vnic; 13304 bool mc_update = false; 13305 bool uc_update; 13306 u32 mask; 13307 13308 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 13309 return; 13310 13311 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13312 mask = vnic->rx_mask; 13313 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 13314 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 13315 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 13316 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 13317 13318 if (dev->flags & IFF_PROMISC) 13319 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13320 13321 uc_update = bnxt_uc_list_updated(bp); 13322 13323 if (dev->flags & IFF_BROADCAST) 13324 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 13325 if (dev->flags & IFF_ALLMULTI) { 13326 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13327 vnic->mc_list_count = 0; 13328 } else if (dev->flags & IFF_MULTICAST) { 13329 mc_update = bnxt_mc_list_updated(bp, &mask); 13330 } 13331 13332 if (mask != vnic->rx_mask || uc_update || mc_update) { 13333 vnic->rx_mask = mask; 13334 13335 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13336 } 13337 } 13338 13339 static int bnxt_cfg_rx_mode(struct bnxt *bp) 13340 { 13341 struct net_device *dev = bp->dev; 13342 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13343 struct netdev_hw_addr *ha; 13344 int i, off = 0, rc; 13345 bool uc_update; 13346 13347 netif_addr_lock_bh(dev); 13348 uc_update = bnxt_uc_list_updated(bp); 13349 netif_addr_unlock_bh(dev); 13350 13351 if (!uc_update) 13352 goto skip_uc; 13353 13354 for (i = 1; i < vnic->uc_filter_count; i++) { 13355 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 13356 13357 bnxt_hwrm_l2_filter_free(bp, fltr); 13358 bnxt_del_l2_filter(bp, fltr); 13359 } 13360 13361 vnic->uc_filter_count = 1; 13362 13363 netif_addr_lock_bh(dev); 13364 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 13365 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13366 } else { 13367 netdev_for_each_uc_addr(ha, dev) { 13368 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 13369 off += ETH_ALEN; 13370 vnic->uc_filter_count++; 13371 } 13372 } 13373 netif_addr_unlock_bh(dev); 13374 13375 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 13376 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 13377 if (rc) { 13378 if (BNXT_VF(bp) && rc == -ENODEV) { 13379 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13380 netdev_warn(bp->dev, "Cannot 
configure L2 filters while PF is unavailable, will retry\n");
				else
					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
				rc = 0;
			} else {
				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
			}
			vnic->uc_filter_count = i;
			return rc;
		}
	}
	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");

skip_uc:
	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !bnxt_promisc_ok(bp))
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
	}
	if (rc)
		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}

static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		/* No minimum rings were provisioned by the PF. Don't
		 * reserve rings by default when device is down.
		 */
		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
			return true;

		if (!netif_running(bp->dev))
			return false;
	}
#endif
	return true;
}

/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
			return true;
		return false;
	}
	/* 212 firmware is broken for aRFS */
	if (BNXT_FW_MAJ(bp) == 212)
		return false;
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
		return true;
	return false;
}

/* If runtime conditions support RFS */
bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
	struct bnxt_hw_rings hwr = {0};
	int max_vnics, max_rss_ctxs;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
		return bnxt_rfs_supported(bp);

	if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
		return false;

	hwr.grp = bp->rx_nr_rings;
	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
	if (new_rss_ctx)
		hwr.vnic++;
	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
	max_vnics = bnxt_get_max_func_vnics(bp);
	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
		if (bp->rx_nr_rings > 1)
			netdev_warn(bp->dev,
				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
				    min(max_rss_ctxs - 1, max_vnics - 1));
		return false;
	}

	if (!BNXT_NEW_RM(bp))
		return true;

	/* Do not reduce VNIC and RSS ctx reservations. There is a FW
	 * issue that will mess up the default VNIC if we reduce the
	 * reservations.
13486 */ 13487 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13488 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13489 return true; 13490 13491 bnxt_hwrm_reserve_rings(bp, &hwr); 13492 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13493 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13494 return true; 13495 13496 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 13497 hwr.vnic = 1; 13498 hwr.rss_ctx = 0; 13499 bnxt_hwrm_reserve_rings(bp, &hwr); 13500 return false; 13501 } 13502 13503 static netdev_features_t bnxt_fix_features(struct net_device *dev, 13504 netdev_features_t features) 13505 { 13506 struct bnxt *bp = netdev_priv(dev); 13507 netdev_features_t vlan_features; 13508 13509 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) 13510 features &= ~NETIF_F_NTUPLE; 13511 13512 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 13513 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 13514 13515 if (!(features & NETIF_F_GRO)) 13516 features &= ~NETIF_F_GRO_HW; 13517 13518 if (features & NETIF_F_GRO_HW) 13519 features &= ~NETIF_F_LRO; 13520 13521 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 13522 * turned on or off together. 13523 */ 13524 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 13525 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 13526 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13527 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13528 else if (vlan_features) 13529 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 13530 } 13531 #ifdef CONFIG_BNXT_SRIOV 13532 if (BNXT_VF(bp) && bp->vf.vlan) 13533 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13534 #endif 13535 return features; 13536 } 13537 13538 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 13539 bool link_re_init, u32 flags, bool update_tpa) 13540 { 13541 bnxt_close_nic(bp, irq_re_init, link_re_init); 13542 bp->flags = flags; 13543 if (update_tpa) 13544 bnxt_set_ring_params(bp); 13545 return bnxt_open_nic(bp, irq_re_init, link_re_init); 13546 } 13547 13548 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 13549 { 13550 bool update_tpa = false, update_ntuple = false; 13551 struct bnxt *bp = netdev_priv(dev); 13552 u32 flags = bp->flags; 13553 u32 changes; 13554 int rc = 0; 13555 bool re_init = false; 13556 13557 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 13558 if (features & NETIF_F_GRO_HW) 13559 flags |= BNXT_FLAG_GRO; 13560 else if (features & NETIF_F_LRO) 13561 flags |= BNXT_FLAG_LRO; 13562 13563 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 13564 flags &= ~BNXT_FLAG_TPA; 13565 13566 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13567 flags |= BNXT_FLAG_STRIP_VLAN; 13568 13569 if (features & NETIF_F_NTUPLE) 13570 flags |= BNXT_FLAG_RFS; 13571 else 13572 bnxt_clear_usr_fltrs(bp, true); 13573 13574 changes = flags ^ bp->flags; 13575 if (changes & BNXT_FLAG_TPA) { 13576 update_tpa = true; 13577 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 13578 (flags & BNXT_FLAG_TPA) == 0 || 13579 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13580 re_init = true; 13581 } 13582 13583 if (changes & ~BNXT_FLAG_TPA) 13584 re_init = true; 13585 13586 if (changes & BNXT_FLAG_RFS) 13587 update_ntuple = true; 13588 13589 if (flags != bp->flags) { 13590 u32 old_flags = bp->flags; 13591 13592 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13593 bp->flags = flags; 13594 if (update_tpa) 13595 bnxt_set_ring_params(bp); 13596 return rc; 13597 } 13598 13599 if (update_ntuple) 13600 return bnxt_reinit_features(bp, true, false, flags, update_tpa); 13601 13602 if (re_init) 13603 
return bnxt_reinit_features(bp, false, false, flags, update_tpa); 13604 13605 if (update_tpa) { 13606 bp->flags = flags; 13607 rc = bnxt_set_tpa(bp, 13608 (flags & BNXT_FLAG_TPA) ? 13609 true : false); 13610 if (rc) 13611 bp->flags = old_flags; 13612 } 13613 } 13614 return rc; 13615 } 13616 13617 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 13618 u8 **nextp) 13619 { 13620 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 13621 struct hop_jumbo_hdr *jhdr; 13622 int hdr_count = 0; 13623 u8 *nexthdr; 13624 int start; 13625 13626 /* Check that there are at most 2 IPv6 extension headers, no 13627 * fragment header, and each is <= 64 bytes. 13628 */ 13629 start = nw_off + sizeof(*ip6h); 13630 nexthdr = &ip6h->nexthdr; 13631 while (ipv6_ext_hdr(*nexthdr)) { 13632 struct ipv6_opt_hdr *hp; 13633 int hdrlen; 13634 13635 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 13636 *nexthdr == NEXTHDR_FRAGMENT) 13637 return false; 13638 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 13639 skb_headlen(skb), NULL); 13640 if (!hp) 13641 return false; 13642 if (*nexthdr == NEXTHDR_AUTH) 13643 hdrlen = ipv6_authlen(hp); 13644 else 13645 hdrlen = ipv6_optlen(hp); 13646 13647 if (hdrlen > 64) 13648 return false; 13649 13650 /* The ext header may be a hop-by-hop header inserted for 13651 * big TCP purposes. This will be removed before sending 13652 * from NIC, so do not count it. 13653 */ 13654 if (*nexthdr == NEXTHDR_HOP) { 13655 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 13656 goto increment_hdr; 13657 13658 jhdr = (struct hop_jumbo_hdr *)hp; 13659 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 13660 jhdr->nexthdr != IPPROTO_TCP) 13661 goto increment_hdr; 13662 13663 goto next_hdr; 13664 } 13665 increment_hdr: 13666 hdr_count++; 13667 next_hdr: 13668 nexthdr = &hp->nexthdr; 13669 start += hdrlen; 13670 } 13671 if (nextp) { 13672 /* Caller will check inner protocol */ 13673 if (skb->encapsulation) { 13674 *nextp = nexthdr; 13675 return true; 13676 } 13677 *nextp = NULL; 13678 } 13679 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 13680 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 13681 } 13682 13683 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. 
*/ 13684 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 13685 { 13686 struct udphdr *uh = udp_hdr(skb); 13687 __be16 udp_port = uh->dest; 13688 13689 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 13690 udp_port != bp->vxlan_gpe_port) 13691 return false; 13692 if (skb->inner_protocol == htons(ETH_P_TEB)) { 13693 struct ethhdr *eh = inner_eth_hdr(skb); 13694 13695 switch (eh->h_proto) { 13696 case htons(ETH_P_IP): 13697 return true; 13698 case htons(ETH_P_IPV6): 13699 return bnxt_exthdr_check(bp, skb, 13700 skb_inner_network_offset(skb), 13701 NULL); 13702 } 13703 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 13704 return true; 13705 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 13706 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13707 NULL); 13708 } 13709 return false; 13710 } 13711 13712 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 13713 { 13714 switch (l4_proto) { 13715 case IPPROTO_UDP: 13716 return bnxt_udp_tunl_check(bp, skb); 13717 case IPPROTO_IPIP: 13718 return true; 13719 case IPPROTO_GRE: { 13720 switch (skb->inner_protocol) { 13721 default: 13722 return false; 13723 case htons(ETH_P_IP): 13724 return true; 13725 case htons(ETH_P_IPV6): 13726 fallthrough; 13727 } 13728 } 13729 case IPPROTO_IPV6: 13730 /* Check ext headers of inner ipv6 */ 13731 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13732 NULL); 13733 } 13734 return false; 13735 } 13736 13737 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 13738 struct net_device *dev, 13739 netdev_features_t features) 13740 { 13741 struct bnxt *bp = netdev_priv(dev); 13742 u8 *l4_proto; 13743 13744 features = vlan_features_check(skb, features); 13745 switch (vlan_get_protocol(skb)) { 13746 case htons(ETH_P_IP): 13747 if (!skb->encapsulation) 13748 return features; 13749 l4_proto = &ip_hdr(skb)->protocol; 13750 if (bnxt_tunl_check(bp, skb, *l4_proto)) 13751 return features; 13752 break; 13753 case htons(ETH_P_IPV6): 13754 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 13755 &l4_proto)) 13756 break; 13757 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 13758 return features; 13759 break; 13760 } 13761 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 13762 } 13763 13764 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 13765 u32 *reg_buf) 13766 { 13767 struct hwrm_dbg_read_direct_output *resp; 13768 struct hwrm_dbg_read_direct_input *req; 13769 __le32 *dbg_reg_buf; 13770 dma_addr_t mapping; 13771 int rc, i; 13772 13773 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 13774 if (rc) 13775 return rc; 13776 13777 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 13778 &mapping); 13779 if (!dbg_reg_buf) { 13780 rc = -ENOMEM; 13781 goto dbg_rd_reg_exit; 13782 } 13783 13784 req->host_dest_addr = cpu_to_le64(mapping); 13785 13786 resp = hwrm_req_hold(bp, req); 13787 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 13788 req->read_len32 = cpu_to_le32(num_words); 13789 13790 rc = hwrm_req_send(bp, req); 13791 if (rc || resp->error_code) { 13792 rc = -EIO; 13793 goto dbg_rd_reg_exit; 13794 } 13795 for (i = 0; i < num_words; i++) 13796 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 13797 13798 dbg_rd_reg_exit: 13799 hwrm_req_drop(bp, req); 13800 return rc; 13801 } 13802 13803 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 13804 u32 ring_id, u32 *prod, u32 *cons) 13805 { 13806 struct hwrm_dbg_ring_info_get_output *resp; 13807 
struct hwrm_dbg_ring_info_get_input *req; 13808 int rc; 13809 13810 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 13811 if (rc) 13812 return rc; 13813 13814 req->ring_type = ring_type; 13815 req->fw_ring_id = cpu_to_le32(ring_id); 13816 resp = hwrm_req_hold(bp, req); 13817 rc = hwrm_req_send(bp, req); 13818 if (!rc) { 13819 *prod = le32_to_cpu(resp->producer_index); 13820 *cons = le32_to_cpu(resp->consumer_index); 13821 } 13822 hwrm_req_drop(bp, req); 13823 return rc; 13824 } 13825 13826 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 13827 { 13828 struct bnxt_tx_ring_info *txr; 13829 int i = bnapi->index, j; 13830 13831 bnxt_for_each_napi_tx(j, bnapi, txr) 13832 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 13833 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 13834 txr->tx_cons); 13835 } 13836 13837 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 13838 { 13839 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 13840 int i = bnapi->index; 13841 13842 if (!rxr) 13843 return; 13844 13845 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 13846 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 13847 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 13848 rxr->rx_sw_agg_prod); 13849 } 13850 13851 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 13852 { 13853 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13854 int i = bnapi->index; 13855 13856 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 13857 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 13858 } 13859 13860 static void bnxt_dbg_dump_states(struct bnxt *bp) 13861 { 13862 int i; 13863 struct bnxt_napi *bnapi; 13864 13865 for (i = 0; i < bp->cp_nr_rings; i++) { 13866 bnapi = bp->bnapi[i]; 13867 if (netif_msg_drv(bp)) { 13868 bnxt_dump_tx_sw_state(bnapi); 13869 bnxt_dump_rx_sw_state(bnapi); 13870 bnxt_dump_cp_sw_state(bnapi); 13871 } 13872 } 13873 } 13874 13875 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 13876 { 13877 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 13878 struct hwrm_ring_reset_input *req; 13879 struct bnxt_napi *bnapi = rxr->bnapi; 13880 struct bnxt_cp_ring_info *cpr; 13881 u16 cp_ring_id; 13882 int rc; 13883 13884 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 13885 if (rc) 13886 return rc; 13887 13888 cpr = &bnapi->cp_ring; 13889 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 13890 req->cmpl_ring = cpu_to_le16(cp_ring_id); 13891 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 13892 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 13893 return hwrm_req_send_silent(bp, req); 13894 } 13895 13896 static void bnxt_reset_task(struct bnxt *bp, bool silent) 13897 { 13898 if (!silent) 13899 bnxt_dbg_dump_states(bp); 13900 if (netif_running(bp->dev)) { 13901 bnxt_close_nic(bp, !silent, false); 13902 bnxt_open_nic(bp, !silent, false); 13903 } 13904 } 13905 13906 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 13907 { 13908 struct bnxt *bp = netdev_priv(dev); 13909 13910 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 13911 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 13912 } 13913 13914 static void bnxt_fw_health_check(struct bnxt *bp) 13915 { 13916 struct bnxt_fw_health *fw_health = bp->fw_health; 13917 struct pci_dev *pdev = bp->pdev; 13918 u32 val; 13919 13920 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13921 return; 13922 13923 /* Make sure 
it is enabled before checking the tmr_counter. */ 13924 smp_rmb(); 13925 if (fw_health->tmr_counter) { 13926 fw_health->tmr_counter--; 13927 return; 13928 } 13929 13930 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13931 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 13932 fw_health->arrests++; 13933 goto fw_reset; 13934 } 13935 13936 fw_health->last_fw_heartbeat = val; 13937 13938 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13939 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 13940 fw_health->discoveries++; 13941 goto fw_reset; 13942 } 13943 13944 fw_health->tmr_counter = fw_health->tmr_multiplier; 13945 return; 13946 13947 fw_reset: 13948 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 13949 } 13950 13951 static void bnxt_timer(struct timer_list *t) 13952 { 13953 struct bnxt *bp = from_timer(bp, t, timer); 13954 struct net_device *dev = bp->dev; 13955 13956 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 13957 return; 13958 13959 if (atomic_read(&bp->intr_sem) != 0) 13960 goto bnxt_restart_timer; 13961 13962 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 13963 bnxt_fw_health_check(bp); 13964 13965 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 13966 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 13967 13968 if (bnxt_tc_flower_enabled(bp)) 13969 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 13970 13971 #ifdef CONFIG_RFS_ACCEL 13972 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 13973 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13974 #endif /*CONFIG_RFS_ACCEL*/ 13975 13976 if (bp->link_info.phy_retry) { 13977 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 13978 bp->link_info.phy_retry = false; 13979 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 13980 } else { 13981 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 13982 } 13983 } 13984 13985 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13986 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13987 13988 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 13989 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 13990 13991 bnxt_restart_timer: 13992 mod_timer(&bp->timer, jiffies + bp->current_interval); 13993 } 13994 13995 static void bnxt_lock_sp(struct bnxt *bp) 13996 { 13997 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 13998 * set. If the device is being closed, bnxt_close() may be holding 13999 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear. 14000 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev 14001 * instance lock. 
14002 */ 14003 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14004 netdev_lock(bp->dev); 14005 } 14006 14007 static void bnxt_unlock_sp(struct bnxt *bp) 14008 { 14009 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14010 netdev_unlock(bp->dev); 14011 } 14012 14013 /* Only called from bnxt_sp_task() */ 14014 static void bnxt_reset(struct bnxt *bp, bool silent) 14015 { 14016 bnxt_lock_sp(bp); 14017 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 14018 bnxt_reset_task(bp, silent); 14019 bnxt_unlock_sp(bp); 14020 } 14021 14022 /* Only called from bnxt_sp_task() */ 14023 static void bnxt_rx_ring_reset(struct bnxt *bp) 14024 { 14025 int i; 14026 14027 bnxt_lock_sp(bp); 14028 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14029 bnxt_unlock_sp(bp); 14030 return; 14031 } 14032 /* Disable and flush TPA before resetting the RX ring */ 14033 if (bp->flags & BNXT_FLAG_TPA) 14034 bnxt_set_tpa(bp, false); 14035 for (i = 0; i < bp->rx_nr_rings; i++) { 14036 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 14037 struct bnxt_cp_ring_info *cpr; 14038 int rc; 14039 14040 if (!rxr->bnapi->in_reset) 14041 continue; 14042 14043 rc = bnxt_hwrm_rx_ring_reset(bp, i); 14044 if (rc) { 14045 if (rc == -EINVAL || rc == -EOPNOTSUPP) 14046 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 14047 else 14048 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 14049 rc); 14050 bnxt_reset_task(bp, true); 14051 break; 14052 } 14053 bnxt_free_one_rx_ring_skbs(bp, rxr); 14054 rxr->rx_prod = 0; 14055 rxr->rx_agg_prod = 0; 14056 rxr->rx_sw_agg_prod = 0; 14057 rxr->rx_next_cons = 0; 14058 rxr->bnapi->in_reset = false; 14059 bnxt_alloc_one_rx_ring(bp, i); 14060 cpr = &rxr->bnapi->cp_ring; 14061 cpr->sw_stats->rx.rx_resets++; 14062 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14063 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 14064 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 14065 } 14066 if (bp->flags & BNXT_FLAG_TPA) 14067 bnxt_set_tpa(bp, true); 14068 bnxt_unlock_sp(bp); 14069 } 14070 14071 static void bnxt_fw_fatal_close(struct bnxt *bp) 14072 { 14073 bnxt_tx_disable(bp); 14074 bnxt_disable_napi(bp); 14075 bnxt_disable_int_sync(bp); 14076 bnxt_free_irq(bp); 14077 bnxt_clear_int_mode(bp); 14078 pci_disable_device(bp->pdev); 14079 } 14080 14081 static void bnxt_fw_reset_close(struct bnxt *bp) 14082 { 14083 /* When firmware is in fatal state, quiesce device and disable 14084 * bus master to prevent any potential bad DMAs before freeing 14085 * kernel memory. 
14086 */ 14087 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 14088 u16 val = 0; 14089 14090 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14091 if (val == 0xffff) 14092 bp->fw_reset_min_dsecs = 0; 14093 bnxt_fw_fatal_close(bp); 14094 } 14095 __bnxt_close_nic(bp, true, false); 14096 bnxt_vf_reps_free(bp); 14097 bnxt_clear_int_mode(bp); 14098 bnxt_hwrm_func_drv_unrgtr(bp); 14099 if (pci_is_enabled(bp->pdev)) 14100 pci_disable_device(bp->pdev); 14101 bnxt_free_ctx_mem(bp, false); 14102 } 14103 14104 static bool is_bnxt_fw_ok(struct bnxt *bp) 14105 { 14106 struct bnxt_fw_health *fw_health = bp->fw_health; 14107 bool no_heartbeat = false, has_reset = false; 14108 u32 val; 14109 14110 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 14111 if (val == fw_health->last_fw_heartbeat) 14112 no_heartbeat = true; 14113 14114 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14115 if (val != fw_health->last_fw_reset_cnt) 14116 has_reset = true; 14117 14118 if (!no_heartbeat && has_reset) 14119 return true; 14120 14121 return false; 14122 } 14123 14124 /* netdev instance lock is acquired before calling this function */ 14125 static void bnxt_force_fw_reset(struct bnxt *bp) 14126 { 14127 struct bnxt_fw_health *fw_health = bp->fw_health; 14128 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14129 u32 wait_dsecs; 14130 14131 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 14132 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 14133 return; 14134 14135 /* we have to serialize with bnxt_refclk_read()*/ 14136 if (ptp) { 14137 unsigned long flags; 14138 14139 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14140 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14141 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14142 } else { 14143 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14144 } 14145 bnxt_fw_reset_close(bp); 14146 wait_dsecs = fw_health->master_func_wait_dsecs; 14147 if (fw_health->primary) { 14148 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 14149 wait_dsecs = 0; 14150 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14151 } else { 14152 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 14153 wait_dsecs = fw_health->normal_func_wait_dsecs; 14154 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14155 } 14156 14157 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 14158 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 14159 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14160 } 14161 14162 void bnxt_fw_exception(struct bnxt *bp) 14163 { 14164 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 14165 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14166 bnxt_ulp_stop(bp); 14167 bnxt_lock_sp(bp); 14168 bnxt_force_fw_reset(bp); 14169 bnxt_unlock_sp(bp); 14170 } 14171 14172 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 14173 * < 0 on error. 
14174 */ 14175 static int bnxt_get_registered_vfs(struct bnxt *bp) 14176 { 14177 #ifdef CONFIG_BNXT_SRIOV 14178 int rc; 14179 14180 if (!BNXT_PF(bp)) 14181 return 0; 14182 14183 rc = bnxt_hwrm_func_qcfg(bp); 14184 if (rc) { 14185 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 14186 return rc; 14187 } 14188 if (bp->pf.registered_vfs) 14189 return bp->pf.registered_vfs; 14190 if (bp->sriov_cfg) 14191 return 1; 14192 #endif 14193 return 0; 14194 } 14195 14196 void bnxt_fw_reset(struct bnxt *bp) 14197 { 14198 bnxt_ulp_stop(bp); 14199 bnxt_lock_sp(bp); 14200 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 14201 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14202 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14203 int n = 0, tmo; 14204 14205 /* we have to serialize with bnxt_refclk_read()*/ 14206 if (ptp) { 14207 unsigned long flags; 14208 14209 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14210 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14211 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14212 } else { 14213 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14214 } 14215 if (bp->pf.active_vfs && 14216 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 14217 n = bnxt_get_registered_vfs(bp); 14218 if (n < 0) { 14219 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 14220 n); 14221 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14222 netif_close(bp->dev); 14223 goto fw_reset_exit; 14224 } else if (n > 0) { 14225 u16 vf_tmo_dsecs = n * 10; 14226 14227 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 14228 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 14229 bp->fw_reset_state = 14230 BNXT_FW_RESET_STATE_POLL_VF; 14231 bnxt_queue_fw_reset_work(bp, HZ / 10); 14232 goto fw_reset_exit; 14233 } 14234 bnxt_fw_reset_close(bp); 14235 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14236 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14237 tmo = HZ / 10; 14238 } else { 14239 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14240 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14241 } 14242 bnxt_queue_fw_reset_work(bp, tmo); 14243 } 14244 fw_reset_exit: 14245 bnxt_unlock_sp(bp); 14246 } 14247 14248 static void bnxt_chk_missed_irq(struct bnxt *bp) 14249 { 14250 int i; 14251 14252 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 14253 return; 14254 14255 for (i = 0; i < bp->cp_nr_rings; i++) { 14256 struct bnxt_napi *bnapi = bp->bnapi[i]; 14257 struct bnxt_cp_ring_info *cpr; 14258 u32 fw_ring_id; 14259 int j; 14260 14261 if (!bnapi) 14262 continue; 14263 14264 cpr = &bnapi->cp_ring; 14265 for (j = 0; j < cpr->cp_ring_count; j++) { 14266 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 14267 u32 val[2]; 14268 14269 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 14270 continue; 14271 14272 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 14273 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 14274 continue; 14275 } 14276 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 14277 bnxt_dbg_hwrm_ring_info_get(bp, 14278 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 14279 fw_ring_id, &val[0], &val[1]); 14280 cpr->sw_stats->cmn.missed_irqs++; 14281 } 14282 } 14283 } 14284 14285 static void bnxt_cfg_ntp_filters(struct bnxt *); 14286 14287 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 14288 { 14289 struct bnxt_link_info *link_info = &bp->link_info; 14290 14291 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 14292 link_info->autoneg = BNXT_AUTONEG_SPEED; 14293 if (bp->hwrm_spec_code >= 0x10201) { 14294 if (link_info->auto_pause_setting & 14295 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 14296 link_info->autoneg 
|= BNXT_AUTONEG_FLOW_CTRL; 14297 } else { 14298 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 14299 } 14300 bnxt_set_auto_speed(link_info); 14301 } else { 14302 bnxt_set_force_speed(link_info); 14303 link_info->req_duplex = link_info->duplex_setting; 14304 } 14305 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 14306 link_info->req_flow_ctrl = 14307 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 14308 else 14309 link_info->req_flow_ctrl = link_info->force_pause_setting; 14310 } 14311 14312 static void bnxt_fw_echo_reply(struct bnxt *bp) 14313 { 14314 struct bnxt_fw_health *fw_health = bp->fw_health; 14315 struct hwrm_func_echo_response_input *req; 14316 int rc; 14317 14318 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 14319 if (rc) 14320 return; 14321 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 14322 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 14323 hwrm_req_send(bp, req); 14324 } 14325 14326 static void bnxt_ulp_restart(struct bnxt *bp) 14327 { 14328 bnxt_ulp_stop(bp); 14329 bnxt_ulp_start(bp, 0); 14330 } 14331 14332 static void bnxt_sp_task(struct work_struct *work) 14333 { 14334 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 14335 14336 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14337 smp_mb__after_atomic(); 14338 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14339 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14340 return; 14341 } 14342 14343 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { 14344 bnxt_ulp_restart(bp); 14345 bnxt_reenable_sriov(bp); 14346 } 14347 14348 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 14349 bnxt_cfg_rx_mode(bp); 14350 14351 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 14352 bnxt_cfg_ntp_filters(bp); 14353 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 14354 bnxt_hwrm_exec_fwd_req(bp); 14355 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 14356 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 14357 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 14358 bnxt_hwrm_port_qstats(bp, 0); 14359 bnxt_hwrm_port_qstats_ext(bp, 0); 14360 bnxt_accumulate_all_stats(bp); 14361 } 14362 14363 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 14364 int rc; 14365 14366 mutex_lock(&bp->link_lock); 14367 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 14368 &bp->sp_event)) 14369 bnxt_hwrm_phy_qcaps(bp); 14370 14371 rc = bnxt_update_link(bp, true); 14372 if (rc) 14373 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 14374 rc); 14375 14376 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 14377 &bp->sp_event)) 14378 bnxt_init_ethtool_link_settings(bp); 14379 mutex_unlock(&bp->link_lock); 14380 } 14381 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 14382 int rc; 14383 14384 mutex_lock(&bp->link_lock); 14385 rc = bnxt_update_phy_setting(bp); 14386 mutex_unlock(&bp->link_lock); 14387 if (rc) { 14388 netdev_warn(bp->dev, "update phy settings retry failed\n"); 14389 } else { 14390 bp->link_info.phy_retry = false; 14391 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 14392 } 14393 } 14394 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 14395 mutex_lock(&bp->link_lock); 14396 bnxt_get_port_module_status(bp); 14397 mutex_unlock(&bp->link_lock); 14398 } 14399 14400 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 14401 bnxt_tc_flow_stats_work(bp); 14402 14403 if 
(test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 14404 bnxt_chk_missed_irq(bp); 14405 14406 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 14407 bnxt_fw_echo_reply(bp); 14408 14409 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 14410 bnxt_hwmon_notify_event(bp); 14411 14412 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 14413 * must be the last functions to be called before exiting. 14414 */ 14415 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 14416 bnxt_reset(bp, false); 14417 14418 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 14419 bnxt_reset(bp, true); 14420 14421 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 14422 bnxt_rx_ring_reset(bp); 14423 14424 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 14425 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 14426 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 14427 bnxt_devlink_health_fw_report(bp); 14428 else 14429 bnxt_fw_reset(bp); 14430 } 14431 14432 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 14433 if (!is_bnxt_fw_ok(bp)) 14434 bnxt_devlink_health_fw_report(bp); 14435 } 14436 14437 smp_mb__before_atomic(); 14438 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14439 } 14440 14441 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14442 int *max_cp); 14443 14444 /* Under netdev instance lock */ 14445 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 14446 int tx_xdp) 14447 { 14448 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 14449 struct bnxt_hw_rings hwr = {0}; 14450 int rx_rings = rx; 14451 int rc; 14452 14453 if (tcs) 14454 tx_sets = tcs; 14455 14456 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 14457 14458 if (max_rx < rx_rings) 14459 return -ENOMEM; 14460 14461 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14462 rx_rings <<= 1; 14463 14464 hwr.rx = rx_rings; 14465 hwr.tx = tx * tx_sets + tx_xdp; 14466 if (max_tx < hwr.tx) 14467 return -ENOMEM; 14468 14469 hwr.vnic = bnxt_get_total_vnics(bp, rx); 14470 14471 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 14472 hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; 14473 if (max_cp < hwr.cp) 14474 return -ENOMEM; 14475 hwr.stat = hwr.cp; 14476 if (BNXT_NEW_RM(bp)) { 14477 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); 14478 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); 14479 hwr.grp = rx; 14480 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 14481 } 14482 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 14483 hwr.cp_p5 = hwr.tx + rx; 14484 rc = bnxt_hwrm_check_rings(bp, &hwr); 14485 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { 14486 if (!bnxt_ulp_registered(bp->edev)) { 14487 hwr.cp += bnxt_get_ulp_msix_num(bp); 14488 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); 14489 } 14490 if (hwr.cp > bp->total_irqs) { 14491 int total_msix = bnxt_change_msix(bp, hwr.cp); 14492 14493 if (total_msix < hwr.cp) { 14494 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", 14495 hwr.cp, total_msix); 14496 rc = -ENOSPC; 14497 } 14498 } 14499 } 14500 return rc; 14501 } 14502 14503 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 14504 { 14505 if (bp->bar2) { 14506 pci_iounmap(pdev, bp->bar2); 14507 bp->bar2 = NULL; 14508 } 14509 14510 if (bp->bar1) { 14511 pci_iounmap(pdev, bp->bar1); 14512 bp->bar1 = NULL; 14513 } 14514 14515 if (bp->bar0) { 14516 pci_iounmap(pdev, bp->bar0); 14517 bp->bar0 = NULL; 14518 } 14519 } 14520 14521 static void bnxt_cleanup_pci(struct bnxt *bp) 14522 { 14523 bnxt_unmap_bars(bp, bp->pdev); 14524 pci_release_regions(bp->pdev); 14525 if (pci_is_enabled(bp->pdev)) 14526 pci_disable_device(bp->pdev); 14527 } 14528 14529 static void bnxt_init_dflt_coal(struct bnxt *bp) 14530 { 14531 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 14532 struct bnxt_coal *coal; 14533 u16 flags = 0; 14534 14535 if (coal_cap->cmpl_params & 14536 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 14537 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 14538 14539 /* Tick values in micro seconds. 14540 * 1 coal_buf x bufs_per_record = 1 completion record. 14541 */ 14542 coal = &bp->rx_coal; 14543 coal->coal_ticks = 10; 14544 coal->coal_bufs = 30; 14545 coal->coal_ticks_irq = 1; 14546 coal->coal_bufs_irq = 2; 14547 coal->idle_thresh = 50; 14548 coal->bufs_per_record = 2; 14549 coal->budget = 64; /* NAPI budget */ 14550 coal->flags = flags; 14551 14552 coal = &bp->tx_coal; 14553 coal->coal_ticks = 28; 14554 coal->coal_bufs = 30; 14555 coal->coal_ticks_irq = 2; 14556 coal->coal_bufs_irq = 2; 14557 coal->bufs_per_record = 1; 14558 coal->flags = flags; 14559 14560 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 14561 } 14562 14563 /* FW that pre-reserves 1 VNIC per function */ 14564 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 14565 { 14566 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 14567 14568 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14569 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 14570 return true; 14571 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14572 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 14573 return true; 14574 return false; 14575 } 14576 14577 static int bnxt_fw_init_one_p1(struct bnxt *bp) 14578 { 14579 int rc; 14580 14581 bp->fw_cap = 0; 14582 rc = bnxt_hwrm_ver_get(bp); 14583 /* FW may be unresponsive after FLR. FLR must complete within 100 msec 14584 * so wait before continuing with recovery. 
14585 */ 14586 if (rc) 14587 msleep(100); 14588 bnxt_try_map_fw_health_reg(bp); 14589 if (rc) { 14590 rc = bnxt_try_recover_fw(bp); 14591 if (rc) 14592 return rc; 14593 rc = bnxt_hwrm_ver_get(bp); 14594 if (rc) 14595 return rc; 14596 } 14597 14598 bnxt_nvm_cfg_ver_get(bp); 14599 14600 rc = bnxt_hwrm_func_reset(bp); 14601 if (rc) 14602 return -ENODEV; 14603 14604 bnxt_hwrm_fw_set_time(bp); 14605 return 0; 14606 } 14607 14608 static int bnxt_fw_init_one_p2(struct bnxt *bp) 14609 { 14610 int rc; 14611 14612 /* Get the MAX capabilities for this function */ 14613 rc = bnxt_hwrm_func_qcaps(bp); 14614 if (rc) { 14615 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 14616 rc); 14617 return -ENODEV; 14618 } 14619 14620 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 14621 if (rc) 14622 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 14623 rc); 14624 14625 if (bnxt_alloc_fw_health(bp)) { 14626 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 14627 } else { 14628 rc = bnxt_hwrm_error_recovery_qcfg(bp); 14629 if (rc) 14630 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 14631 rc); 14632 } 14633 14634 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 14635 if (rc) 14636 return -ENODEV; 14637 14638 rc = bnxt_alloc_crash_dump_mem(bp); 14639 if (rc) 14640 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", 14641 rc); 14642 if (!rc) { 14643 rc = bnxt_hwrm_crash_dump_mem_cfg(bp); 14644 if (rc) { 14645 bnxt_free_crash_dump_mem(bp); 14646 netdev_warn(bp->dev, 14647 "hwrm crash dump mem failure rc: %d\n", rc); 14648 } 14649 } 14650 14651 if (bnxt_fw_pre_resv_vnics(bp)) 14652 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 14653 14654 bnxt_hwrm_func_qcfg(bp); 14655 bnxt_hwrm_vnic_qcaps(bp); 14656 bnxt_hwrm_port_led_qcaps(bp); 14657 bnxt_ethtool_init(bp); 14658 if (bp->fw_cap & BNXT_FW_CAP_PTP) 14659 __bnxt_hwrm_ptp_qcfg(bp); 14660 bnxt_dcb_init(bp); 14661 bnxt_hwmon_init(bp); 14662 return 0; 14663 } 14664 14665 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 14666 { 14667 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 14668 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 14669 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 14670 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 14671 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 14672 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 14673 bp->rss_hash_delta = bp->rss_hash_cfg; 14674 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 14675 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 14676 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 14677 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 14678 } 14679 } 14680 14681 static void bnxt_set_dflt_rfs(struct bnxt *bp) 14682 { 14683 struct net_device *dev = bp->dev; 14684 14685 dev->hw_features &= ~NETIF_F_NTUPLE; 14686 dev->features &= ~NETIF_F_NTUPLE; 14687 bp->flags &= ~BNXT_FLAG_RFS; 14688 if (bnxt_rfs_supported(bp)) { 14689 dev->hw_features |= NETIF_F_NTUPLE; 14690 if (bnxt_rfs_capable(bp, false)) { 14691 bp->flags |= BNXT_FLAG_RFS; 14692 dev->features |= NETIF_F_NTUPLE; 14693 } 14694 } 14695 } 14696 14697 static void bnxt_fw_init_one_p3(struct bnxt *bp) 14698 { 14699 struct pci_dev *pdev = bp->pdev; 14700 14701 bnxt_set_dflt_rss_hash_type(bp); 14702 bnxt_set_dflt_rfs(bp); 14703 14704 bnxt_get_wol_settings(bp); 14705 if (bp->flags & BNXT_FLAG_WOL_CAP) 14706 device_set_wakeup_enable(&pdev->dev, bp->wol); 14707 else 14708 device_set_wakeup_capable(&pdev->dev, false); 14709 14710 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 14711 
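	/* Query interrupt coalescing parameter capabilities from the
	 * firmware (see bnxt_init_dflt_coal(), which consults bp->coal_cap).
	 */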
bnxt_hwrm_coal_params_qcaps(bp); 14712 } 14713 14714 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 14715 14716 int bnxt_fw_init_one(struct bnxt *bp) 14717 { 14718 int rc; 14719 14720 rc = bnxt_fw_init_one_p1(bp); 14721 if (rc) { 14722 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 14723 return rc; 14724 } 14725 rc = bnxt_fw_init_one_p2(bp); 14726 if (rc) { 14727 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 14728 return rc; 14729 } 14730 rc = bnxt_probe_phy(bp, false); 14731 if (rc) 14732 return rc; 14733 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 14734 if (rc) 14735 return rc; 14736 14737 bnxt_fw_init_one_p3(bp); 14738 return 0; 14739 } 14740 14741 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 14742 { 14743 struct bnxt_fw_health *fw_health = bp->fw_health; 14744 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 14745 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 14746 u32 reg_type, reg_off, delay_msecs; 14747 14748 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 14749 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 14750 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 14751 switch (reg_type) { 14752 case BNXT_FW_HEALTH_REG_TYPE_CFG: 14753 pci_write_config_dword(bp->pdev, reg_off, val); 14754 break; 14755 case BNXT_FW_HEALTH_REG_TYPE_GRC: 14756 writel(reg_off & BNXT_GRC_BASE_MASK, 14757 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 14758 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 14759 fallthrough; 14760 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 14761 writel(val, bp->bar0 + reg_off); 14762 break; 14763 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 14764 writel(val, bp->bar1 + reg_off); 14765 break; 14766 } 14767 if (delay_msecs) { 14768 pci_read_config_dword(bp->pdev, 0, &val); 14769 msleep(delay_msecs); 14770 } 14771 } 14772 14773 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 14774 { 14775 struct hwrm_func_qcfg_output *resp; 14776 struct hwrm_func_qcfg_input *req; 14777 bool result = true; /* firmware will enforce if unknown */ 14778 14779 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 14780 return result; 14781 14782 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 14783 return result; 14784 14785 req->fid = cpu_to_le16(0xffff); 14786 resp = hwrm_req_hold(bp, req); 14787 if (!hwrm_req_send(bp, req)) 14788 result = !!(le16_to_cpu(resp->flags) & 14789 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 14790 hwrm_req_drop(bp, req); 14791 return result; 14792 } 14793 14794 static void bnxt_reset_all(struct bnxt *bp) 14795 { 14796 struct bnxt_fw_health *fw_health = bp->fw_health; 14797 int i, rc; 14798 14799 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14800 bnxt_fw_reset_via_optee(bp); 14801 bp->fw_reset_timestamp = jiffies; 14802 return; 14803 } 14804 14805 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 14806 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 14807 bnxt_fw_reset_writel(bp, i); 14808 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 14809 struct hwrm_fw_reset_input *req; 14810 14811 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 14812 if (!rc) { 14813 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 14814 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 14815 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 14816 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 14817 rc = hwrm_req_send(bp, req); 14818 } 14819 if (rc != -ENODEV) 14820 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 14821 } 14822 bp->fw_reset_timestamp = jiffies; 14823 } 14824 14825 static bool 
bnxt_fw_reset_timeout(struct bnxt *bp) 14826 { 14827 return time_after(jiffies, bp->fw_reset_timestamp + 14828 (bp->fw_reset_max_dsecs * HZ / 10)); 14829 } 14830 14831 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 14832 { 14833 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14834 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 14835 bnxt_dl_health_fw_status_update(bp, false); 14836 bp->fw_reset_state = 0; 14837 netif_close(bp->dev); 14838 } 14839 14840 static void bnxt_fw_reset_task(struct work_struct *work) 14841 { 14842 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 14843 int rc = 0; 14844 14845 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14846 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 14847 return; 14848 } 14849 14850 switch (bp->fw_reset_state) { 14851 case BNXT_FW_RESET_STATE_POLL_VF: { 14852 int n = bnxt_get_registered_vfs(bp); 14853 int tmo; 14854 14855 if (n < 0) { 14856 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 14857 n, jiffies_to_msecs(jiffies - 14858 bp->fw_reset_timestamp)); 14859 goto fw_reset_abort; 14860 } else if (n > 0) { 14861 if (bnxt_fw_reset_timeout(bp)) { 14862 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14863 bp->fw_reset_state = 0; 14864 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 14865 n); 14866 goto ulp_start; 14867 } 14868 bnxt_queue_fw_reset_work(bp, HZ / 10); 14869 return; 14870 } 14871 bp->fw_reset_timestamp = jiffies; 14872 netdev_lock(bp->dev); 14873 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 14874 bnxt_fw_reset_abort(bp, rc); 14875 netdev_unlock(bp->dev); 14876 goto ulp_start; 14877 } 14878 bnxt_fw_reset_close(bp); 14879 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14880 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14881 tmo = HZ / 10; 14882 } else { 14883 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14884 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14885 } 14886 netdev_unlock(bp->dev); 14887 bnxt_queue_fw_reset_work(bp, tmo); 14888 return; 14889 } 14890 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 14891 u32 val; 14892 14893 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14894 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 14895 !bnxt_fw_reset_timeout(bp)) { 14896 bnxt_queue_fw_reset_work(bp, HZ / 5); 14897 return; 14898 } 14899 14900 if (!bp->fw_health->primary) { 14901 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 14902 14903 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14904 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14905 return; 14906 } 14907 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14908 } 14909 fallthrough; 14910 case BNXT_FW_RESET_STATE_RESET_FW: 14911 bnxt_reset_all(bp); 14912 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14913 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 14914 return; 14915 case BNXT_FW_RESET_STATE_ENABLE_DEV: 14916 bnxt_inv_fw_health_reg(bp); 14917 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 14918 !bp->fw_reset_min_dsecs) { 14919 u16 val; 14920 14921 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14922 if (val == 0xffff) { 14923 if (bnxt_fw_reset_timeout(bp)) { 14924 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 14925 rc = -ETIMEDOUT; 14926 goto fw_reset_abort; 14927 } 14928 bnxt_queue_fw_reset_work(bp, HZ / 1000); 14929 return; 14930 } 14931 } 14932 clear_bit(BNXT_STATE_FW_FATAL_COND, 
&bp->state); 14933 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 14934 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 14935 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 14936 bnxt_dl_remote_reload(bp); 14937 if (pci_enable_device(bp->pdev)) { 14938 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 14939 rc = -ENODEV; 14940 goto fw_reset_abort; 14941 } 14942 pci_set_master(bp->pdev); 14943 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 14944 fallthrough; 14945 case BNXT_FW_RESET_STATE_POLL_FW: 14946 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 14947 rc = bnxt_hwrm_poll(bp); 14948 if (rc) { 14949 if (bnxt_fw_reset_timeout(bp)) { 14950 netdev_err(bp->dev, "Firmware reset aborted\n"); 14951 goto fw_reset_abort_status; 14952 } 14953 bnxt_queue_fw_reset_work(bp, HZ / 5); 14954 return; 14955 } 14956 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 14957 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 14958 fallthrough; 14959 case BNXT_FW_RESET_STATE_OPENING: 14960 while (!netdev_trylock(bp->dev)) { 14961 bnxt_queue_fw_reset_work(bp, HZ / 10); 14962 return; 14963 } 14964 rc = bnxt_open(bp->dev); 14965 if (rc) { 14966 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 14967 bnxt_fw_reset_abort(bp, rc); 14968 netdev_unlock(bp->dev); 14969 goto ulp_start; 14970 } 14971 14972 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 14973 bp->fw_health->enabled) { 14974 bp->fw_health->last_fw_reset_cnt = 14975 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14976 } 14977 bp->fw_reset_state = 0; 14978 /* Make sure fw_reset_state is 0 before clearing the flag */ 14979 smp_mb__before_atomic(); 14980 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14981 bnxt_ptp_reapply_pps(bp); 14982 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 14983 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 14984 bnxt_dl_health_fw_recovery_done(bp); 14985 bnxt_dl_health_fw_status_update(bp, true); 14986 } 14987 netdev_unlock(bp->dev); 14988 bnxt_ulp_start(bp, 0); 14989 bnxt_reenable_sriov(bp); 14990 netdev_lock(bp->dev); 14991 bnxt_vf_reps_alloc(bp); 14992 bnxt_vf_reps_open(bp); 14993 netdev_unlock(bp->dev); 14994 break; 14995 } 14996 return; 14997 14998 fw_reset_abort_status: 14999 if (bp->fw_health->status_reliable || 15000 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 15001 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 15002 15003 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 15004 } 15005 fw_reset_abort: 15006 netdev_lock(bp->dev); 15007 bnxt_fw_reset_abort(bp, rc); 15008 netdev_unlock(bp->dev); 15009 ulp_start: 15010 bnxt_ulp_start(bp, rc); 15011 } 15012 15013 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 15014 { 15015 int rc; 15016 struct bnxt *bp = netdev_priv(dev); 15017 15018 SET_NETDEV_DEV(dev, &pdev->dev); 15019 15020 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 15021 rc = pci_enable_device(pdev); 15022 if (rc) { 15023 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 15024 goto init_err; 15025 } 15026 15027 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 15028 dev_err(&pdev->dev, 15029 "Cannot find PCI device base address, aborting\n"); 15030 rc = -ENODEV; 15031 goto init_err_disable; 15032 } 15033 15034 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 15035 if (rc) { 15036 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 15037 goto init_err_disable; 15038 } 15039 15040 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 15041 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 15042 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 15043 rc = -EIO; 15044 goto init_err_release; 15045 } 15046 15047 pci_set_master(pdev); 15048 15049 bp->dev = dev; 15050 bp->pdev = pdev; 15051 15052 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 15053 * determines the BAR size. 15054 */ 15055 bp->bar0 = pci_ioremap_bar(pdev, 0); 15056 if (!bp->bar0) { 15057 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 15058 rc = -ENOMEM; 15059 goto init_err_release; 15060 } 15061 15062 bp->bar2 = pci_ioremap_bar(pdev, 4); 15063 if (!bp->bar2) { 15064 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 15065 rc = -ENOMEM; 15066 goto init_err_release; 15067 } 15068 15069 INIT_WORK(&bp->sp_task, bnxt_sp_task); 15070 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 15071 15072 spin_lock_init(&bp->ntp_fltr_lock); 15073 #if BITS_PER_LONG == 32 15074 spin_lock_init(&bp->db_lock); 15075 #endif 15076 15077 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 15078 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 15079 15080 timer_setup(&bp->timer, bnxt_timer, 0); 15081 bp->current_interval = BNXT_TIMER_INTERVAL; 15082 15083 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 15084 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 15085 15086 clear_bit(BNXT_STATE_OPEN, &bp->state); 15087 return 0; 15088 15089 init_err_release: 15090 bnxt_unmap_bars(bp, pdev); 15091 pci_release_regions(pdev); 15092 15093 init_err_disable: 15094 pci_disable_device(pdev); 15095 15096 init_err: 15097 return rc; 15098 } 15099 15100 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 15101 { 15102 struct sockaddr *addr = p; 15103 struct bnxt *bp = netdev_priv(dev); 15104 int rc = 0; 15105 15106 netdev_assert_locked(dev); 15107 15108 if (!is_valid_ether_addr(addr->sa_data)) 15109 return -EADDRNOTAVAIL; 15110 15111 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 15112 return 0; 15113 15114 rc = bnxt_approve_mac(bp, addr->sa_data, true); 15115 if (rc) 15116 return rc; 15117 15118 eth_hw_addr_set(dev, addr->sa_data); 15119 bnxt_clear_usr_fltrs(bp, true); 15120 if (netif_running(dev)) { 15121 bnxt_close_nic(bp, false, false); 15122 rc = bnxt_open_nic(bp, false, false); 15123 } 15124 15125 return rc; 15126 } 15127 15128 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 15129 { 15130 struct bnxt *bp = netdev_priv(dev); 15131 15132 netdev_assert_locked(dev); 15133 15134 if (netif_running(dev)) 15135 bnxt_close_nic(bp, true, false); 15136 15137 WRITE_ONCE(dev->mtu, new_mtu); 15138 15139 /* MTU change may change the AGG ring settings if an XDP multi-buffer 15140 * program is attached. We need to set the AGG rings settings and 15141 * rx_skb_func accordingly. 
15142 */ 15143 if (READ_ONCE(bp->xdp_prog)) 15144 bnxt_set_rx_skb_mode(bp, true); 15145 15146 bnxt_set_ring_params(bp); 15147 15148 if (netif_running(dev)) 15149 return bnxt_open_nic(bp, true, false); 15150 15151 return 0; 15152 } 15153 15154 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 15155 { 15156 struct bnxt *bp = netdev_priv(dev); 15157 bool sh = false; 15158 int rc, tx_cp; 15159 15160 if (tc > bp->max_tc) { 15161 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 15162 tc, bp->max_tc); 15163 return -EINVAL; 15164 } 15165 15166 if (bp->num_tc == tc) 15167 return 0; 15168 15169 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 15170 sh = true; 15171 15172 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 15173 sh, tc, bp->tx_nr_rings_xdp); 15174 if (rc) 15175 return rc; 15176 15177 /* Needs to close the device and do hw resource re-allocations */ 15178 if (netif_running(bp->dev)) 15179 bnxt_close_nic(bp, true, false); 15180 15181 if (tc) { 15182 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 15183 netdev_set_num_tc(dev, tc); 15184 bp->num_tc = tc; 15185 } else { 15186 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15187 netdev_reset_tc(dev); 15188 bp->num_tc = 0; 15189 } 15190 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 15191 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 15192 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 15193 tx_cp + bp->rx_nr_rings; 15194 15195 if (netif_running(bp->dev)) 15196 return bnxt_open_nic(bp, true, false); 15197 15198 return 0; 15199 } 15200 15201 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 15202 void *cb_priv) 15203 { 15204 struct bnxt *bp = cb_priv; 15205 15206 if (!bnxt_tc_flower_enabled(bp) || 15207 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 15208 return -EOPNOTSUPP; 15209 15210 switch (type) { 15211 case TC_SETUP_CLSFLOWER: 15212 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 15213 default: 15214 return -EOPNOTSUPP; 15215 } 15216 } 15217 15218 LIST_HEAD(bnxt_block_cb_list); 15219 15220 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 15221 void *type_data) 15222 { 15223 struct bnxt *bp = netdev_priv(dev); 15224 15225 switch (type) { 15226 case TC_SETUP_BLOCK: 15227 return flow_block_cb_setup_simple(type_data, 15228 &bnxt_block_cb_list, 15229 bnxt_setup_tc_block_cb, 15230 bp, bp, true); 15231 case TC_SETUP_QDISC_MQPRIO: { 15232 struct tc_mqprio_qopt *mqprio = type_data; 15233 15234 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 15235 15236 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 15237 } 15238 default: 15239 return -EOPNOTSUPP; 15240 } 15241 } 15242 15243 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 15244 const struct sk_buff *skb) 15245 { 15246 struct bnxt_vnic_info *vnic; 15247 15248 if (skb) 15249 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 15250 15251 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 15252 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 15253 } 15254 15255 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 15256 u32 idx) 15257 { 15258 struct hlist_head *head; 15259 int bit_id; 15260 15261 spin_lock_bh(&bp->ntp_fltr_lock); 15262 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); 15263 if (bit_id < 0) { 15264 spin_unlock_bh(&bp->ntp_fltr_lock); 15265 return -ENOMEM; 15266 } 15267 15268 fltr->base.sw_id = (u16)bit_id; 15269 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 15270 fltr->base.flags |= BNXT_ACT_RING_DST; 15271 head = 
&bp->ntp_fltr_hash_tbl[idx]; 15272 hlist_add_head_rcu(&fltr->base.hash, head); 15273 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 15274 bnxt_insert_usr_fltr(bp, &fltr->base); 15275 bp->ntp_fltr_count++; 15276 spin_unlock_bh(&bp->ntp_fltr_lock); 15277 return 0; 15278 } 15279 15280 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 15281 struct bnxt_ntuple_filter *f2) 15282 { 15283 struct bnxt_flow_masks *masks1 = &f1->fmasks; 15284 struct bnxt_flow_masks *masks2 = &f2->fmasks; 15285 struct flow_keys *keys1 = &f1->fkeys; 15286 struct flow_keys *keys2 = &f2->fkeys; 15287 15288 if (keys1->basic.n_proto != keys2->basic.n_proto || 15289 keys1->basic.ip_proto != keys2->basic.ip_proto) 15290 return false; 15291 15292 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 15293 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 15294 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || 15295 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || 15296 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) 15297 return false; 15298 } else { 15299 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, 15300 &keys2->addrs.v6addrs.src) || 15301 !ipv6_addr_equal(&masks1->addrs.v6addrs.src, 15302 &masks2->addrs.v6addrs.src) || 15303 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, 15304 &keys2->addrs.v6addrs.dst) || 15305 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, 15306 &masks2->addrs.v6addrs.dst)) 15307 return false; 15308 } 15309 15310 return keys1->ports.src == keys2->ports.src && 15311 masks1->ports.src == masks2->ports.src && 15312 keys1->ports.dst == keys2->ports.dst && 15313 masks1->ports.dst == masks2->ports.dst && 15314 keys1->control.flags == keys2->control.flags && 15315 f1->l2_fltr == f2->l2_fltr; 15316 } 15317 15318 struct bnxt_ntuple_filter * 15319 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 15320 struct bnxt_ntuple_filter *fltr, u32 idx) 15321 { 15322 struct bnxt_ntuple_filter *f; 15323 struct hlist_head *head; 15324 15325 head = &bp->ntp_fltr_hash_tbl[idx]; 15326 hlist_for_each_entry_rcu(f, head, base.hash) { 15327 if (bnxt_fltr_match(f, fltr)) 15328 return f; 15329 } 15330 return NULL; 15331 } 15332 15333 #ifdef CONFIG_RFS_ACCEL 15334 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 15335 u16 rxq_index, u32 flow_id) 15336 { 15337 struct bnxt *bp = netdev_priv(dev); 15338 struct bnxt_ntuple_filter *fltr, *new_fltr; 15339 struct flow_keys *fkeys; 15340 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 15341 struct bnxt_l2_filter *l2_fltr; 15342 int rc = 0, idx; 15343 u32 flags; 15344 15345 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 15346 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 15347 atomic_inc(&l2_fltr->refcnt); 15348 } else { 15349 struct bnxt_l2_key key; 15350 15351 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 15352 key.vlan = 0; 15353 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 15354 if (!l2_fltr) 15355 return -EINVAL; 15356 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 15357 bnxt_del_l2_filter(bp, l2_fltr); 15358 return -EINVAL; 15359 } 15360 } 15361 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 15362 if (!new_fltr) { 15363 bnxt_del_l2_filter(bp, l2_fltr); 15364 return -ENOMEM; 15365 } 15366 15367 fkeys = &new_fltr->fkeys; 15368 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 15369 rc = -EPROTONOSUPPORT; 15370 goto err_free; 15371 } 15372 15373 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 15374 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 15375 ((fkeys->basic.ip_proto != 
IPPROTO_TCP) && 15376 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 15377 rc = -EPROTONOSUPPORT; 15378 goto err_free; 15379 } 15380 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; 15381 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 15382 if (bp->hwrm_spec_code < 0x10601) { 15383 rc = -EPROTONOSUPPORT; 15384 goto err_free; 15385 } 15386 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; 15387 } 15388 flags = fkeys->control.flags; 15389 if (((flags & FLOW_DIS_ENCAPSULATION) && 15390 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 15391 rc = -EPROTONOSUPPORT; 15392 goto err_free; 15393 } 15394 new_fltr->l2_fltr = l2_fltr; 15395 15396 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 15397 rcu_read_lock(); 15398 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 15399 if (fltr) { 15400 rc = fltr->base.sw_id; 15401 rcu_read_unlock(); 15402 goto err_free; 15403 } 15404 rcu_read_unlock(); 15405 15406 new_fltr->flow_id = flow_id; 15407 new_fltr->base.rxq = rxq_index; 15408 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 15409 if (!rc) { 15410 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 15411 return new_fltr->base.sw_id; 15412 } 15413 15414 err_free: 15415 bnxt_del_l2_filter(bp, l2_fltr); 15416 kfree(new_fltr); 15417 return rc; 15418 } 15419 #endif 15420 15421 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 15422 { 15423 spin_lock_bh(&bp->ntp_fltr_lock); 15424 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 15425 spin_unlock_bh(&bp->ntp_fltr_lock); 15426 return; 15427 } 15428 hlist_del_rcu(&fltr->base.hash); 15429 bnxt_del_one_usr_fltr(bp, &fltr->base); 15430 bp->ntp_fltr_count--; 15431 spin_unlock_bh(&bp->ntp_fltr_lock); 15432 bnxt_del_l2_filter(bp, fltr->l2_fltr); 15433 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 15434 kfree_rcu(fltr, base.rcu); 15435 } 15436 15437 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 15438 { 15439 #ifdef CONFIG_RFS_ACCEL 15440 int i; 15441 15442 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 15443 struct hlist_head *head; 15444 struct hlist_node *tmp; 15445 struct bnxt_ntuple_filter *fltr; 15446 int rc; 15447 15448 head = &bp->ntp_fltr_hash_tbl[i]; 15449 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 15450 bool del = false; 15451 15452 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 15453 if (fltr->base.flags & BNXT_ACT_NO_AGING) 15454 continue; 15455 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 15456 fltr->flow_id, 15457 fltr->base.sw_id)) { 15458 bnxt_hwrm_cfa_ntuple_filter_free(bp, 15459 fltr); 15460 del = true; 15461 } 15462 } else { 15463 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 15464 fltr); 15465 if (rc) 15466 del = true; 15467 else 15468 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 15469 } 15470 15471 if (del) 15472 bnxt_del_ntp_filter(bp, fltr); 15473 } 15474 } 15475 #endif 15476 } 15477 15478 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 15479 unsigned int entry, struct udp_tunnel_info *ti) 15480 { 15481 struct bnxt *bp = netdev_priv(netdev); 15482 unsigned int cmd; 15483 15484 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15485 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 15486 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15487 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 15488 else 15489 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 15490 15491 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 15492 } 15493 15494 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 15495 
unsigned int entry, struct udp_tunnel_info *ti) 15496 { 15497 struct bnxt *bp = netdev_priv(netdev); 15498 unsigned int cmd; 15499 15500 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15501 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 15502 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15503 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 15504 else 15505 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 15506 15507 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 15508 } 15509 15510 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 15511 .set_port = bnxt_udp_tunnel_set_port, 15512 .unset_port = bnxt_udp_tunnel_unset_port, 15513 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 15514 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15515 .tables = { 15516 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15517 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15518 }, 15519 }, bnxt_udp_tunnels_p7 = { 15520 .set_port = bnxt_udp_tunnel_set_port, 15521 .unset_port = bnxt_udp_tunnel_unset_port, 15522 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 15523 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15524 .tables = { 15525 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15526 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15527 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 15528 }, 15529 }; 15530 15531 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 15532 struct net_device *dev, u32 filter_mask, 15533 int nlflags) 15534 { 15535 struct bnxt *bp = netdev_priv(dev); 15536 15537 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 15538 nlflags, filter_mask, NULL); 15539 } 15540 15541 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 15542 u16 flags, struct netlink_ext_ack *extack) 15543 { 15544 struct bnxt *bp = netdev_priv(dev); 15545 struct nlattr *attr, *br_spec; 15546 int rem, rc = 0; 15547 15548 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 15549 return -EOPNOTSUPP; 15550 15551 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 15552 if (!br_spec) 15553 return -EINVAL; 15554 15555 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 15556 u16 mode; 15557 15558 mode = nla_get_u16(attr); 15559 if (mode == bp->br_mode) 15560 break; 15561 15562 rc = bnxt_hwrm_set_br_mode(bp, mode); 15563 if (!rc) 15564 bp->br_mode = mode; 15565 break; 15566 } 15567 return rc; 15568 } 15569 15570 int bnxt_get_port_parent_id(struct net_device *dev, 15571 struct netdev_phys_item_id *ppid) 15572 { 15573 struct bnxt *bp = netdev_priv(dev); 15574 15575 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 15576 return -EOPNOTSUPP; 15577 15578 /* The PF and its VF-reps only support the switchdev framework */ 15579 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 15580 return -EOPNOTSUPP; 15581 15582 ppid->id_len = sizeof(bp->dsn); 15583 memcpy(ppid->id, bp->dsn, ppid->id_len); 15584 15585 return 0; 15586 } 15587 15588 static const struct net_device_ops bnxt_netdev_ops = { 15589 .ndo_open = bnxt_open, 15590 .ndo_start_xmit = bnxt_start_xmit, 15591 .ndo_stop = bnxt_close, 15592 .ndo_get_stats64 = bnxt_get_stats64, 15593 .ndo_set_rx_mode = bnxt_set_rx_mode, 15594 .ndo_eth_ioctl = bnxt_ioctl, 15595 .ndo_validate_addr = eth_validate_addr, 15596 .ndo_set_mac_address = bnxt_change_mac_addr, 15597 .ndo_change_mtu = bnxt_change_mtu, 15598 .ndo_fix_features = bnxt_fix_features, 15599 .ndo_set_features = bnxt_set_features, 15600 .ndo_features_check = bnxt_features_check, 15601
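/* The SR-IOV and RFS callbacks below are compiled in only when
 * CONFIG_BNXT_SRIOV and CONFIG_RFS_ACCEL, respectively, are enabled.
 */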
.ndo_tx_timeout = bnxt_tx_timeout, 15602 #ifdef CONFIG_BNXT_SRIOV 15603 .ndo_get_vf_config = bnxt_get_vf_config, 15604 .ndo_set_vf_mac = bnxt_set_vf_mac, 15605 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 15606 .ndo_set_vf_rate = bnxt_set_vf_bw, 15607 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 15608 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 15609 .ndo_set_vf_trust = bnxt_set_vf_trust, 15610 #endif 15611 .ndo_setup_tc = bnxt_setup_tc, 15612 #ifdef CONFIG_RFS_ACCEL 15613 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 15614 #endif 15615 .ndo_bpf = bnxt_xdp, 15616 .ndo_xdp_xmit = bnxt_xdp_xmit, 15617 .ndo_bridge_getlink = bnxt_bridge_getlink, 15618 .ndo_bridge_setlink = bnxt_bridge_setlink, 15619 }; 15620 15621 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, 15622 struct netdev_queue_stats_rx *stats) 15623 { 15624 struct bnxt *bp = netdev_priv(dev); 15625 struct bnxt_cp_ring_info *cpr; 15626 u64 *sw; 15627 15628 if (!bp->bnapi) 15629 return; 15630 15631 cpr = &bp->bnapi[i]->cp_ring; 15632 sw = cpr->stats.sw_stats; 15633 15634 stats->packets = 0; 15635 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 15636 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 15637 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 15638 15639 stats->bytes = 0; 15640 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 15641 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 15642 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 15643 15644 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; 15645 } 15646 15647 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, 15648 struct netdev_queue_stats_tx *stats) 15649 { 15650 struct bnxt *bp = netdev_priv(dev); 15651 struct bnxt_napi *bnapi; 15652 u64 *sw; 15653 15654 if (!bp->tx_ring) 15655 return; 15656 15657 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; 15658 sw = bnapi->cp_ring.stats.sw_stats; 15659 15660 stats->packets = 0; 15661 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 15662 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 15663 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 15664 15665 stats->bytes = 0; 15666 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 15667 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 15668 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 15669 } 15670 15671 static void bnxt_get_base_stats(struct net_device *dev, 15672 struct netdev_queue_stats_rx *rx, 15673 struct netdev_queue_stats_tx *tx) 15674 { 15675 struct bnxt *bp = netdev_priv(dev); 15676 15677 rx->packets = bp->net_stats_prev.rx_packets; 15678 rx->bytes = bp->net_stats_prev.rx_bytes; 15679 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; 15680 15681 tx->packets = bp->net_stats_prev.tx_packets; 15682 tx->bytes = bp->net_stats_prev.tx_bytes; 15683 } 15684 15685 static const struct netdev_stat_ops bnxt_stat_ops = { 15686 .get_queue_stats_rx = bnxt_get_queue_stats_rx, 15687 .get_queue_stats_tx = bnxt_get_queue_stats_tx, 15688 .get_base_stats = bnxt_get_base_stats, 15689 }; 15690 15691 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 15692 { 15693 struct bnxt_rx_ring_info *rxr, *clone; 15694 struct bnxt *bp = netdev_priv(dev); 15695 struct bnxt_ring_struct *ring; 15696 int rc; 15697 15698 if (!bp->rx_ring) 15699 return -ENETDOWN; 15700 15701 rxr = &bp->rx_ring[idx]; 15702 clone = qmem; 15703 memcpy(clone, rxr, sizeof(*rxr)); 15704 bnxt_init_rx_ring_struct(bp, clone); 15705 
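/* 'clone' is a shadow of the live RX ring: everything below is allocated
 * against the clone while the old ring keeps running, and
 * bnxt_queue_start() later swaps the new resources in.  Rough restart
 * sequence assumed from the core (see netdev_rx_queue_restart()):
 * ndo_queue_mem_alloc(new) -> ndo_queue_stop(old) ->
 * ndo_queue_start(new) -> ndo_queue_mem_free(old).
 */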
bnxt_reset_rx_ring_struct(bp, clone); 15706 15707 clone->rx_prod = 0; 15708 clone->rx_agg_prod = 0; 15709 clone->rx_sw_agg_prod = 0; 15710 clone->rx_next_cons = 0; 15711 15712 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); 15713 if (rc) 15714 return rc; 15715 15716 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); 15717 if (rc < 0) 15718 goto err_page_pool_destroy; 15719 15720 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq, 15721 MEM_TYPE_PAGE_POOL, 15722 clone->page_pool); 15723 if (rc) 15724 goto err_rxq_info_unreg; 15725 15726 ring = &clone->rx_ring_struct; 15727 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15728 if (rc) 15729 goto err_free_rx_ring; 15730 15731 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 15732 ring = &clone->rx_agg_ring_struct; 15733 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15734 if (rc) 15735 goto err_free_rx_agg_ring; 15736 15737 rc = bnxt_alloc_rx_agg_bmap(bp, clone); 15738 if (rc) 15739 goto err_free_rx_agg_ring; 15740 } 15741 15742 if (bp->flags & BNXT_FLAG_TPA) { 15743 rc = bnxt_alloc_one_tpa_info(bp, clone); 15744 if (rc) 15745 goto err_free_tpa_info; 15746 } 15747 15748 bnxt_init_one_rx_ring_rxbd(bp, clone); 15749 bnxt_init_one_rx_agg_ring_rxbd(bp, clone); 15750 15751 bnxt_alloc_one_rx_ring_skb(bp, clone, idx); 15752 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15753 bnxt_alloc_one_rx_ring_page(bp, clone, idx); 15754 if (bp->flags & BNXT_FLAG_TPA) 15755 bnxt_alloc_one_tpa_info_data(bp, clone); 15756 15757 return 0; 15758 15759 err_free_tpa_info: 15760 bnxt_free_one_tpa_info(bp, clone); 15761 err_free_rx_agg_ring: 15762 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); 15763 err_free_rx_ring: 15764 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); 15765 err_rxq_info_unreg: 15766 xdp_rxq_info_unreg(&clone->xdp_rxq); 15767 err_page_pool_destroy: 15768 page_pool_destroy(clone->page_pool); 15769 if (bnxt_separate_head_pool()) 15770 page_pool_destroy(clone->head_pool); 15771 clone->page_pool = NULL; 15772 clone->head_pool = NULL; 15773 return rc; 15774 } 15775 15776 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) 15777 { 15778 struct bnxt_rx_ring_info *rxr = qmem; 15779 struct bnxt *bp = netdev_priv(dev); 15780 struct bnxt_ring_struct *ring; 15781 15782 bnxt_free_one_rx_ring_skbs(bp, rxr); 15783 bnxt_free_one_tpa_info(bp, rxr); 15784 15785 xdp_rxq_info_unreg(&rxr->xdp_rxq); 15786 15787 page_pool_destroy(rxr->page_pool); 15788 if (bnxt_separate_head_pool()) 15789 page_pool_destroy(rxr->head_pool); 15790 rxr->page_pool = NULL; 15791 rxr->head_pool = NULL; 15792 15793 ring = &rxr->rx_ring_struct; 15794 bnxt_free_ring(bp, &ring->ring_mem); 15795 15796 ring = &rxr->rx_agg_ring_struct; 15797 bnxt_free_ring(bp, &ring->ring_mem); 15798 15799 kfree(rxr->rx_agg_bmap); 15800 rxr->rx_agg_bmap = NULL; 15801 } 15802 15803 static void bnxt_copy_rx_ring(struct bnxt *bp, 15804 struct bnxt_rx_ring_info *dst, 15805 struct bnxt_rx_ring_info *src) 15806 { 15807 struct bnxt_ring_mem_info *dst_rmem, *src_rmem; 15808 struct bnxt_ring_struct *dst_ring, *src_ring; 15809 int i; 15810 15811 dst_ring = &dst->rx_ring_struct; 15812 dst_rmem = &dst_ring->ring_mem; 15813 src_ring = &src->rx_ring_struct; 15814 src_rmem = &src_ring->ring_mem; 15815 15816 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15817 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15818 WARN_ON(dst_rmem->flags != src_rmem->flags); 15819 WARN_ON(dst_rmem->depth != src_rmem->depth); 15820 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15821 WARN_ON(dst_rmem->ctx_mem != 
src_rmem->ctx_mem); 15822 15823 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15824 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15825 *dst_rmem->vmem = *src_rmem->vmem; 15826 for (i = 0; i < dst_rmem->nr_pages; i++) { 15827 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15828 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15829 } 15830 15831 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 15832 return; 15833 15834 dst_ring = &dst->rx_agg_ring_struct; 15835 dst_rmem = &dst_ring->ring_mem; 15836 src_ring = &src->rx_agg_ring_struct; 15837 src_rmem = &src_ring->ring_mem; 15838 15839 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15840 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15841 WARN_ON(dst_rmem->flags != src_rmem->flags); 15842 WARN_ON(dst_rmem->depth != src_rmem->depth); 15843 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15844 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15845 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); 15846 15847 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15848 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15849 *dst_rmem->vmem = *src_rmem->vmem; 15850 for (i = 0; i < dst_rmem->nr_pages; i++) { 15851 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15852 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15853 } 15854 15855 dst->rx_agg_bmap = src->rx_agg_bmap; 15856 } 15857 15858 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) 15859 { 15860 struct bnxt *bp = netdev_priv(dev); 15861 struct bnxt_rx_ring_info *rxr, *clone; 15862 struct bnxt_cp_ring_info *cpr; 15863 struct bnxt_vnic_info *vnic; 15864 struct bnxt_napi *bnapi; 15865 int i, rc; 15866 15867 rxr = &bp->rx_ring[idx]; 15868 clone = qmem; 15869 15870 rxr->rx_prod = clone->rx_prod; 15871 rxr->rx_agg_prod = clone->rx_agg_prod; 15872 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; 15873 rxr->rx_next_cons = clone->rx_next_cons; 15874 rxr->rx_tpa = clone->rx_tpa; 15875 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map; 15876 rxr->page_pool = clone->page_pool; 15877 rxr->head_pool = clone->head_pool; 15878 rxr->xdp_rxq = clone->xdp_rxq; 15879 15880 bnxt_copy_rx_ring(bp, rxr, clone); 15881 15882 bnapi = rxr->bnapi; 15883 cpr = &bnapi->cp_ring; 15884 15885 /* All rings have been reserved and previously allocated. 15886 * Reallocating with the same parameters should never fail. 
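 * If one of the HWRM calls below does fail anyway, err_reset logs the
 * error and schedules a full device reset via bnxt_reset_task().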
15887 */ 15888 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 15889 if (rc) 15890 goto err_reset; 15891 15892 if (bp->tph_mode) { 15893 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 15894 if (rc) 15895 goto err_reset; 15896 } 15897 15898 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); 15899 if (rc) 15900 goto err_reset; 15901 15902 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 15903 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15904 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 15905 15906 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 15907 rc = bnxt_tx_queue_start(bp, idx); 15908 if (rc) 15909 goto err_reset; 15910 } 15911 15912 napi_enable_locked(&bnapi->napi); 15913 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 15914 15915 for (i = 0; i < bp->nr_vnics; i++) { 15916 vnic = &bp->vnic_info[i]; 15917 15918 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 15919 if (rc) { 15920 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 15921 vnic->vnic_id, rc); 15922 return rc; 15923 } 15924 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 15925 bnxt_hwrm_vnic_update(bp, vnic, 15926 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15927 } 15928 15929 return 0; 15930 15931 err_reset: 15932 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", 15933 rc); 15934 napi_enable_locked(&bnapi->napi); 15935 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 15936 bnxt_reset_task(bp, true); 15937 return rc; 15938 } 15939 15940 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) 15941 { 15942 struct bnxt *bp = netdev_priv(dev); 15943 struct bnxt_rx_ring_info *rxr; 15944 struct bnxt_cp_ring_info *cpr; 15945 struct bnxt_vnic_info *vnic; 15946 struct bnxt_napi *bnapi; 15947 int i; 15948 15949 for (i = 0; i < bp->nr_vnics; i++) { 15950 vnic = &bp->vnic_info[i]; 15951 vnic->mru = 0; 15952 bnxt_hwrm_vnic_update(bp, vnic, 15953 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15954 } 15955 /* Make sure NAPI sees that the VNIC is disabled */ 15956 synchronize_net(); 15957 rxr = &bp->rx_ring[idx]; 15958 bnapi = rxr->bnapi; 15959 cpr = &bnapi->cp_ring; 15960 cancel_work_sync(&cpr->dim.work); 15961 bnxt_hwrm_rx_ring_free(bp, rxr, false); 15962 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); 15963 page_pool_disable_direct_recycling(rxr->page_pool); 15964 if (bnxt_separate_head_pool()) 15965 page_pool_disable_direct_recycling(rxr->head_pool); 15966 15967 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 15968 bnxt_tx_queue_stop(bp, idx); 15969 15970 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE 15971 * completion is handled in NAPI to guarantee no more DMA on that ring 15972 * after seeing the completion. 
15973 */ 15974 napi_disable_locked(&bnapi->napi); 15975 15976 if (bp->tph_mode) { 15977 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr); 15978 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr); 15979 } 15980 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 15981 15982 memcpy(qmem, rxr, sizeof(*rxr)); 15983 bnxt_init_rx_ring_struct(bp, qmem); 15984 15985 return 0; 15986 } 15987 15988 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { 15989 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), 15990 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, 15991 .ndo_queue_mem_free = bnxt_queue_mem_free, 15992 .ndo_queue_start = bnxt_queue_start, 15993 .ndo_queue_stop = bnxt_queue_stop, 15994 }; 15995 15996 static void bnxt_remove_one(struct pci_dev *pdev) 15997 { 15998 struct net_device *dev = pci_get_drvdata(pdev); 15999 struct bnxt *bp = netdev_priv(dev); 16000 16001 if (BNXT_PF(bp)) 16002 bnxt_sriov_disable(bp); 16003 16004 bnxt_rdma_aux_device_del(bp); 16005 16006 bnxt_ptp_clear(bp); 16007 unregister_netdev(dev); 16008 16009 bnxt_rdma_aux_device_uninit(bp); 16010 16011 bnxt_free_l2_filters(bp, true); 16012 bnxt_free_ntp_fltrs(bp, true); 16013 WARN_ON(bp->num_rss_ctx); 16014 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 16015 /* Flush any pending tasks */ 16016 cancel_work_sync(&bp->sp_task); 16017 cancel_delayed_work_sync(&bp->fw_reset_task); 16018 bp->sp_event = 0; 16019 16020 bnxt_dl_fw_reporters_destroy(bp); 16021 bnxt_dl_unregister(bp); 16022 bnxt_shutdown_tc(bp); 16023 16024 bnxt_clear_int_mode(bp); 16025 bnxt_hwrm_func_drv_unrgtr(bp); 16026 bnxt_free_hwrm_resources(bp); 16027 bnxt_hwmon_uninit(bp); 16028 bnxt_ethtool_free(bp); 16029 bnxt_dcb_free(bp); 16030 kfree(bp->ptp_cfg); 16031 bp->ptp_cfg = NULL; 16032 kfree(bp->fw_health); 16033 bp->fw_health = NULL; 16034 bnxt_cleanup_pci(bp); 16035 bnxt_free_ctx_mem(bp, true); 16036 bnxt_free_crash_dump_mem(bp); 16037 kfree(bp->rss_indir_tbl); 16038 bp->rss_indir_tbl = NULL; 16039 bnxt_free_port_stats(bp); 16040 free_netdev(dev); 16041 } 16042 16043 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 16044 { 16045 int rc = 0; 16046 struct bnxt_link_info *link_info = &bp->link_info; 16047 16048 bp->phy_flags = 0; 16049 rc = bnxt_hwrm_phy_qcaps(bp); 16050 if (rc) { 16051 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 16052 rc); 16053 return rc; 16054 } 16055 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 16056 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 16057 else 16058 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 16059 16060 bp->mac_flags = 0; 16061 bnxt_hwrm_mac_qcaps(bp); 16062 16063 if (!fw_dflt) 16064 return 0; 16065 16066 mutex_lock(&bp->link_lock); 16067 rc = bnxt_update_link(bp, false); 16068 if (rc) { 16069 mutex_unlock(&bp->link_lock); 16070 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 16071 rc); 16072 return rc; 16073 } 16074 16075 /* Older firmware does not have supported_auto_speeds, so assume 16076 * that all supported speeds can be autonegotiated. 
16077 */ 16078 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 16079 link_info->support_auto_speeds = link_info->support_speeds; 16080 16081 bnxt_init_ethtool_link_settings(bp); 16082 mutex_unlock(&bp->link_lock); 16083 return 0; 16084 } 16085 16086 static int bnxt_get_max_irq(struct pci_dev *pdev) 16087 { 16088 u16 ctrl; 16089 16090 if (!pdev->msix_cap) 16091 return 1; 16092 16093 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 16094 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 16095 } 16096 16097 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16098 int *max_cp) 16099 { 16100 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 16101 int max_ring_grps = 0, max_irq; 16102 16103 *max_tx = hw_resc->max_tx_rings; 16104 *max_rx = hw_resc->max_rx_rings; 16105 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 16106 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 16107 bnxt_get_ulp_msix_num_in_use(bp), 16108 hw_resc->max_stat_ctxs - 16109 bnxt_get_ulp_stat_ctxs_in_use(bp)); 16110 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 16111 *max_cp = min_t(int, *max_cp, max_irq); 16112 max_ring_grps = hw_resc->max_hw_ring_grps; 16113 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 16114 *max_cp -= 1; 16115 *max_rx -= 2; 16116 } 16117 if (bp->flags & BNXT_FLAG_AGG_RINGS) 16118 *max_rx >>= 1; 16119 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 16120 int rc; 16121 16122 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 16123 if (rc) { 16124 *max_rx = 0; 16125 *max_tx = 0; 16126 } 16127 /* On P5 chips, max_cp output param should be available NQs */ 16128 *max_cp = max_irq; 16129 } 16130 *max_rx = min_t(int, *max_rx, max_ring_grps); 16131 } 16132 16133 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 16134 { 16135 int rx, tx, cp; 16136 16137 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 16138 *max_rx = rx; 16139 *max_tx = tx; 16140 if (!rx || !tx || !cp) 16141 return -ENOMEM; 16142 16143 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 16144 } 16145 16146 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16147 bool shared) 16148 { 16149 int rc; 16150 16151 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16152 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 16153 /* Not enough rings, try disabling agg rings. 
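 * Aggregation rings are required for hardware GRO/LRO, so those
 * features are cleared from the netdev below as well.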
*/ 16154 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 16155 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16156 if (rc) { 16157 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 16158 bp->flags |= BNXT_FLAG_AGG_RINGS; 16159 return rc; 16160 } 16161 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 16162 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16163 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16164 bnxt_set_ring_params(bp); 16165 } 16166 16167 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 16168 int max_cp, max_stat, max_irq; 16169 16170 /* Reserve minimum resources for RoCE */ 16171 max_cp = bnxt_get_max_func_cp_rings(bp); 16172 max_stat = bnxt_get_max_func_stat_ctxs(bp); 16173 max_irq = bnxt_get_max_func_irqs(bp); 16174 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 16175 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 16176 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 16177 return 0; 16178 16179 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 16180 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 16181 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 16182 max_cp = min_t(int, max_cp, max_irq); 16183 max_cp = min_t(int, max_cp, max_stat); 16184 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 16185 if (rc) 16186 rc = 0; 16187 } 16188 return rc; 16189 } 16190 16191 /* In initial default shared ring setting, each shared ring must have a 16192 * RX/TX ring pair. 16193 */ 16194 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 16195 { 16196 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 16197 bp->rx_nr_rings = bp->cp_nr_rings; 16198 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 16199 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16200 } 16201 16202 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 16203 { 16204 int dflt_rings, max_rx_rings, max_tx_rings, rc; 16205 int avail_msix; 16206 16207 if (!bnxt_can_reserve_rings(bp)) 16208 return 0; 16209 16210 if (sh) 16211 bp->flags |= BNXT_FLAG_SHARED_RINGS; 16212 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 16213 /* Reduce default rings on multi-port cards so that total default 16214 * rings do not exceed CPU count. 16215 */ 16216 if (bp->port_count > 1) { 16217 int max_rings = 16218 max_t(int, num_online_cpus() / bp->port_count, 1); 16219 16220 dflt_rings = min_t(int, dflt_rings, max_rings); 16221 } 16222 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 16223 if (rc) 16224 return rc; 16225 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 16226 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 16227 if (sh) 16228 bnxt_trim_dflt_sh_rings(bp); 16229 else 16230 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 16231 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16232 16233 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; 16234 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { 16235 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); 16236 16237 bnxt_set_ulp_msix_num(bp, ulp_num_msix); 16238 bnxt_set_dflt_ulp_stat_ctxs(bp); 16239 } 16240 16241 rc = __bnxt_reserve_rings(bp); 16242 if (rc && rc != -ENODEV) 16243 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 16244 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16245 if (sh) 16246 bnxt_trim_dflt_sh_rings(bp); 16247 16248 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 16249 if (bnxt_need_reserve_rings(bp)) { 16250 rc = __bnxt_reserve_rings(bp); 16251 if (rc && rc != -ENODEV) 16252 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 16253 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16254 } 16255 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 16256 bp->rx_nr_rings++; 16257 bp->cp_nr_rings++; 16258 } 16259 if (rc) { 16260 bp->tx_nr_rings = 0; 16261 bp->rx_nr_rings = 0; 16262 } 16263 return rc; 16264 } 16265 16266 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 16267 { 16268 int rc; 16269 16270 if (bp->tx_nr_rings) 16271 return 0; 16272 16273 bnxt_ulp_irq_stop(bp); 16274 bnxt_clear_int_mode(bp); 16275 rc = bnxt_set_dflt_rings(bp, true); 16276 if (rc) { 16277 if (BNXT_VF(bp) && rc == -ENODEV) 16278 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16279 else 16280 netdev_err(bp->dev, "Not enough rings available.\n"); 16281 goto init_dflt_ring_err; 16282 } 16283 rc = bnxt_init_int_mode(bp); 16284 if (rc) 16285 goto init_dflt_ring_err; 16286 16287 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16288 16289 bnxt_set_dflt_rfs(bp); 16290 16291 init_dflt_ring_err: 16292 bnxt_ulp_irq_restart(bp, rc); 16293 return rc; 16294 } 16295 16296 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 16297 { 16298 int rc; 16299 16300 netdev_ops_assert_locked(bp->dev); 16301 bnxt_hwrm_func_qcaps(bp); 16302 16303 if (netif_running(bp->dev)) 16304 __bnxt_close_nic(bp, true, false); 16305 16306 bnxt_ulp_irq_stop(bp); 16307 bnxt_clear_int_mode(bp); 16308 rc = bnxt_init_int_mode(bp); 16309 bnxt_ulp_irq_restart(bp, rc); 16310 16311 if (netif_running(bp->dev)) { 16312 if (rc) 16313 netif_close(bp->dev); 16314 else 16315 rc = bnxt_open_nic(bp, true, false); 16316 } 16317 16318 return rc; 16319 } 16320 16321 static int bnxt_init_mac_addr(struct bnxt *bp) 16322 { 16323 int rc = 0; 16324 16325 if (BNXT_PF(bp)) { 16326 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 16327 } else { 16328 #ifdef CONFIG_BNXT_SRIOV 16329 struct bnxt_vf_info *vf = &bp->vf; 16330 bool strict_approval = true; 16331 16332 if (is_valid_ether_addr(vf->mac_addr)) { 16333 /* overwrite netdev dev_addr with admin VF MAC */ 16334 eth_hw_addr_set(bp->dev, vf->mac_addr); 16335 /* Older PF driver or firmware may not approve this 16336 * correctly. 
16337 */ 16338 strict_approval = false; 16339 } else { 16340 eth_hw_addr_random(bp->dev); 16341 } 16342 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 16343 #endif 16344 } 16345 return rc; 16346 } 16347 16348 static void bnxt_vpd_read_info(struct bnxt *bp) 16349 { 16350 struct pci_dev *pdev = bp->pdev; 16351 unsigned int vpd_size, kw_len; 16352 int pos, size; 16353 u8 *vpd_data; 16354 16355 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 16356 if (IS_ERR(vpd_data)) { 16357 pci_warn(pdev, "Unable to read VPD\n"); 16358 return; 16359 } 16360 16361 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16362 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 16363 if (pos < 0) 16364 goto read_sn; 16365 16366 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16367 memcpy(bp->board_partno, &vpd_data[pos], size); 16368 16369 read_sn: 16370 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16371 PCI_VPD_RO_KEYWORD_SERIALNO, 16372 &kw_len); 16373 if (pos < 0) 16374 goto exit; 16375 16376 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16377 memcpy(bp->board_serialno, &vpd_data[pos], size); 16378 exit: 16379 kfree(vpd_data); 16380 } 16381 16382 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 16383 { 16384 struct pci_dev *pdev = bp->pdev; 16385 u64 qword; 16386 16387 qword = pci_get_dsn(pdev); 16388 if (!qword) { 16389 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 16390 return -EOPNOTSUPP; 16391 } 16392 16393 put_unaligned_le64(qword, dsn); 16394 16395 bp->flags |= BNXT_FLAG_DSN_VALID; 16396 return 0; 16397 } 16398 16399 static int bnxt_map_db_bar(struct bnxt *bp) 16400 { 16401 if (!bp->db_size) 16402 return -ENODEV; 16403 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 16404 if (!bp->bar1) 16405 return -ENOMEM; 16406 return 0; 16407 } 16408 16409 void bnxt_print_device_info(struct bnxt *bp) 16410 { 16411 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 16412 board_info[bp->board_idx].name, 16413 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 16414 16415 pcie_print_link_status(bp->pdev); 16416 } 16417 16418 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 16419 { 16420 struct bnxt_hw_resc *hw_resc; 16421 struct net_device *dev; 16422 struct bnxt *bp; 16423 int rc, max_irqs; 16424 16425 if (pci_is_bridge(pdev)) 16426 return -ENODEV; 16427 16428 if (!pdev->msix_cap) { 16429 dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); 16430 return -ENODEV; 16431 } 16432 16433 /* Clear any pending DMA transactions from crash kernel 16434 * while loading driver in capture kernel. 
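 * The function level reset below quiesces the device so that stale DMA
 * from the crashed kernel cannot land in memory now owned by the
 * capture kernel.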
16435 */ 16436 if (is_kdump_kernel()) { 16437 pci_clear_master(pdev); 16438 pcie_flr(pdev); 16439 } 16440 16441 max_irqs = bnxt_get_max_irq(pdev); 16442 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 16443 max_irqs); 16444 if (!dev) 16445 return -ENOMEM; 16446 16447 bp = netdev_priv(dev); 16448 bp->board_idx = ent->driver_data; 16449 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 16450 bnxt_set_max_func_irqs(bp, max_irqs); 16451 16452 if (bnxt_vf_pciid(bp->board_idx)) 16453 bp->flags |= BNXT_FLAG_VF; 16454 16455 /* No devlink port registration in case of a VF */ 16456 if (BNXT_PF(bp)) 16457 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 16458 16459 rc = bnxt_init_board(pdev, dev); 16460 if (rc < 0) 16461 goto init_err_free; 16462 16463 dev->netdev_ops = &bnxt_netdev_ops; 16464 dev->stat_ops = &bnxt_stat_ops; 16465 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 16466 dev->ethtool_ops = &bnxt_ethtool_ops; 16467 pci_set_drvdata(pdev, dev); 16468 16469 rc = bnxt_alloc_hwrm_resources(bp); 16470 if (rc) 16471 goto init_err_pci_clean; 16472 16473 mutex_init(&bp->hwrm_cmd_lock); 16474 mutex_init(&bp->link_lock); 16475 16476 rc = bnxt_fw_init_one_p1(bp); 16477 if (rc) 16478 goto init_err_pci_clean; 16479 16480 if (BNXT_PF(bp)) 16481 bnxt_vpd_read_info(bp); 16482 16483 if (BNXT_CHIP_P5_PLUS(bp)) { 16484 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 16485 if (BNXT_CHIP_P7(bp)) 16486 bp->flags |= BNXT_FLAG_CHIP_P7; 16487 } 16488 16489 rc = bnxt_alloc_rss_indir_tbl(bp); 16490 if (rc) 16491 goto init_err_pci_clean; 16492 16493 rc = bnxt_fw_init_one_p2(bp); 16494 if (rc) 16495 goto init_err_pci_clean; 16496 16497 rc = bnxt_map_db_bar(bp); 16498 if (rc) { 16499 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 16500 rc); 16501 goto init_err_pci_clean; 16502 } 16503 16504 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16505 NETIF_F_TSO | NETIF_F_TSO6 | 16506 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16507 NETIF_F_GSO_IPXIP4 | 16508 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16509 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 16510 NETIF_F_RXCSUM | NETIF_F_GRO; 16511 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16512 dev->hw_features |= NETIF_F_GSO_UDP_L4; 16513 16514 if (BNXT_SUPPORTS_TPA(bp)) 16515 dev->hw_features |= NETIF_F_LRO; 16516 16517 dev->hw_enc_features = 16518 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16519 NETIF_F_TSO | NETIF_F_TSO6 | 16520 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16521 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16522 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 16523 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16524 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 16525 if (bp->flags & BNXT_FLAG_CHIP_P7) 16526 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 16527 else 16528 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 16529 16530 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 16531 NETIF_F_GSO_GRE_CSUM; 16532 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 16533 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 16534 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 16535 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 16536 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 16537 if (BNXT_SUPPORTS_TPA(bp)) 16538 dev->hw_features |= NETIF_F_GRO_HW; 16539 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 16540 if (dev->features & NETIF_F_GRO_HW) 16541 dev->features &= ~NETIF_F_LRO; 16542 dev->priv_flags |= IFF_UNICAST_FLT; 16543 16544 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 16545 if (bp->tso_max_segs) 16546 
netif_set_tso_max_segs(dev, bp->tso_max_segs); 16547 16548 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 16549 NETDEV_XDP_ACT_RX_SG; 16550 16551 #ifdef CONFIG_BNXT_SRIOV 16552 init_waitqueue_head(&bp->sriov_cfg_wait); 16553 #endif 16554 if (BNXT_SUPPORTS_TPA(bp)) { 16555 bp->gro_func = bnxt_gro_func_5730x; 16556 if (BNXT_CHIP_P4(bp)) 16557 bp->gro_func = bnxt_gro_func_5731x; 16558 else if (BNXT_CHIP_P5_PLUS(bp)) 16559 bp->gro_func = bnxt_gro_func_5750x; 16560 } 16561 if (!BNXT_CHIP_P4_PLUS(bp)) 16562 bp->flags |= BNXT_FLAG_DOUBLE_DB; 16563 16564 rc = bnxt_init_mac_addr(bp); 16565 if (rc) { 16566 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 16567 rc = -EADDRNOTAVAIL; 16568 goto init_err_pci_clean; 16569 } 16570 16571 if (BNXT_PF(bp)) { 16572 /* Read the adapter's DSN to use as the eswitch switch_id */ 16573 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 16574 } 16575 16576 /* MTU range: 60 - FW defined max */ 16577 dev->min_mtu = ETH_ZLEN; 16578 dev->max_mtu = bp->max_mtu; 16579 16580 rc = bnxt_probe_phy(bp, true); 16581 if (rc) 16582 goto init_err_pci_clean; 16583 16584 hw_resc = &bp->hw_resc; 16585 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + 16586 BNXT_L2_FLTR_MAX_FLTR; 16587 /* Older firmware may not report these filters properly */ 16588 if (bp->max_fltr < BNXT_MAX_FLTR) 16589 bp->max_fltr = BNXT_MAX_FLTR; 16590 bnxt_init_l2_fltr_tbl(bp); 16591 __bnxt_set_rx_skb_mode(bp, false); 16592 bnxt_set_tpa_flags(bp); 16593 bnxt_init_ring_params(bp); 16594 bnxt_set_ring_params(bp); 16595 bnxt_rdma_aux_device_init(bp); 16596 rc = bnxt_set_dflt_rings(bp, true); 16597 if (rc) { 16598 if (BNXT_VF(bp) && rc == -ENODEV) { 16599 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16600 } else { 16601 netdev_err(bp->dev, "Not enough rings available.\n"); 16602 rc = -ENOMEM; 16603 } 16604 goto init_err_pci_clean; 16605 } 16606 16607 bnxt_fw_init_one_p3(bp); 16608 16609 bnxt_init_dflt_coal(bp); 16610 16611 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 16612 bp->flags |= BNXT_FLAG_STRIP_VLAN; 16613 16614 rc = bnxt_init_int_mode(bp); 16615 if (rc) 16616 goto init_err_pci_clean; 16617 16618 /* No TC has been set yet and rings may have been trimmed due to 16619 * limited MSIX, so we re-initialize the TX rings per TC. 
16620 */ 16621 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16622 16623 if (BNXT_PF(bp)) { 16624 if (!bnxt_pf_wq) { 16625 bnxt_pf_wq = 16626 create_singlethread_workqueue("bnxt_pf_wq"); 16627 if (!bnxt_pf_wq) { 16628 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 16629 rc = -ENOMEM; 16630 goto init_err_pci_clean; 16631 } 16632 } 16633 rc = bnxt_init_tc(bp); 16634 if (rc) 16635 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 16636 rc); 16637 } 16638 16639 bnxt_inv_fw_health_reg(bp); 16640 rc = bnxt_dl_register(bp); 16641 if (rc) 16642 goto init_err_dl; 16643 16644 INIT_LIST_HEAD(&bp->usr_fltr_list); 16645 16646 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 16647 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; 16648 if (BNXT_SUPPORTS_QUEUE_API(bp)) 16649 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; 16650 dev->request_ops_lock = true; 16651 16652 rc = register_netdev(dev); 16653 if (rc) 16654 goto init_err_cleanup; 16655 16656 bnxt_dl_fw_reporters_create(bp); 16657 16658 bnxt_rdma_aux_device_add(bp); 16659 16660 bnxt_print_device_info(bp); 16661 16662 pci_save_state(pdev); 16663 16664 return 0; 16665 init_err_cleanup: 16666 bnxt_rdma_aux_device_uninit(bp); 16667 bnxt_dl_unregister(bp); 16668 init_err_dl: 16669 bnxt_shutdown_tc(bp); 16670 bnxt_clear_int_mode(bp); 16671 16672 init_err_pci_clean: 16673 bnxt_hwrm_func_drv_unrgtr(bp); 16674 bnxt_free_hwrm_resources(bp); 16675 bnxt_hwmon_uninit(bp); 16676 bnxt_ethtool_free(bp); 16677 bnxt_ptp_clear(bp); 16678 kfree(bp->ptp_cfg); 16679 bp->ptp_cfg = NULL; 16680 kfree(bp->fw_health); 16681 bp->fw_health = NULL; 16682 bnxt_cleanup_pci(bp); 16683 bnxt_free_ctx_mem(bp, true); 16684 bnxt_free_crash_dump_mem(bp); 16685 kfree(bp->rss_indir_tbl); 16686 bp->rss_indir_tbl = NULL; 16687 16688 init_err_free: 16689 free_netdev(dev); 16690 return rc; 16691 } 16692 16693 static void bnxt_shutdown(struct pci_dev *pdev) 16694 { 16695 struct net_device *dev = pci_get_drvdata(pdev); 16696 struct bnxt *bp; 16697 16698 if (!dev) 16699 return; 16700 16701 rtnl_lock(); 16702 netdev_lock(dev); 16703 bp = netdev_priv(dev); 16704 if (!bp) 16705 goto shutdown_exit; 16706 16707 if (netif_running(dev)) 16708 netif_close(dev); 16709 16710 bnxt_ptp_clear(bp); 16711 bnxt_clear_int_mode(bp); 16712 pci_disable_device(pdev); 16713 16714 if (system_state == SYSTEM_POWER_OFF) { 16715 pci_wake_from_d3(pdev, bp->wol); 16716 pci_set_power_state(pdev, PCI_D3hot); 16717 } 16718 16719 shutdown_exit: 16720 netdev_unlock(dev); 16721 rtnl_unlock(); 16722 } 16723 16724 #ifdef CONFIG_PM_SLEEP 16725 static int bnxt_suspend(struct device *device) 16726 { 16727 struct net_device *dev = dev_get_drvdata(device); 16728 struct bnxt *bp = netdev_priv(dev); 16729 int rc = 0; 16730 16731 bnxt_ulp_stop(bp); 16732 16733 netdev_lock(dev); 16734 if (netif_running(dev)) { 16735 netif_device_detach(dev); 16736 rc = bnxt_close(dev); 16737 } 16738 bnxt_hwrm_func_drv_unrgtr(bp); 16739 bnxt_ptp_clear(bp); 16740 pci_disable_device(bp->pdev); 16741 bnxt_free_ctx_mem(bp, false); 16742 netdev_unlock(dev); 16743 return rc; 16744 } 16745 16746 static int bnxt_resume(struct device *device) 16747 { 16748 struct net_device *dev = dev_get_drvdata(device); 16749 struct bnxt *bp = netdev_priv(dev); 16750 int rc = 0; 16751 16752 netdev_lock(dev); 16753 rc = pci_enable_device(bp->pdev); 16754 if (rc) { 16755 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 16756 rc); 16757 goto resume_exit; 16758 } 16759 pci_set_master(bp->pdev); 16760 if (bnxt_hwrm_ver_get(bp)) { 16761 rc = -ENODEV; 16762 goto 
resume_exit; 16763 } 16764 rc = bnxt_hwrm_func_reset(bp); 16765 if (rc) { 16766 rc = -EBUSY; 16767 goto resume_exit; 16768 } 16769 16770 rc = bnxt_hwrm_func_qcaps(bp); 16771 if (rc) 16772 goto resume_exit; 16773 16774 bnxt_clear_reservations(bp, true); 16775 16776 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 16777 rc = -ENODEV; 16778 goto resume_exit; 16779 } 16780 if (bp->fw_crash_mem) 16781 bnxt_hwrm_crash_dump_mem_cfg(bp); 16782 16783 if (bnxt_ptp_init(bp)) { 16784 kfree(bp->ptp_cfg); 16785 bp->ptp_cfg = NULL; 16786 } 16787 bnxt_get_wol_settings(bp); 16788 if (netif_running(dev)) { 16789 rc = bnxt_open(dev); 16790 if (!rc) 16791 netif_device_attach(dev); 16792 } 16793 16794 resume_exit: 16795 netdev_unlock(bp->dev); 16796 bnxt_ulp_start(bp, rc); 16797 if (!rc) 16798 bnxt_reenable_sriov(bp); 16799 return rc; 16800 } 16801 16802 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); 16803 #define BNXT_PM_OPS (&bnxt_pm_ops) 16804 16805 #else 16806 16807 #define BNXT_PM_OPS NULL 16808 16809 #endif /* CONFIG_PM_SLEEP */ 16810 16811 /** 16812 * bnxt_io_error_detected - called when PCI error is detected 16813 * @pdev: Pointer to PCI device 16814 * @state: The current pci connection state 16815 * 16816 * This function is called after a PCI bus error affecting 16817 * this device has been detected. 16818 */ 16819 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 16820 pci_channel_state_t state) 16821 { 16822 struct net_device *netdev = pci_get_drvdata(pdev); 16823 struct bnxt *bp = netdev_priv(netdev); 16824 bool abort = false; 16825 16826 netdev_info(netdev, "PCI I/O error detected\n"); 16827 16828 bnxt_ulp_stop(bp); 16829 16830 netdev_lock(netdev); 16831 netif_device_detach(netdev); 16832 16833 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 16834 netdev_err(bp->dev, "Firmware reset already in progress\n"); 16835 abort = true; 16836 } 16837 16838 if (abort || state == pci_channel_io_perm_failure) { 16839 netdev_unlock(netdev); 16840 return PCI_ERS_RESULT_DISCONNECT; 16841 } 16842 16843 /* Link is not reliable anymore if state is pci_channel_io_frozen, 16844 * so we disable bus master to prevent any potential bad DMAs before 16845 * freeing kernel memory. 16846 */ 16847 if (state == pci_channel_io_frozen) { 16848 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); 16849 bnxt_fw_fatal_close(bp); 16850 } 16851 16852 if (netif_running(netdev)) 16853 __bnxt_close_nic(bp, true, true); 16854 16855 if (pci_is_enabled(pdev)) 16856 pci_disable_device(pdev); 16857 bnxt_free_ctx_mem(bp, false); 16858 netdev_unlock(netdev); 16859 16860 /* Request a slot reset. */ 16861 return PCI_ERS_RESULT_NEED_RESET; 16862 } 16863 16864 /** 16865 * bnxt_io_slot_reset - called after the pci bus has been reset. 16866 * @pdev: Pointer to PCI device 16867 * 16868 * Restart the card from scratch, as if from a cold-boot. 16869 * At this point, the card has experienced a hard reset, 16870 * followed by fixups by BIOS, and has its config space 16871 * set up identically to what it was at cold boot.
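 *
 * Return: PCI_ERS_RESULT_RECOVERED if the device and firmware could be
 * re-initialized, PCI_ERS_RESULT_DISCONNECT otherwise.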
16872 */ 16873 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 16874 { 16875 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 16876 struct net_device *netdev = pci_get_drvdata(pdev); 16877 struct bnxt *bp = netdev_priv(netdev); 16878 int retry = 0; 16879 int err = 0; 16880 int off; 16881 16882 netdev_info(bp->dev, "PCI Slot Reset\n"); 16883 16884 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 16885 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) 16886 msleep(900); 16887 16888 netdev_lock(netdev); 16889 16890 if (pci_enable_device(pdev)) { 16891 dev_err(&pdev->dev, 16892 "Cannot re-enable PCI device after reset.\n"); 16893 } else { 16894 pci_set_master(pdev); 16895 /* Upon fatal error, the device's internal logic that latches the 16896 * BAR values is reset and will be restored only upon 16897 * rewriting the BARs. 16898 * 16899 * As pci_restore_state() does not re-write the BARs if the 16900 * value is the same as the saved value, the driver needs to 16901 * write the BARs to 0 to force a restore after a fatal error. 16902 */ 16903 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, 16904 &bp->state)) { 16905 for (off = PCI_BASE_ADDRESS_0; 16906 off <= PCI_BASE_ADDRESS_5; off += 4) 16907 pci_write_config_dword(bp->pdev, off, 0); 16908 } 16909 pci_restore_state(pdev); 16910 pci_save_state(pdev); 16911 16912 bnxt_inv_fw_health_reg(bp); 16913 bnxt_try_map_fw_health_reg(bp); 16914 16915 /* In some PCIe AER scenarios, firmware may take up to 16916 * 10 seconds to become ready in the worst case. 16917 */ 16918 do { 16919 err = bnxt_try_recover_fw(bp); 16920 if (!err) 16921 break; 16922 retry++; 16923 } while (retry < BNXT_FW_SLOT_RESET_RETRY); 16924 16925 if (err) { 16926 dev_err(&pdev->dev, "Firmware not ready\n"); 16927 goto reset_exit; 16928 } 16929 16930 err = bnxt_hwrm_func_reset(bp); 16931 if (!err) 16932 result = PCI_ERS_RESULT_RECOVERED; 16933 16934 bnxt_ulp_irq_stop(bp); 16935 bnxt_clear_int_mode(bp); 16936 err = bnxt_init_int_mode(bp); 16937 bnxt_ulp_irq_restart(bp, err); 16938 } 16939 16940 reset_exit: 16941 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 16942 bnxt_clear_reservations(bp, true); 16943 netdev_unlock(netdev); 16944 16945 return result; 16946 } 16947 16948 /** 16949 * bnxt_io_resume - called when traffic can start flowing again. 16950 * @pdev: Pointer to PCI device 16951 * 16952 * This callback is called when the error recovery driver tells 16953 * us that it's OK to resume normal operation.
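 * On success the netdev is re-attached and the ULP (RoCE) driver and
 * any SR-IOV VFs are restarted.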
16954 */ 16955 static void bnxt_io_resume(struct pci_dev *pdev) 16956 { 16957 struct net_device *netdev = pci_get_drvdata(pdev); 16958 struct bnxt *bp = netdev_priv(netdev); 16959 int err; 16960 16961 netdev_info(bp->dev, "PCI Slot Resume\n"); 16962 netdev_lock(netdev); 16963 16964 err = bnxt_hwrm_func_qcaps(bp); 16965 if (!err) { 16966 if (netif_running(netdev)) 16967 err = bnxt_open(netdev); 16968 else 16969 err = bnxt_reserve_rings(bp, true); 16970 } 16971 16972 if (!err) 16973 netif_device_attach(netdev); 16974 16975 netdev_unlock(netdev); 16976 bnxt_ulp_start(bp, err); 16977 if (!err) 16978 bnxt_reenable_sriov(bp); 16979 } 16980 16981 static const struct pci_error_handlers bnxt_err_handler = { 16982 .error_detected = bnxt_io_error_detected, 16983 .slot_reset = bnxt_io_slot_reset, 16984 .resume = bnxt_io_resume 16985 }; 16986 16987 static struct pci_driver bnxt_pci_driver = { 16988 .name = DRV_MODULE_NAME, 16989 .id_table = bnxt_pci_tbl, 16990 .probe = bnxt_init_one, 16991 .remove = bnxt_remove_one, 16992 .shutdown = bnxt_shutdown, 16993 .driver.pm = BNXT_PM_OPS, 16994 .err_handler = &bnxt_err_handler, 16995 #if defined(CONFIG_BNXT_SRIOV) 16996 .sriov_configure = bnxt_sriov_configure, 16997 #endif 16998 }; 16999 17000 static int __init bnxt_init(void) 17001 { 17002 int err; 17003 17004 bnxt_debug_init(); 17005 err = pci_register_driver(&bnxt_pci_driver); 17006 if (err) { 17007 bnxt_debug_exit(); 17008 return err; 17009 } 17010 17011 return 0; 17012 } 17013 17014 static void __exit bnxt_exit(void) 17015 { 17016 pci_unregister_driver(&bnxt_pci_driver); 17017 if (bnxt_pf_wq) 17018 destroy_workqueue(bnxt_pf_wq); 17019 bnxt_debug_exit(); 17020 } 17021 17022 module_init(bnxt_init); 17023 module_exit(bnxt_exit); 17024
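/* PCI error recovery (a sketch of the sequence driven by the AER core, not
 * by this driver): bnxt_io_error_detected() detaches the netdev and frees
 * the rings, bnxt_io_slot_reset() re-enables the device and re-initializes
 * firmware and interrupts, and bnxt_io_resume() reopens the netdev and
 * restarts ULP/SR-IOV.
 */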