/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"
}, 106 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 107 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 108 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 109 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 110 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 111 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 112 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 113 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 114 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 115 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 116 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 117 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 118 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 119 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 120 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 121 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 122 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 123 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 124 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 125 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 126 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, 127 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 128 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 129 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 130 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 131 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 132 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 133 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 134 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 135 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 136 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 137 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 138 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 139 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 140 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 141 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, 142 }; 143 144 static const struct pci_device_id bnxt_pci_tbl[] = { 145 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 146 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 147 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 148 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 149 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 150 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 151 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 152 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 153 { 
PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 154 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 155 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 156 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 157 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 158 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 159 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 160 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 161 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 162 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 163 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 164 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 165 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 166 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 167 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 168 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 169 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 170 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 171 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 172 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 173 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 174 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 175 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 176 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 179 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 180 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 181 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 182 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 183 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, 184 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, 185 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, 186 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, 187 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, 188 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 189 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, 190 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 192 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, 193 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 194 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 195 #ifdef CONFIG_BNXT_SRIOV 196 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 197 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 198 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 199 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 201 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 202 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 203 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 204 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, 205 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 206 { PCI_VDEVICE(BROADCOM, 
0x16cb), .driver_data = NETXTREME_C_VF }, 207 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 208 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 209 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 210 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 211 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 212 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 213 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 214 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 215 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 216 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, 217 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 218 #endif 219 { 0 } 220 }; 221 222 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 223 224 static const u16 bnxt_vf_req_snif[] = { 225 HWRM_FUNC_CFG, 226 HWRM_FUNC_VF_CFG, 227 HWRM_PORT_PHY_QCFG, 228 HWRM_CFA_L2_FILTER_ALLOC, 229 }; 230 231 static const u16 bnxt_async_events_arr[] = { 232 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 233 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 234 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 235 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 236 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 237 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 238 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 239 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 240 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 241 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 242 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, 243 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 244 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 245 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, 246 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, 247 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, 248 }; 249 250 static struct workqueue_struct *bnxt_pf_wq; 251 252 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 253 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} 254 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} 255 256 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { 257 .ports = { 258 .src = 0, 259 .dst = 0, 260 }, 261 .addrs = { 262 .v6addrs = { 263 .src = BNXT_IPV6_MASK_NONE, 264 .dst = BNXT_IPV6_MASK_NONE, 265 }, 266 }, 267 }; 268 269 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { 270 .ports = { 271 .src = cpu_to_be16(0xffff), 272 .dst = cpu_to_be16(0xffff), 273 }, 274 .addrs = { 275 .v6addrs = { 276 .src = BNXT_IPV6_MASK_ALL, 277 .dst = BNXT_IPV6_MASK_ALL, 278 }, 279 }, 280 }; 281 282 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = { 283 .ports = { 284 .src = cpu_to_be16(0xffff), 285 .dst = cpu_to_be16(0xffff), 286 }, 287 .addrs = { 288 .v4addrs = { 289 .src = cpu_to_be32(0xffffffff), 290 .dst = cpu_to_be32(0xffffffff), 291 }, 292 }, 293 }; 294 295 static bool bnxt_vf_pciid(enum board_idx idx) 296 { 297 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 298 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || 299 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || 300 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); 301 } 302 303 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 304 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 305 306 #define BNXT_DB_CQ(db, idx) \ 307 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 308 309 #define BNXT_DB_NQ_P5(db, idx) \ 310 bnxt_writeq(bp, (db)->db_key64 | 
DBR_TYPE_NQ | DB_RING_IDX(db, idx),\ 311 (db)->doorbell) 312 313 #define BNXT_DB_NQ_P7(db, idx) \ 314 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \ 315 DB_RING_IDX(db, idx), (db)->doorbell) 316 317 #define BNXT_DB_CQ_ARM(db, idx) \ 318 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 319 320 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 321 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \ 322 DB_RING_IDX(db, idx), (db)->doorbell) 323 324 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 325 { 326 if (bp->flags & BNXT_FLAG_CHIP_P7) 327 BNXT_DB_NQ_P7(db, idx); 328 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 329 BNXT_DB_NQ_P5(db, idx); 330 else 331 BNXT_DB_CQ(db, idx); 332 } 333 334 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 335 { 336 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 337 BNXT_DB_NQ_ARM_P5(db, idx); 338 else 339 BNXT_DB_CQ_ARM(db, idx); 340 } 341 342 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 343 { 344 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 345 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | 346 DB_RING_IDX(db, idx), db->doorbell); 347 else 348 BNXT_DB_CQ(db, idx); 349 } 350 351 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 352 { 353 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) 354 return; 355 356 if (BNXT_PF(bp)) 357 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 358 else 359 schedule_delayed_work(&bp->fw_reset_task, delay); 360 } 361 362 static void __bnxt_queue_sp_work(struct bnxt *bp) 363 { 364 if (BNXT_PF(bp)) 365 queue_work(bnxt_pf_wq, &bp->sp_task); 366 else 367 schedule_work(&bp->sp_task); 368 } 369 370 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) 371 { 372 set_bit(event, &bp->sp_event); 373 __bnxt_queue_sp_work(bp); 374 } 375 376 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 377 { 378 if (!rxr->bnapi->in_reset) { 379 rxr->bnapi->in_reset = true; 380 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 381 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 382 else 383 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); 384 __bnxt_queue_sp_work(bp); 385 } 386 rxr->rx_next_cons = 0xffff; 387 } 388 389 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 390 u16 curr) 391 { 392 struct bnxt_napi *bnapi = txr->bnapi; 393 394 if (bnapi->tx_fault) 395 return; 396 397 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)", 398 txr->txq_index, txr->tx_hw_cons, 399 txr->tx_cons, txr->tx_prod, curr); 400 WARN_ON_ONCE(1); 401 bnapi->tx_fault = 1; 402 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 403 } 404 405 const u16 bnxt_lhint_arr[] = { 406 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 407 TX_BD_FLAGS_LHINT_512_TO_1023, 408 TX_BD_FLAGS_LHINT_1024_TO_2047, 409 TX_BD_FLAGS_LHINT_1024_TO_2047, 410 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 411 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 412 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 413 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 414 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 415 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 416 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 417 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 418 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 419 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 420 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 421 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 422 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 423 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 424 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 425 }; 426 427 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 
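/* Return the port id carried in the skb's METADATA_HW_PORT_MUX metadata
 * dst, or 0 when no such metadata is attached; the value is programmed
 * into the TX BD as the cfa_action.
 */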
428 { 429 struct metadata_dst *md_dst = skb_metadata_dst(skb); 430 431 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 432 return 0; 433 434 return md_dst->u.port_info.port_id; 435 } 436 437 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 438 u16 prod) 439 { 440 /* Sync BD data before updating doorbell */ 441 wmb(); 442 bnxt_db_write(bp, &txr->tx_db, prod); 443 txr->kick_pending = 0; 444 } 445 446 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 447 { 448 struct bnxt *bp = netdev_priv(dev); 449 struct tx_bd *txbd, *txbd0; 450 struct tx_bd_ext *txbd1; 451 struct netdev_queue *txq; 452 int i; 453 dma_addr_t mapping; 454 unsigned int length, pad = 0; 455 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 456 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 457 struct pci_dev *pdev = bp->pdev; 458 u16 prod, last_frag, txts_prod; 459 struct bnxt_tx_ring_info *txr; 460 struct bnxt_sw_tx_bd *tx_buf; 461 __le32 lflags = 0; 462 463 i = skb_get_queue_mapping(skb); 464 if (unlikely(i >= bp->tx_nr_rings)) { 465 dev_kfree_skb_any(skb); 466 dev_core_stats_tx_dropped_inc(dev); 467 return NETDEV_TX_OK; 468 } 469 470 txq = netdev_get_tx_queue(dev, i); 471 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 472 prod = txr->tx_prod; 473 474 free_size = bnxt_tx_avail(bp, txr); 475 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 476 /* We must have raced with NAPI cleanup */ 477 if (net_ratelimit() && txr->kick_pending) 478 netif_warn(bp, tx_err, dev, 479 "bnxt: ring busy w/ flush pending!\n"); 480 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 481 bp->tx_wake_thresh)) 482 return NETDEV_TX_BUSY; 483 } 484 485 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 486 goto tx_free; 487 488 length = skb->len; 489 len = skb_headlen(skb); 490 last_frag = skb_shinfo(skb)->nr_frags; 491 492 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 493 494 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 495 tx_buf->skb = skb; 496 tx_buf->nr_frags = last_frag; 497 498 vlan_tag_flags = 0; 499 cfa_action = bnxt_xmit_get_cfa_action(skb); 500 if (skb_vlan_tag_present(skb)) { 501 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 502 skb_vlan_tag_get(skb); 503 /* Currently supports 8021Q, 8021AD vlan offloads 504 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 505 */ 506 if (skb->vlan_proto == htons(ETH_P_8021Q)) 507 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 508 } 509 510 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && 511 ptp->tx_tstamp_en) { 512 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { 513 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 514 tx_buf->is_ts_pkt = 1; 515 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 516 } else if (!skb_is_gso(skb)) { 517 u16 seq_id, hdr_off; 518 519 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && 520 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { 521 if (vlan_tag_flags) 522 hdr_off += VLAN_HLEN; 523 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 524 tx_buf->is_ts_pkt = 1; 525 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 526 527 ptp->txts_req[txts_prod].tx_seqid = seq_id; 528 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; 529 tx_buf->txts_prod = txts_prod; 530 } 531 } 532 } 533 if (unlikely(skb->no_fcs)) 534 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 535 536 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 537 !lflags) { 538 struct tx_push_buffer *tx_push_buf = txr->tx_push; 539 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 540 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 541 void __iomem *db = 
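		/* TX push: for a short packet on an empty ring, the BDs and
		 * the packet data are written straight through the doorbell
		 * BAR below rather than being fetched by DMA.
		 */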
txr->tx_db.doorbell; 542 void *pdata = tx_push_buf->data; 543 u64 *end; 544 int j, push_len; 545 546 /* Set COAL_NOW to be ready quickly for the next push */ 547 tx_push->tx_bd_len_flags_type = 548 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 549 TX_BD_TYPE_LONG_TX_BD | 550 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 551 TX_BD_FLAGS_COAL_NOW | 552 TX_BD_FLAGS_PACKET_END | 553 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 554 555 if (skb->ip_summed == CHECKSUM_PARTIAL) 556 tx_push1->tx_bd_hsize_lflags = 557 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 558 else 559 tx_push1->tx_bd_hsize_lflags = 0; 560 561 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 562 tx_push1->tx_bd_cfa_action = 563 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 564 565 end = pdata + length; 566 end = PTR_ALIGN(end, 8) - 1; 567 *end = 0; 568 569 skb_copy_from_linear_data(skb, pdata, len); 570 pdata += len; 571 for (j = 0; j < last_frag; j++) { 572 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 573 void *fptr; 574 575 fptr = skb_frag_address_safe(frag); 576 if (!fptr) 577 goto normal_tx; 578 579 memcpy(pdata, fptr, skb_frag_size(frag)); 580 pdata += skb_frag_size(frag); 581 } 582 583 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 584 txbd->tx_bd_haddr = txr->data_mapping; 585 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 586 prod = NEXT_TX(prod); 587 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; 588 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 589 memcpy(txbd, tx_push1, sizeof(*txbd)); 590 prod = NEXT_TX(prod); 591 tx_push->doorbell = 592 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 593 DB_RING_IDX(&txr->tx_db, prod)); 594 WRITE_ONCE(txr->tx_prod, prod); 595 596 tx_buf->is_push = 1; 597 netdev_tx_sent_queue(txq, skb->len); 598 wmb(); /* Sync is_push and byte queue before pushing data */ 599 600 push_len = (length + sizeof(*tx_push) + 7) / 8; 601 if (push_len > 16) { 602 __iowrite64_copy(db, tx_push_buf, 16); 603 __iowrite32_copy(db + 4, tx_push_buf + 1, 604 (push_len - 16) << 1); 605 } else { 606 __iowrite64_copy(db, tx_push_buf, push_len); 607 } 608 609 goto tx_done; 610 } 611 612 normal_tx: 613 if (length < BNXT_MIN_PKT_SIZE) { 614 pad = BNXT_MIN_PKT_SIZE - length; 615 if (skb_pad(skb, pad)) 616 /* SKB already freed. 
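			 * skb_pad() consumes the skb on failure, so jump
			 * straight to the ring cleanup without freeing it
			 * again.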
*/ 617 goto tx_kick_pending; 618 length = BNXT_MIN_PKT_SIZE; 619 } 620 621 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 622 623 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 624 goto tx_free; 625 626 dma_unmap_addr_set(tx_buf, mapping, mapping); 627 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 628 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 629 630 txbd->tx_bd_haddr = cpu_to_le64(mapping); 631 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 632 633 prod = NEXT_TX(prod); 634 txbd1 = (struct tx_bd_ext *) 635 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 636 637 txbd1->tx_bd_hsize_lflags = lflags; 638 if (skb_is_gso(skb)) { 639 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 640 u32 hdr_len; 641 642 if (skb->encapsulation) { 643 if (udp_gso) 644 hdr_len = skb_inner_transport_offset(skb) + 645 sizeof(struct udphdr); 646 else 647 hdr_len = skb_inner_tcp_all_headers(skb); 648 } else if (udp_gso) { 649 hdr_len = skb_transport_offset(skb) + 650 sizeof(struct udphdr); 651 } else { 652 hdr_len = skb_tcp_all_headers(skb); 653 } 654 655 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 656 TX_BD_FLAGS_T_IPID | 657 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 658 length = skb_shinfo(skb)->gso_size; 659 txbd1->tx_bd_mss = cpu_to_le32(length); 660 length += hdr_len; 661 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 662 txbd1->tx_bd_hsize_lflags |= 663 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 664 txbd1->tx_bd_mss = 0; 665 } 666 667 length >>= 9; 668 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 669 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 670 skb->len); 671 i = 0; 672 goto tx_dma_error; 673 } 674 flags |= bnxt_lhint_arr[length]; 675 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 676 677 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 678 txbd1->tx_bd_cfa_action = 679 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 680 txbd0 = txbd; 681 for (i = 0; i < last_frag; i++) { 682 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 683 684 prod = NEXT_TX(prod); 685 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 686 687 len = skb_frag_size(frag); 688 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 689 DMA_TO_DEVICE); 690 691 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 692 goto tx_dma_error; 693 694 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 695 dma_unmap_addr_set(tx_buf, mapping, mapping); 696 697 txbd->tx_bd_haddr = cpu_to_le64(mapping); 698 699 flags = len << TX_BD_LEN_SHIFT; 700 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 701 } 702 703 flags &= ~TX_BD_LEN; 704 txbd->tx_bd_len_flags_type = 705 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 706 TX_BD_FLAGS_PACKET_END); 707 708 netdev_tx_sent_queue(txq, skb->len); 709 710 skb_tx_timestamp(skb); 711 712 prod = NEXT_TX(prod); 713 WRITE_ONCE(txr->tx_prod, prod); 714 715 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 716 bnxt_txr_db_kick(bp, txr, prod); 717 } else { 718 if (free_size >= bp->tx_wake_thresh) 719 txbd0->tx_bd_len_flags_type |= 720 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 721 txr->kick_pending = 1; 722 } 723 724 tx_done: 725 726 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 727 if (netdev_xmit_more() && !tx_buf->is_push) { 728 txbd0->tx_bd_len_flags_type &= 729 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 730 bnxt_txr_db_kick(bp, txr, prod); 731 } 732 733 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 734 bp->tx_wake_thresh); 735 } 736 return NETDEV_TX_OK; 737 738 
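/* Unwind path for the normal TX flow: unmap the skb head and any
 * fragments that were mapped before the failure, then fall through to
 * drop the skb and reset the ring entry.
 */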
tx_dma_error: 739 last_frag = i; 740 741 /* start back at beginning and unmap skb */ 742 prod = txr->tx_prod; 743 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 744 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 745 skb_headlen(skb), DMA_TO_DEVICE); 746 prod = NEXT_TX(prod); 747 748 /* unmap remaining mapped pages */ 749 for (i = 0; i < last_frag; i++) { 750 prod = NEXT_TX(prod); 751 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 752 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 753 skb_frag_size(&skb_shinfo(skb)->frags[i]), 754 DMA_TO_DEVICE); 755 } 756 757 tx_free: 758 dev_kfree_skb_any(skb); 759 tx_kick_pending: 760 if (BNXT_TX_PTP_IS_SET(lflags)) { 761 txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; 762 atomic64_inc(&bp->ptp_cfg->stats.ts_err); 763 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 764 /* set SKB to err so PTP worker will clean up */ 765 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); 766 } 767 if (txr->kick_pending) 768 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 769 txr->tx_buf_ring[txr->tx_prod].skb = NULL; 770 dev_core_stats_tx_dropped_inc(dev); 771 return NETDEV_TX_OK; 772 } 773 774 /* Returns true if some remaining TX packets not processed. */ 775 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 776 int budget) 777 { 778 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 779 struct pci_dev *pdev = bp->pdev; 780 u16 hw_cons = txr->tx_hw_cons; 781 unsigned int tx_bytes = 0; 782 u16 cons = txr->tx_cons; 783 int tx_pkts = 0; 784 bool rc = false; 785 786 while (RING_TX(bp, cons) != hw_cons) { 787 struct bnxt_sw_tx_bd *tx_buf; 788 struct sk_buff *skb; 789 bool is_ts_pkt; 790 int j, last; 791 792 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 793 skb = tx_buf->skb; 794 795 if (unlikely(!skb)) { 796 bnxt_sched_reset_txr(bp, txr, cons); 797 return rc; 798 } 799 800 is_ts_pkt = tx_buf->is_ts_pkt; 801 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { 802 rc = true; 803 break; 804 } 805 806 cons = NEXT_TX(cons); 807 tx_pkts++; 808 tx_bytes += skb->len; 809 tx_buf->skb = NULL; 810 tx_buf->is_ts_pkt = 0; 811 812 if (tx_buf->is_push) { 813 tx_buf->is_push = 0; 814 goto next_tx_int; 815 } 816 817 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 818 skb_headlen(skb), DMA_TO_DEVICE); 819 last = tx_buf->nr_frags; 820 821 for (j = 0; j < last; j++) { 822 cons = NEXT_TX(cons); 823 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 824 dma_unmap_page( 825 &pdev->dev, 826 dma_unmap_addr(tx_buf, mapping), 827 skb_frag_size(&skb_shinfo(skb)->frags[j]), 828 DMA_TO_DEVICE); 829 } 830 if (unlikely(is_ts_pkt)) { 831 if (BNXT_CHIP_P5(bp)) { 832 /* PTP worker takes ownership of the skb */ 833 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); 834 skb = NULL; 835 } 836 } 837 838 next_tx_int: 839 cons = NEXT_TX(cons); 840 841 dev_consume_skb_any(skb); 842 } 843 844 WRITE_ONCE(txr->tx_cons, cons); 845 846 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 847 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 848 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 849 850 return rc; 851 } 852 853 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 854 { 855 struct bnxt_tx_ring_info *txr; 856 bool more = false; 857 int i; 858 859 bnxt_for_each_napi_tx(i, bnapi, txr) { 860 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) 861 more |= __bnxt_tx_int(bp, txr, budget); 862 } 863 if (!more) 864 bnapi->events &= ~BNXT_TX_CMP_EVENT; 865 } 866 867 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, 
dma_addr_t *mapping, 868 struct bnxt_rx_ring_info *rxr, 869 unsigned int *offset, 870 gfp_t gfp) 871 { 872 struct page *page; 873 874 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 875 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 876 BNXT_RX_PAGE_SIZE); 877 } else { 878 page = page_pool_dev_alloc_pages(rxr->page_pool); 879 *offset = 0; 880 } 881 if (!page) 882 return NULL; 883 884 *mapping = page_pool_get_dma_addr(page) + *offset; 885 return page; 886 } 887 888 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 889 gfp_t gfp) 890 { 891 u8 *data; 892 struct pci_dev *pdev = bp->pdev; 893 894 if (gfp == GFP_ATOMIC) 895 data = napi_alloc_frag(bp->rx_buf_size); 896 else 897 data = netdev_alloc_frag(bp->rx_buf_size); 898 if (!data) 899 return NULL; 900 901 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 902 bp->rx_buf_use_size, bp->rx_dir, 903 DMA_ATTR_WEAK_ORDERING); 904 905 if (dma_mapping_error(&pdev->dev, *mapping)) { 906 skb_free_frag(data); 907 data = NULL; 908 } 909 return data; 910 } 911 912 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 913 u16 prod, gfp_t gfp) 914 { 915 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 916 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 917 dma_addr_t mapping; 918 919 if (BNXT_RX_PAGE_MODE(bp)) { 920 unsigned int offset; 921 struct page *page = 922 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 923 924 if (!page) 925 return -ENOMEM; 926 927 mapping += bp->rx_dma_offset; 928 rx_buf->data = page; 929 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 930 } else { 931 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); 932 933 if (!data) 934 return -ENOMEM; 935 936 rx_buf->data = data; 937 rx_buf->data_ptr = data + bp->rx_offset; 938 } 939 rx_buf->mapping = mapping; 940 941 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 942 return 0; 943 } 944 945 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 946 { 947 u16 prod = rxr->rx_prod; 948 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 949 struct bnxt *bp = rxr->bnapi->bp; 950 struct rx_bd *cons_bd, *prod_bd; 951 952 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 953 cons_rx_buf = &rxr->rx_buf_ring[cons]; 954 955 prod_rx_buf->data = data; 956 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 957 958 prod_rx_buf->mapping = cons_rx_buf->mapping; 959 960 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 961 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 962 963 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 964 } 965 966 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 967 { 968 u16 next, max = rxr->rx_agg_bmap_size; 969 970 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 971 if (next >= max) 972 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 973 return next; 974 } 975 976 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 977 struct bnxt_rx_ring_info *rxr, 978 u16 prod, gfp_t gfp) 979 { 980 struct rx_bd *rxbd = 981 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 982 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 983 struct page *page; 984 dma_addr_t mapping; 985 u16 sw_prod = rxr->rx_sw_agg_prod; 986 unsigned int offset = 0; 987 988 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 989 990 if (!page) 991 return -ENOMEM; 992 993 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 994 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 995 996 __set_bit(sw_prod, 
rxr->rx_agg_bmap); 997 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 998 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 999 1000 rx_agg_buf->page = page; 1001 rx_agg_buf->offset = offset; 1002 rx_agg_buf->mapping = mapping; 1003 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 1004 rxbd->rx_bd_opaque = sw_prod; 1005 return 0; 1006 } 1007 1008 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 1009 struct bnxt_cp_ring_info *cpr, 1010 u16 cp_cons, u16 curr) 1011 { 1012 struct rx_agg_cmp *agg; 1013 1014 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 1015 agg = (struct rx_agg_cmp *) 1016 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1017 return agg; 1018 } 1019 1020 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 1021 struct bnxt_rx_ring_info *rxr, 1022 u16 agg_id, u16 curr) 1023 { 1024 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 1025 1026 return &tpa_info->agg_arr[curr]; 1027 } 1028 1029 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 1030 u16 start, u32 agg_bufs, bool tpa) 1031 { 1032 struct bnxt_napi *bnapi = cpr->bnapi; 1033 struct bnxt *bp = bnapi->bp; 1034 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1035 u16 prod = rxr->rx_agg_prod; 1036 u16 sw_prod = rxr->rx_sw_agg_prod; 1037 bool p5_tpa = false; 1038 u32 i; 1039 1040 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1041 p5_tpa = true; 1042 1043 for (i = 0; i < agg_bufs; i++) { 1044 u16 cons; 1045 struct rx_agg_cmp *agg; 1046 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 1047 struct rx_bd *prod_bd; 1048 struct page *page; 1049 1050 if (p5_tpa) 1051 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 1052 else 1053 agg = bnxt_get_agg(bp, cpr, idx, start + i); 1054 cons = agg->rx_agg_cmp_opaque; 1055 __clear_bit(cons, rxr->rx_agg_bmap); 1056 1057 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1058 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1059 1060 __set_bit(sw_prod, rxr->rx_agg_bmap); 1061 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 1062 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1063 1064 /* It is possible for sw_prod to be equal to cons, so 1065 * set cons_rx_buf->page to NULL first. 
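		 * When the two entries alias, the page pointer is preserved
		 * in the local 'page' variable before the entry is cleared.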
1066 */ 1067 page = cons_rx_buf->page; 1068 cons_rx_buf->page = NULL; 1069 prod_rx_buf->page = page; 1070 prod_rx_buf->offset = cons_rx_buf->offset; 1071 1072 prod_rx_buf->mapping = cons_rx_buf->mapping; 1073 1074 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1075 1076 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1077 prod_bd->rx_bd_opaque = sw_prod; 1078 1079 prod = NEXT_RX_AGG(prod); 1080 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1081 } 1082 rxr->rx_agg_prod = prod; 1083 rxr->rx_sw_agg_prod = sw_prod; 1084 } 1085 1086 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1087 struct bnxt_rx_ring_info *rxr, 1088 u16 cons, void *data, u8 *data_ptr, 1089 dma_addr_t dma_addr, 1090 unsigned int offset_and_len) 1091 { 1092 unsigned int len = offset_and_len & 0xffff; 1093 struct page *page = data; 1094 u16 prod = rxr->rx_prod; 1095 struct sk_buff *skb; 1096 int err; 1097 1098 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1099 if (unlikely(err)) { 1100 bnxt_reuse_rx_data(rxr, cons, data); 1101 return NULL; 1102 } 1103 dma_addr -= bp->rx_dma_offset; 1104 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1105 bp->rx_dir); 1106 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1107 if (!skb) { 1108 page_pool_recycle_direct(rxr->page_pool, page); 1109 return NULL; 1110 } 1111 skb_mark_for_recycle(skb); 1112 skb_reserve(skb, bp->rx_offset); 1113 __skb_put(skb, len); 1114 1115 return skb; 1116 } 1117 1118 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1119 struct bnxt_rx_ring_info *rxr, 1120 u16 cons, void *data, u8 *data_ptr, 1121 dma_addr_t dma_addr, 1122 unsigned int offset_and_len) 1123 { 1124 unsigned int payload = offset_and_len >> 16; 1125 unsigned int len = offset_and_len & 0xffff; 1126 skb_frag_t *frag; 1127 struct page *page = data; 1128 u16 prod = rxr->rx_prod; 1129 struct sk_buff *skb; 1130 int off, err; 1131 1132 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1133 if (unlikely(err)) { 1134 bnxt_reuse_rx_data(rxr, cons, data); 1135 return NULL; 1136 } 1137 dma_addr -= bp->rx_dma_offset; 1138 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1139 bp->rx_dir); 1140 1141 if (unlikely(!payload)) 1142 payload = eth_get_headlen(bp->dev, data_ptr, len); 1143 1144 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1145 if (!skb) { 1146 page_pool_recycle_direct(rxr->page_pool, page); 1147 return NULL; 1148 } 1149 1150 skb_mark_for_recycle(skb); 1151 off = (void *)data_ptr - page_address(page); 1152 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1153 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1154 payload + NET_IP_ALIGN); 1155 1156 frag = &skb_shinfo(skb)->frags[0]; 1157 skb_frag_size_sub(frag, payload); 1158 skb_frag_off_add(frag, payload); 1159 skb->data_len -= payload; 1160 skb->tail += payload; 1161 1162 return skb; 1163 } 1164 1165 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1166 struct bnxt_rx_ring_info *rxr, u16 cons, 1167 void *data, u8 *data_ptr, 1168 dma_addr_t dma_addr, 1169 unsigned int offset_and_len) 1170 { 1171 u16 prod = rxr->rx_prod; 1172 struct sk_buff *skb; 1173 int err; 1174 1175 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1176 if (unlikely(err)) { 1177 bnxt_reuse_rx_data(rxr, cons, data); 1178 return NULL; 1179 } 1180 1181 skb = napi_build_skb(data, bp->rx_buf_size); 1182 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1183 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 1184 if (!skb) { 1185 
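		/* napi_build_skb() failed; a replacement buffer was already
		 * posted to the ring by bnxt_alloc_rx_data() above, so just
		 * free the now-unmapped fragment.
		 */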
skb_free_frag(data); 1186 return NULL; 1187 } 1188 1189 skb_reserve(skb, bp->rx_offset); 1190 skb_put(skb, offset_and_len & 0xffff); 1191 return skb; 1192 } 1193 1194 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1195 struct bnxt_cp_ring_info *cpr, 1196 struct skb_shared_info *shinfo, 1197 u16 idx, u32 agg_bufs, bool tpa, 1198 struct xdp_buff *xdp) 1199 { 1200 struct bnxt_napi *bnapi = cpr->bnapi; 1201 struct pci_dev *pdev = bp->pdev; 1202 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1203 u16 prod = rxr->rx_agg_prod; 1204 u32 i, total_frag_len = 0; 1205 bool p5_tpa = false; 1206 1207 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1208 p5_tpa = true; 1209 1210 for (i = 0; i < agg_bufs; i++) { 1211 skb_frag_t *frag = &shinfo->frags[i]; 1212 u16 cons, frag_len; 1213 struct rx_agg_cmp *agg; 1214 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1215 struct page *page; 1216 dma_addr_t mapping; 1217 1218 if (p5_tpa) 1219 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1220 else 1221 agg = bnxt_get_agg(bp, cpr, idx, i); 1222 cons = agg->rx_agg_cmp_opaque; 1223 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1224 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1225 1226 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1227 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1228 cons_rx_buf->offset, frag_len); 1229 shinfo->nr_frags = i + 1; 1230 __clear_bit(cons, rxr->rx_agg_bmap); 1231 1232 /* It is possible for bnxt_alloc_rx_page() to allocate 1233 * a sw_prod index that equals the cons index, so we 1234 * need to clear the cons entry now. 1235 */ 1236 mapping = cons_rx_buf->mapping; 1237 page = cons_rx_buf->page; 1238 cons_rx_buf->page = NULL; 1239 1240 if (xdp && page_is_pfmemalloc(page)) 1241 xdp_buff_set_frag_pfmemalloc(xdp); 1242 1243 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1244 --shinfo->nr_frags; 1245 cons_rx_buf->page = page; 1246 1247 /* Update prod since possibly some pages have been 1248 * allocated already. 
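			 * bnxt_reuse_rx_agg_bufs() below then recycles the
			 * remaining agg_bufs - i completions back onto the
			 * aggregation ring.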
1249 */ 1250 rxr->rx_agg_prod = prod; 1251 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1252 return 0; 1253 } 1254 1255 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1256 bp->rx_dir); 1257 1258 total_frag_len += frag_len; 1259 prod = NEXT_RX_AGG(prod); 1260 } 1261 rxr->rx_agg_prod = prod; 1262 return total_frag_len; 1263 } 1264 1265 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1266 struct bnxt_cp_ring_info *cpr, 1267 struct sk_buff *skb, u16 idx, 1268 u32 agg_bufs, bool tpa) 1269 { 1270 struct skb_shared_info *shinfo = skb_shinfo(skb); 1271 u32 total_frag_len = 0; 1272 1273 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1274 agg_bufs, tpa, NULL); 1275 if (!total_frag_len) { 1276 skb_mark_for_recycle(skb); 1277 dev_kfree_skb(skb); 1278 return NULL; 1279 } 1280 1281 skb->data_len += total_frag_len; 1282 skb->len += total_frag_len; 1283 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1284 return skb; 1285 } 1286 1287 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1288 struct bnxt_cp_ring_info *cpr, 1289 struct xdp_buff *xdp, u16 idx, 1290 u32 agg_bufs, bool tpa) 1291 { 1292 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1293 u32 total_frag_len = 0; 1294 1295 if (!xdp_buff_has_frags(xdp)) 1296 shinfo->nr_frags = 0; 1297 1298 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1299 idx, agg_bufs, tpa, xdp); 1300 if (total_frag_len) { 1301 xdp_buff_set_frags_flag(xdp); 1302 shinfo->nr_frags = agg_bufs; 1303 shinfo->xdp_frags_size = total_frag_len; 1304 } 1305 return total_frag_len; 1306 } 1307 1308 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1309 u8 agg_bufs, u32 *raw_cons) 1310 { 1311 u16 last; 1312 struct rx_agg_cmp *agg; 1313 1314 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1315 last = RING_CMP(*raw_cons); 1316 agg = (struct rx_agg_cmp *) 1317 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1318 return RX_AGG_CMP_VALID(agg, *raw_cons); 1319 } 1320 1321 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, 1322 unsigned int len, 1323 dma_addr_t mapping) 1324 { 1325 struct bnxt *bp = bnapi->bp; 1326 struct pci_dev *pdev = bp->pdev; 1327 struct sk_buff *skb; 1328 1329 skb = napi_alloc_skb(&bnapi->napi, len); 1330 if (!skb) 1331 return NULL; 1332 1333 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1334 bp->rx_dir); 1335 1336 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1337 len + NET_IP_ALIGN); 1338 1339 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1340 bp->rx_dir); 1341 1342 skb_put(skb, len); 1343 1344 return skb; 1345 } 1346 1347 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1348 unsigned int len, 1349 dma_addr_t mapping) 1350 { 1351 return bnxt_copy_data(bnapi, data, len, mapping); 1352 } 1353 1354 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, 1355 struct xdp_buff *xdp, 1356 unsigned int len, 1357 dma_addr_t mapping) 1358 { 1359 unsigned int metasize = 0; 1360 u8 *data = xdp->data; 1361 struct sk_buff *skb; 1362 1363 len = xdp->data_end - xdp->data_meta; 1364 metasize = xdp->data - xdp->data_meta; 1365 data = xdp->data_meta; 1366 1367 skb = bnxt_copy_data(bnapi, data, len, mapping); 1368 if (!skb) 1369 return skb; 1370 1371 if (metasize) { 1372 skb_metadata_set(skb, metasize); 1373 __skb_pull(skb, metasize); 1374 } 1375 1376 return skb; 1377 } 1378 1379 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1380 u32 *raw_cons, void *cmp) 1381 { 
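	/* Consume an RX completion without processing it: advance *raw_cons
	 * past any aggregation completions that belong to it, or return
	 * -EBUSY if those entries have not all been written yet.
	 */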
1382 struct rx_cmp *rxcmp = cmp; 1383 u32 tmp_raw_cons = *raw_cons; 1384 u8 cmp_type, agg_bufs = 0; 1385 1386 cmp_type = RX_CMP_TYPE(rxcmp); 1387 1388 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1389 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1390 RX_CMP_AGG_BUFS) >> 1391 RX_CMP_AGG_BUFS_SHIFT; 1392 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1393 struct rx_tpa_end_cmp *tpa_end = cmp; 1394 1395 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1396 return 0; 1397 1398 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1399 } 1400 1401 if (agg_bufs) { 1402 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1403 return -EBUSY; 1404 } 1405 *raw_cons = tmp_raw_cons; 1406 return 0; 1407 } 1408 1409 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1410 { 1411 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1412 u16 idx = agg_id & MAX_TPA_P5_MASK; 1413 1414 if (test_bit(idx, map->agg_idx_bmap)) 1415 idx = find_first_zero_bit(map->agg_idx_bmap, 1416 BNXT_AGG_IDX_BMAP_SIZE); 1417 __set_bit(idx, map->agg_idx_bmap); 1418 map->agg_id_tbl[agg_id] = idx; 1419 return idx; 1420 } 1421 1422 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1423 { 1424 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1425 1426 __clear_bit(idx, map->agg_idx_bmap); 1427 } 1428 1429 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1430 { 1431 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1432 1433 return map->agg_id_tbl[agg_id]; 1434 } 1435 1436 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1437 struct rx_tpa_start_cmp *tpa_start, 1438 struct rx_tpa_start_cmp_ext *tpa_start1) 1439 { 1440 tpa_info->cfa_code_valid = 1; 1441 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1442 tpa_info->vlan_valid = 0; 1443 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1444 tpa_info->vlan_valid = 1; 1445 tpa_info->metadata = 1446 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1447 } 1448 } 1449 1450 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1451 struct rx_tpa_start_cmp *tpa_start, 1452 struct rx_tpa_start_cmp_ext *tpa_start1) 1453 { 1454 tpa_info->vlan_valid = 0; 1455 if (TPA_START_VLAN_VALID(tpa_start)) { 1456 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1457 u32 vlan_proto = ETH_P_8021Q; 1458 1459 tpa_info->vlan_valid = 1; 1460 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1461 vlan_proto = ETH_P_8021AD; 1462 tpa_info->metadata = vlan_proto << 16 | 1463 TPA_START_METADATA0_TCI(tpa_start1); 1464 } 1465 } 1466 1467 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1468 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1469 struct rx_tpa_start_cmp_ext *tpa_start1) 1470 { 1471 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1472 struct bnxt_tpa_info *tpa_info; 1473 u16 cons, prod, agg_id; 1474 struct rx_bd *prod_bd; 1475 dma_addr_t mapping; 1476 1477 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1478 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1479 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1480 } else { 1481 agg_id = TPA_START_AGG_ID(tpa_start); 1482 } 1483 cons = tpa_start->rx_tpa_start_cmp_opaque; 1484 prod = rxr->rx_prod; 1485 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1486 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1487 tpa_info = &rxr->rx_tpa[agg_id]; 1488 1489 if (unlikely(cons != rxr->rx_next_cons || 1490 TPA_START_ERROR(tpa_start))) { 1491 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1492 cons, rxr->rx_next_cons, 1493 
TPA_START_ERROR_CODE(tpa_start1)); 1494 bnxt_sched_reset_rxr(bp, rxr); 1495 return; 1496 } 1497 prod_rx_buf->data = tpa_info->data; 1498 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1499 1500 mapping = tpa_info->mapping; 1501 prod_rx_buf->mapping = mapping; 1502 1503 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1504 1505 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1506 1507 tpa_info->data = cons_rx_buf->data; 1508 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1509 cons_rx_buf->data = NULL; 1510 tpa_info->mapping = cons_rx_buf->mapping; 1511 1512 tpa_info->len = 1513 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1514 RX_TPA_START_CMP_LEN_SHIFT; 1515 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1516 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1517 tpa_info->gso_type = SKB_GSO_TCPV4; 1518 if (TPA_START_IS_IPV6(tpa_start1)) 1519 tpa_info->gso_type = SKB_GSO_TCPV6; 1520 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1521 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && 1522 TPA_START_HASH_TYPE(tpa_start) == 3) 1523 tpa_info->gso_type = SKB_GSO_TCPV6; 1524 tpa_info->rss_hash = 1525 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1526 } else { 1527 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1528 tpa_info->gso_type = 0; 1529 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1530 } 1531 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1532 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1533 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1534 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1535 else 1536 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1537 tpa_info->agg_count = 0; 1538 1539 rxr->rx_prod = NEXT_RX(prod); 1540 cons = RING_RX(bp, NEXT_RX(cons)); 1541 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1542 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1543 1544 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1545 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1546 cons_rx_buf->data = NULL; 1547 } 1548 1549 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1550 { 1551 if (agg_bufs) 1552 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1553 } 1554 1555 #ifdef CONFIG_INET 1556 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1557 { 1558 struct udphdr *uh = NULL; 1559 1560 if (ip_proto == htons(ETH_P_IP)) { 1561 struct iphdr *iph = (struct iphdr *)skb->data; 1562 1563 if (iph->protocol == IPPROTO_UDP) 1564 uh = (struct udphdr *)(iph + 1); 1565 } else { 1566 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1567 1568 if (iph->nexthdr == IPPROTO_UDP) 1569 uh = (struct udphdr *)(iph + 1); 1570 } 1571 if (uh) { 1572 if (uh->check) 1573 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1574 else 1575 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1576 } 1577 } 1578 #endif 1579 1580 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1581 int payload_off, int tcp_ts, 1582 struct sk_buff *skb) 1583 { 1584 #ifdef CONFIG_INET 1585 struct tcphdr *th; 1586 int len, nw_off; 1587 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1588 u32 hdr_info = tpa_info->hdr_info; 1589 bool loopback = false; 1590 1591 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1592 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1593 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1594 1595 /* If the packet is an internal loopback packet, the offsets will 1596 * have an extra 4 bytes. 
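	 * Either inner_mac_off == 4 or an unrecognized inner ethertype
	 * identifies that case, and all three offsets are then walked
	 * back by 4 below.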
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract 4 from all offsets */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if the vf-rep dev is NULL, the packet must belong to the PF */
	return dev ?
dev : bp->dev; 1760 } 1761 1762 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1763 struct bnxt_cp_ring_info *cpr, 1764 u32 *raw_cons, 1765 struct rx_tpa_end_cmp *tpa_end, 1766 struct rx_tpa_end_cmp_ext *tpa_end1, 1767 u8 *event) 1768 { 1769 struct bnxt_napi *bnapi = cpr->bnapi; 1770 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1771 struct net_device *dev = bp->dev; 1772 u8 *data_ptr, agg_bufs; 1773 unsigned int len; 1774 struct bnxt_tpa_info *tpa_info; 1775 dma_addr_t mapping; 1776 struct sk_buff *skb; 1777 u16 idx = 0, agg_id; 1778 void *data; 1779 bool gro; 1780 1781 if (unlikely(bnapi->in_reset)) { 1782 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1783 1784 if (rc < 0) 1785 return ERR_PTR(-EBUSY); 1786 return NULL; 1787 } 1788 1789 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1790 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1791 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1792 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1793 tpa_info = &rxr->rx_tpa[agg_id]; 1794 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1795 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1796 agg_bufs, tpa_info->agg_count); 1797 agg_bufs = tpa_info->agg_count; 1798 } 1799 tpa_info->agg_count = 0; 1800 *event |= BNXT_AGG_EVENT; 1801 bnxt_free_agg_idx(rxr, agg_id); 1802 idx = agg_id; 1803 gro = !!(bp->flags & BNXT_FLAG_GRO); 1804 } else { 1805 agg_id = TPA_END_AGG_ID(tpa_end); 1806 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1807 tpa_info = &rxr->rx_tpa[agg_id]; 1808 idx = RING_CMP(*raw_cons); 1809 if (agg_bufs) { 1810 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1811 return ERR_PTR(-EBUSY); 1812 1813 *event |= BNXT_AGG_EVENT; 1814 idx = NEXT_CMP(idx); 1815 } 1816 gro = !!TPA_END_GRO(tpa_end); 1817 } 1818 data = tpa_info->data; 1819 data_ptr = tpa_info->data_ptr; 1820 prefetch(data_ptr); 1821 len = tpa_info->len; 1822 mapping = tpa_info->mapping; 1823 1824 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1825 bnxt_abort_tpa(cpr, idx, agg_bufs); 1826 if (agg_bufs > MAX_SKB_FRAGS) 1827 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1828 agg_bufs, (int)MAX_SKB_FRAGS); 1829 return NULL; 1830 } 1831 1832 if (len <= bp->rx_copy_thresh) { 1833 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1834 if (!skb) { 1835 bnxt_abort_tpa(cpr, idx, agg_bufs); 1836 cpr->sw_stats->rx.rx_oom_discards += 1; 1837 return NULL; 1838 } 1839 } else { 1840 u8 *new_data; 1841 dma_addr_t new_mapping; 1842 1843 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); 1844 if (!new_data) { 1845 bnxt_abort_tpa(cpr, idx, agg_bufs); 1846 cpr->sw_stats->rx.rx_oom_discards += 1; 1847 return NULL; 1848 } 1849 1850 tpa_info->data = new_data; 1851 tpa_info->data_ptr = new_data + bp->rx_offset; 1852 tpa_info->mapping = new_mapping; 1853 1854 skb = napi_build_skb(data, bp->rx_buf_size); 1855 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1856 bp->rx_buf_use_size, bp->rx_dir, 1857 DMA_ATTR_WEAK_ORDERING); 1858 1859 if (!skb) { 1860 skb_free_frag(data); 1861 bnxt_abort_tpa(cpr, idx, agg_bufs); 1862 cpr->sw_stats->rx.rx_oom_discards += 1; 1863 return NULL; 1864 } 1865 skb_reserve(skb, bp->rx_offset); 1866 skb_put(skb, len); 1867 } 1868 1869 if (agg_bufs) { 1870 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1871 if (!skb) { 1872 /* Page reuse already handled by bnxt_rx_pages(). 
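* Only the out-of-memory discard is counted here before bailing out.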
*/ 1873 cpr->sw_stats->rx.rx_oom_discards += 1; 1874 return NULL; 1875 } 1876 } 1877 1878 if (tpa_info->cfa_code_valid) 1879 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1880 skb->protocol = eth_type_trans(skb, dev); 1881 1882 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1883 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1884 1885 if (tpa_info->vlan_valid && 1886 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1887 __be16 vlan_proto = htons(tpa_info->metadata >> 1888 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1889 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1890 1891 if (eth_type_vlan(vlan_proto)) { 1892 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1893 } else { 1894 dev_kfree_skb(skb); 1895 return NULL; 1896 } 1897 } 1898 1899 skb_checksum_none_assert(skb); 1900 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1901 skb->ip_summed = CHECKSUM_UNNECESSARY; 1902 skb->csum_level = 1903 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1904 } 1905 1906 if (gro) 1907 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1908 1909 return skb; 1910 } 1911 1912 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1913 struct rx_agg_cmp *rx_agg) 1914 { 1915 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1916 struct bnxt_tpa_info *tpa_info; 1917 1918 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1919 tpa_info = &rxr->rx_tpa[agg_id]; 1920 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1921 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1922 } 1923 1924 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1925 struct sk_buff *skb) 1926 { 1927 skb_mark_for_recycle(skb); 1928 1929 if (skb->dev != bp->dev) { 1930 /* this packet belongs to a vf-rep */ 1931 bnxt_vf_rep_rx(bp, skb); 1932 return; 1933 } 1934 skb_record_rx_queue(skb, bnapi->index); 1935 napi_gro_receive(&bnapi->napi, skb); 1936 } 1937 1938 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 1939 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 1940 { 1941 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 1942 1943 if (BNXT_PTP_RX_TS_VALID(flags)) 1944 goto ts_valid; 1945 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 1946 return false; 1947 1948 ts_valid: 1949 *cmpl_ts = ts; 1950 return true; 1951 } 1952 1953 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1954 struct rx_cmp *rxcmp, 1955 struct rx_cmp_ext *rxcmp1) 1956 { 1957 __be16 vlan_proto; 1958 u16 vtag; 1959 1960 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1961 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1962 u32 meta_data; 1963 1964 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1965 return skb; 1966 1967 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1968 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1969 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1970 if (eth_type_vlan(vlan_proto)) 1971 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1972 else 1973 goto vlan_err; 1974 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 1975 if (RX_CMP_VLAN_VALID(rxcmp)) { 1976 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 1977 1978 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 1979 vlan_proto = htons(ETH_P_8021Q); 1980 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 1981 vlan_proto = htons(ETH_P_8021AD); 1982 else 1983 goto vlan_err; 1984 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 1985 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1986 } 1987 } 1988 return skb; 1989 vlan_err: 1990 dev_kfree_skb(skb); 1991 return NULL; 1992 } 1993 1994 static enum 
pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 1995 struct rx_cmp *rxcmp) 1996 { 1997 u8 ext_op; 1998 1999 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 2000 switch (ext_op) { 2001 case EXT_OP_INNER_4: 2002 case EXT_OP_OUTER_4: 2003 case EXT_OP_INNFL_3: 2004 case EXT_OP_OUTFL_3: 2005 return PKT_HASH_TYPE_L4; 2006 default: 2007 return PKT_HASH_TYPE_L3; 2008 } 2009 } 2010 2011 /* returns the following: 2012 * 1 - 1 packet successfully received 2013 * 0 - successful TPA_START, packet not completed yet 2014 * -EBUSY - completion ring does not have all the agg buffers yet 2015 * -ENOMEM - packet aborted due to out of memory 2016 * -EIO - packet aborted due to hw error indicated in BD 2017 */ 2018 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2019 u32 *raw_cons, u8 *event) 2020 { 2021 struct bnxt_napi *bnapi = cpr->bnapi; 2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2023 struct net_device *dev = bp->dev; 2024 struct rx_cmp *rxcmp; 2025 struct rx_cmp_ext *rxcmp1; 2026 u32 tmp_raw_cons = *raw_cons; 2027 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 2028 struct bnxt_sw_rx_bd *rx_buf; 2029 unsigned int len; 2030 u8 *data_ptr, agg_bufs, cmp_type; 2031 bool xdp_active = false; 2032 dma_addr_t dma_addr; 2033 struct sk_buff *skb; 2034 struct xdp_buff xdp; 2035 u32 flags, misc; 2036 u32 cmpl_ts; 2037 void *data; 2038 int rc = 0; 2039 2040 rxcmp = (struct rx_cmp *) 2041 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2042 2043 cmp_type = RX_CMP_TYPE(rxcmp); 2044 2045 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 2046 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 2047 goto next_rx_no_prod_no_len; 2048 } 2049 2050 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2051 cp_cons = RING_CMP(tmp_raw_cons); 2052 rxcmp1 = (struct rx_cmp_ext *) 2053 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2054 2055 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2056 return -EBUSY; 2057 2058 /* The valid test of the entry must be done first before 2059 * reading any further. 
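* The dma_rmb() below provides that ordering before the rest of the completion entry is read.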
2060 */ 2061 dma_rmb(); 2062 prod = rxr->rx_prod; 2063 2064 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 2065 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2066 bnxt_tpa_start(bp, rxr, cmp_type, 2067 (struct rx_tpa_start_cmp *)rxcmp, 2068 (struct rx_tpa_start_cmp_ext *)rxcmp1); 2069 2070 *event |= BNXT_RX_EVENT; 2071 goto next_rx_no_prod_no_len; 2072 2073 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2074 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 2075 (struct rx_tpa_end_cmp *)rxcmp, 2076 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 2077 2078 if (IS_ERR(skb)) 2079 return -EBUSY; 2080 2081 rc = -ENOMEM; 2082 if (likely(skb)) { 2083 bnxt_deliver_skb(bp, bnapi, skb); 2084 rc = 1; 2085 } 2086 *event |= BNXT_RX_EVENT; 2087 goto next_rx_no_prod_no_len; 2088 } 2089 2090 cons = rxcmp->rx_cmp_opaque; 2091 if (unlikely(cons != rxr->rx_next_cons)) { 2092 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 2093 2094 /* 0xffff is forced error, don't print it */ 2095 if (rxr->rx_next_cons != 0xffff) 2096 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 2097 cons, rxr->rx_next_cons); 2098 bnxt_sched_reset_rxr(bp, rxr); 2099 if (rc1) 2100 return rc1; 2101 goto next_rx_no_prod_no_len; 2102 } 2103 rx_buf = &rxr->rx_buf_ring[cons]; 2104 data = rx_buf->data; 2105 data_ptr = rx_buf->data_ptr; 2106 prefetch(data_ptr); 2107 2108 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2109 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2110 2111 if (agg_bufs) { 2112 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2113 return -EBUSY; 2114 2115 cp_cons = NEXT_CMP(cp_cons); 2116 *event |= BNXT_AGG_EVENT; 2117 } 2118 *event |= BNXT_RX_EVENT; 2119 2120 rx_buf->data = NULL; 2121 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2122 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2123 2124 bnxt_reuse_rx_data(rxr, cons, data); 2125 if (agg_bufs) 2126 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2127 false); 2128 2129 rc = -EIO; 2130 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2131 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; 2132 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2133 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2134 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2135 rx_err); 2136 bnxt_sched_reset_rxr(bp, rxr); 2137 } 2138 } 2139 goto next_rx_no_len; 2140 } 2141 2142 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2143 len = flags >> RX_CMP_LEN_SHIFT; 2144 dma_addr = rx_buf->mapping; 2145 2146 if (bnxt_xdp_attached(bp, rxr)) { 2147 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2148 if (agg_bufs) { 2149 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2150 cp_cons, agg_bufs, 2151 false); 2152 if (!frag_len) 2153 goto oom_next_rx; 2154 } 2155 xdp_active = true; 2156 } 2157 2158 if (xdp_active) { 2159 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { 2160 rc = 1; 2161 goto next_rx; 2162 } 2163 } 2164 2165 if (len <= bp->rx_copy_thresh) { 2166 if (!xdp_active) 2167 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2168 else 2169 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); 2170 bnxt_reuse_rx_data(rxr, cons, data); 2171 if (!skb) { 2172 if (agg_bufs) { 2173 if (!xdp_active) 2174 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2175 agg_bufs, false); 2176 else 2177 bnxt_xdp_buff_frags_free(rxr, &xdp); 2178 } 2179 goto oom_next_rx; 2180 } 2181 } else { 2182 u32 payload; 2183 2184 if (rx_buf->data_ptr == data_ptr) 2185 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2186 else 2187 payload = 0; 2188 skb = 
bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2189 payload | len); 2190 if (!skb) 2191 goto oom_next_rx; 2192 } 2193 2194 if (agg_bufs) { 2195 if (!xdp_active) { 2196 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2197 if (!skb) 2198 goto oom_next_rx; 2199 } else { 2200 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); 2201 if (!skb) { 2202 /* we should be able to free the old skb here */ 2203 bnxt_xdp_buff_frags_free(rxr, &xdp); 2204 goto oom_next_rx; 2205 } 2206 } 2207 } 2208 2209 if (RX_CMP_HASH_VALID(rxcmp)) { 2210 enum pkt_hash_types type; 2211 2212 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2213 type = bnxt_rss_ext_op(bp, rxcmp); 2214 } else { 2215 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 2216 2217 /* RSS profiles 1 and 3 with extract code 0 for inner 2218 * 4-tuple 2219 */ 2220 if (hash_type != 1 && hash_type != 3) 2221 type = PKT_HASH_TYPE_L3; 2222 else 2223 type = PKT_HASH_TYPE_L4; 2224 } 2225 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2226 } 2227 2228 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2229 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2230 skb->protocol = eth_type_trans(skb, dev); 2231 2232 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2233 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2234 if (!skb) 2235 goto next_rx; 2236 } 2237 2238 skb_checksum_none_assert(skb); 2239 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2240 if (dev->features & NETIF_F_RXCSUM) { 2241 skb->ip_summed = CHECKSUM_UNNECESSARY; 2242 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2243 } 2244 } else { 2245 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2246 if (dev->features & NETIF_F_RXCSUM) 2247 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; 2248 } 2249 } 2250 2251 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2252 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2253 u64 ns, ts; 2254 2255 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2256 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2257 unsigned long flags; 2258 2259 spin_lock_irqsave(&ptp->ptp_lock, flags); 2260 ns = timecounter_cyc2time(&ptp->tc, ts); 2261 spin_unlock_irqrestore(&ptp->ptp_lock, flags); 2262 memset(skb_hwtstamps(skb), 0, 2263 sizeof(*skb_hwtstamps(skb))); 2264 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2265 } 2266 } 2267 } 2268 bnxt_deliver_skb(bp, bnapi, skb); 2269 rc = 1; 2270 2271 next_rx: 2272 cpr->rx_packets += 1; 2273 cpr->rx_bytes += len; 2274 2275 next_rx_no_len: 2276 rxr->rx_prod = NEXT_RX(prod); 2277 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2278 2279 next_rx_no_prod_no_len: 2280 *raw_cons = tmp_raw_cons; 2281 2282 return rc; 2283 2284 oom_next_rx: 2285 cpr->sw_stats->rx.rx_oom_discards += 1; 2286 rc = -ENOMEM; 2287 goto next_rx; 2288 } 2289 2290 /* In netpoll mode, if we are using a combined completion ring, we need to 2291 * discard the rx packets and recycle the buffers. 
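* Each completion is flagged with a forced error below so that bnxt_rx_pkt() drops the packet and recycles its buffers.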
2292 */ 2293 static int bnxt_force_rx_discard(struct bnxt *bp, 2294 struct bnxt_cp_ring_info *cpr, 2295 u32 *raw_cons, u8 *event) 2296 { 2297 u32 tmp_raw_cons = *raw_cons; 2298 struct rx_cmp_ext *rxcmp1; 2299 struct rx_cmp *rxcmp; 2300 u16 cp_cons; 2301 u8 cmp_type; 2302 int rc; 2303 2304 cp_cons = RING_CMP(tmp_raw_cons); 2305 rxcmp = (struct rx_cmp *) 2306 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2307 2308 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2309 cp_cons = RING_CMP(tmp_raw_cons); 2310 rxcmp1 = (struct rx_cmp_ext *) 2311 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2312 2313 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2314 return -EBUSY; 2315 2316 /* The valid test of the entry must be done first before 2317 * reading any further. 2318 */ 2319 dma_rmb(); 2320 cmp_type = RX_CMP_TYPE(rxcmp); 2321 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2322 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2323 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2324 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2325 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2326 struct rx_tpa_end_cmp_ext *tpa_end1; 2327 2328 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2329 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2330 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2331 } 2332 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2333 if (rc && rc != -EBUSY) 2334 cpr->sw_stats->rx.rx_netpoll_discards += 1; 2335 return rc; 2336 } 2337 2338 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2339 { 2340 struct bnxt_fw_health *fw_health = bp->fw_health; 2341 u32 reg = fw_health->regs[reg_idx]; 2342 u32 reg_type, reg_off, val = 0; 2343 2344 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2345 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2346 switch (reg_type) { 2347 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2348 pci_read_config_dword(bp->pdev, reg_off, &val); 2349 break; 2350 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2351 reg_off = fw_health->mapped_regs[reg_idx]; 2352 fallthrough; 2353 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2354 val = readl(bp->bar0 + reg_off); 2355 break; 2356 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2357 val = readl(bp->bar1 + reg_off); 2358 break; 2359 } 2360 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2361 val &= fw_health->fw_reset_inprog_reg_mask; 2362 return val; 2363 } 2364 2365 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2366 { 2367 int i; 2368 2369 for (i = 0; i < bp->rx_nr_rings; i++) { 2370 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2371 struct bnxt_ring_grp_info *grp_info; 2372 2373 grp_info = &bp->grp_info[grp_idx]; 2374 if (grp_info->agg_fw_ring_id == ring_id) 2375 return grp_idx; 2376 } 2377 return INVALID_HW_RING_ID; 2378 } 2379 2380 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2381 { 2382 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2383 2384 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2385 return link_info->force_link_speed2; 2386 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2387 return link_info->force_pam4_link_speed; 2388 return link_info->force_link_speed; 2389 } 2390 2391 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2392 { 2393 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2394 2395 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2396 link_info->req_link_speed = link_info->force_link_speed2; 2397 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2398 switch (link_info->req_link_speed) { 2399 case BNXT_LINK_SPEED_50GB_PAM4: 2400 case BNXT_LINK_SPEED_100GB_PAM4: 2401 case BNXT_LINK_SPEED_200GB_PAM4: 2402 case BNXT_LINK_SPEED_400GB_PAM4: 
2403 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2404 break; 2405 case BNXT_LINK_SPEED_100GB_PAM4_112: 2406 case BNXT_LINK_SPEED_200GB_PAM4_112: 2407 case BNXT_LINK_SPEED_400GB_PAM4_112: 2408 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2409 break; 2410 default: 2411 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2412 } 2413 return; 2414 } 2415 link_info->req_link_speed = link_info->force_link_speed; 2416 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2417 if (link_info->force_pam4_link_speed) { 2418 link_info->req_link_speed = link_info->force_pam4_link_speed; 2419 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2420 } 2421 } 2422 2423 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2424 { 2425 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2426 2427 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2428 link_info->advertising = link_info->auto_link_speeds2; 2429 return; 2430 } 2431 link_info->advertising = link_info->auto_link_speeds; 2432 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2433 } 2434 2435 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2436 { 2437 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2438 2439 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2440 if (link_info->req_link_speed != link_info->force_link_speed2) 2441 return true; 2442 return false; 2443 } 2444 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2445 link_info->req_link_speed != link_info->force_link_speed) 2446 return true; 2447 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2448 link_info->req_link_speed != link_info->force_pam4_link_speed) 2449 return true; 2450 return false; 2451 } 2452 2453 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2454 { 2455 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2456 2457 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2458 if (link_info->advertising != link_info->auto_link_speeds2) 2459 return true; 2460 return false; 2461 } 2462 if (link_info->advertising != link_info->auto_link_speeds || 2463 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2464 return true; 2465 return false; 2466 } 2467 2468 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2469 ((data2) & \ 2470 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2471 2472 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2473 (((data2) & \ 2474 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2475 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2476 2477 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2478 ((data1) & \ 2479 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2480 2481 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2482 (((data1) & \ 2483 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2484 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2485 2486 /* Return true if the workqueue has to be scheduled */ 2487 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2488 { 2489 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2490 2491 switch (err_type) { 2492 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2493 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2494 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2495 break; 2496 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2497 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2498 break; 2499 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2500 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2501 break; 2502 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2503 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2504 char *threshold_type; 2505 bool notify = false; 2506 char *dir_str; 2507 2508 switch (type) { 2509 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2510 threshold_type = "warning"; 2511 break; 2512 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2513 threshold_type = "critical"; 2514 break; 2515 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2516 threshold_type = "fatal"; 2517 break; 2518 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2519 threshold_type = "shutdown"; 2520 break; 2521 default: 2522 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2523 return false; 2524 } 2525 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2526 dir_str = "above"; 2527 notify = true; 2528 } else { 2529 dir_str = "below"; 2530 } 2531 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2532 dir_str, threshold_type); 2533 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2534 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2535 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2536 if (notify) { 2537 bp->thermal_threshold_type = type; 2538 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2539 return true; 2540 } 2541 return false; 2542 } 2543 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: 2544 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); 2545 break; 2546 default: 2547 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2548 err_type); 2549 break; 2550 } 2551 return false; 2552 } 2553 2554 #define BNXT_GET_EVENT_PORT(data) \ 2555 ((data) & \ 2556 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2557 2558 #define BNXT_EVENT_RING_TYPE(data2) \ 2559 ((data2) & \ 2560 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2561 2562 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2563 (BNXT_EVENT_RING_TYPE(data2) == \ 2564 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2565 2566 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2567 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2568 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2569 2570 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2571 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2572 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2573 2574 #define BNXT_PHC_BITS 48 2575 2576 static int bnxt_async_event_process(struct bnxt *bp, 2577 struct hwrm_async_event_cmpl *cmpl) 2578 { 2579 u16 event_id = le16_to_cpu(cmpl->event_id); 2580 u32 data1 = le32_to_cpu(cmpl->event_data1); 2581 u32 data2 = le32_to_cpu(cmpl->event_data2); 2582 2583 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2584 event_id, data1, data2); 2585 2586 /* TODO CHIMP_FW: Define event id's for link change, error etc 
*/ 2587 switch (event_id) { 2588 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2589 struct bnxt_link_info *link_info = &bp->link_info; 2590 2591 if (BNXT_VF(bp)) 2592 goto async_event_process_exit; 2593 2594 /* print unsupported speed warning in forced speed mode only */ 2595 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2596 (data1 & 0x20000)) { 2597 u16 fw_speed = bnxt_get_force_speed(link_info); 2598 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2599 2600 if (speed != SPEED_UNKNOWN) 2601 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2602 speed); 2603 } 2604 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2605 } 2606 fallthrough; 2607 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2608 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2609 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2610 fallthrough; 2611 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2612 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2613 break; 2614 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2615 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2616 break; 2617 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2618 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2619 2620 if (BNXT_VF(bp)) 2621 break; 2622 2623 if (bp->pf.port_id != port_id) 2624 break; 2625 2626 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2627 break; 2628 } 2629 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2630 if (BNXT_PF(bp)) 2631 goto async_event_process_exit; 2632 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2633 break; 2634 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2635 char *type_str = "Solicited"; 2636 2637 if (!bp->fw_health) 2638 goto async_event_process_exit; 2639 2640 bp->fw_reset_timestamp = jiffies; 2641 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2642 if (!bp->fw_reset_min_dsecs) 2643 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2644 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2645 if (!bp->fw_reset_max_dsecs) 2646 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2647 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2648 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2649 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2650 type_str = "Fatal"; 2651 bp->fw_health->fatalities++; 2652 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2653 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2654 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2655 type_str = "Non-fatal"; 2656 bp->fw_health->survivals++; 2657 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2658 } 2659 netif_warn(bp, hw, bp->dev, 2660 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2661 type_str, data1, data2, 2662 bp->fw_reset_min_dsecs * 100, 2663 bp->fw_reset_max_dsecs * 100); 2664 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2665 break; 2666 } 2667 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2668 struct bnxt_fw_health *fw_health = bp->fw_health; 2669 char *status_desc = "healthy"; 2670 u32 status; 2671 2672 if (!fw_health) 2673 goto async_event_process_exit; 2674 2675 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2676 fw_health->enabled = false; 2677 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2678 break; 2679 } 2680 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2681 fw_health->tmr_multiplier = 2682 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2683 bp->current_interval * 10); 2684 fw_health->tmr_counter = fw_health->tmr_multiplier; 2685 if 
(!fw_health->enabled) 2686 fw_health->last_fw_heartbeat = 2687 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2688 fw_health->last_fw_reset_cnt = 2689 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2690 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2691 if (status != BNXT_FW_STATUS_HEALTHY) 2692 status_desc = "unhealthy"; 2693 netif_info(bp, drv, bp->dev, 2694 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2695 fw_health->primary ? "primary" : "backup", status, 2696 status_desc, fw_health->last_fw_reset_cnt); 2697 if (!fw_health->enabled) { 2698 /* Make sure tmr_counter is set and visible to 2699 * bnxt_health_check() before setting enabled to true. 2700 */ 2701 smp_wmb(); 2702 fw_health->enabled = true; 2703 } 2704 goto async_event_process_exit; 2705 } 2706 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2707 netif_notice(bp, hw, bp->dev, 2708 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2709 data1, data2); 2710 goto async_event_process_exit; 2711 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2712 struct bnxt_rx_ring_info *rxr; 2713 u16 grp_idx; 2714 2715 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2716 goto async_event_process_exit; 2717 2718 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2719 BNXT_EVENT_RING_TYPE(data2), data1); 2720 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2721 goto async_event_process_exit; 2722 2723 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2724 if (grp_idx == INVALID_HW_RING_ID) { 2725 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2726 data1); 2727 goto async_event_process_exit; 2728 } 2729 rxr = bp->bnapi[grp_idx]->rx_ring; 2730 bnxt_sched_reset_rxr(bp, rxr); 2731 goto async_event_process_exit; 2732 } 2733 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2734 struct bnxt_fw_health *fw_health = bp->fw_health; 2735 2736 netif_notice(bp, hw, bp->dev, 2737 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2738 data1, data2); 2739 if (fw_health) { 2740 fw_health->echo_req_data1 = data1; 2741 fw_health->echo_req_data2 = data2; 2742 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2743 break; 2744 } 2745 goto async_event_process_exit; 2746 } 2747 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2748 bnxt_ptp_pps_event(bp, data1, data2); 2749 goto async_event_process_exit; 2750 } 2751 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2752 if (bnxt_event_error_report(bp, data1, data2)) 2753 break; 2754 goto async_event_process_exit; 2755 } 2756 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2757 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2758 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2759 if (BNXT_PTP_USE_RTC(bp)) { 2760 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2761 unsigned long flags; 2762 u64 ns; 2763 2764 if (!ptp) 2765 goto async_event_process_exit; 2766 2767 spin_lock_irqsave(&ptp->ptp_lock, flags); 2768 bnxt_ptp_update_current_time(bp); 2769 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2770 BNXT_PHC_BITS) | ptp->current_time); 2771 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2772 spin_unlock_irqrestore(&ptp->ptp_lock, flags); 2773 } 2774 break; 2775 } 2776 goto async_event_process_exit; 2777 } 2778 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2779 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2780 2781 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2782 goto async_event_process_exit; 2783 } 2784 default: 2785 goto async_event_process_exit; 2786 } 2787 __bnxt_queue_sp_work(bp); 2788 async_event_process_exit: 
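/* Events that are fully handled above, or ignored, jump here and skip the sp_task kick. */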
2789 return 0; 2790 } 2791 2792 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2793 { 2794 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2795 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2796 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2797 (struct hwrm_fwd_req_cmpl *)txcmp; 2798 2799 switch (cmpl_type) { 2800 case CMPL_BASE_TYPE_HWRM_DONE: 2801 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2802 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2803 break; 2804 2805 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2806 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2807 2808 if ((vf_id < bp->pf.first_vf_id) || 2809 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2810 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2811 vf_id); 2812 return -EINVAL; 2813 } 2814 2815 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2816 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2817 break; 2818 2819 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2820 bnxt_async_event_process(bp, 2821 (struct hwrm_async_event_cmpl *)txcmp); 2822 break; 2823 2824 default: 2825 break; 2826 } 2827 2828 return 0; 2829 } 2830 2831 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2832 { 2833 struct bnxt_napi *bnapi = dev_instance; 2834 struct bnxt *bp = bnapi->bp; 2835 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2836 u32 cons = RING_CMP(cpr->cp_raw_cons); 2837 2838 cpr->event_ctr++; 2839 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2840 napi_schedule(&bnapi->napi); 2841 return IRQ_HANDLED; 2842 } 2843 2844 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2845 { 2846 u32 raw_cons = cpr->cp_raw_cons; 2847 u16 cons = RING_CMP(raw_cons); 2848 struct tx_cmp *txcmp; 2849 2850 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2851 2852 return TX_CMP_VALID(txcmp, raw_cons); 2853 } 2854 2855 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2856 int budget) 2857 { 2858 struct bnxt_napi *bnapi = cpr->bnapi; 2859 u32 raw_cons = cpr->cp_raw_cons; 2860 u32 cons; 2861 int rx_pkts = 0; 2862 u8 event = 0; 2863 struct tx_cmp *txcmp; 2864 2865 cpr->has_more_work = 0; 2866 cpr->had_work_done = 1; 2867 while (1) { 2868 u8 cmp_type; 2869 int rc; 2870 2871 cons = RING_CMP(raw_cons); 2872 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2873 2874 if (!TX_CMP_VALID(txcmp, raw_cons)) 2875 break; 2876 2877 /* The valid test of the entry must be done first before 2878 * reading any further. 2879 */ 2880 dma_rmb(); 2881 cmp_type = TX_CMP_TYPE(txcmp); 2882 if (cmp_type == CMP_TYPE_TX_L2_CMP || 2883 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 2884 u32 opaque = txcmp->tx_cmp_opaque; 2885 struct bnxt_tx_ring_info *txr; 2886 u16 tx_freed; 2887 2888 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2889 event |= BNXT_TX_CMP_EVENT; 2890 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 2891 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 2892 else 2893 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2894 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2895 bp->tx_ring_mask; 2896 /* return full budget so NAPI will complete. 
*/ 2897 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2898 rx_pkts = budget; 2899 raw_cons = NEXT_RAW_CMP(raw_cons); 2900 if (budget) 2901 cpr->has_more_work = 1; 2902 break; 2903 } 2904 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { 2905 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); 2906 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 2907 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2908 if (likely(budget)) 2909 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2910 else 2911 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2912 &event); 2913 if (likely(rc >= 0)) 2914 rx_pkts += rc; 2915 /* Increment rx_pkts when rc is -ENOMEM to count towards 2916 * the NAPI budget. Otherwise, we may potentially loop 2917 * here forever if we consistently cannot allocate 2918 * buffers. 2919 */ 2920 else if (rc == -ENOMEM && budget) 2921 rx_pkts++; 2922 else if (rc == -EBUSY) /* partial completion */ 2923 break; 2924 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 2925 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 2926 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 2927 bnxt_hwrm_handler(bp, txcmp); 2928 } 2929 raw_cons = NEXT_RAW_CMP(raw_cons); 2930 2931 if (rx_pkts && rx_pkts == budget) { 2932 cpr->has_more_work = 1; 2933 break; 2934 } 2935 } 2936 2937 if (event & BNXT_REDIRECT_EVENT) { 2938 xdp_do_flush(); 2939 event &= ~BNXT_REDIRECT_EVENT; 2940 } 2941 2942 if (event & BNXT_TX_EVENT) { 2943 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 2944 u16 prod = txr->tx_prod; 2945 2946 /* Sync BD data before updating doorbell */ 2947 wmb(); 2948 2949 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2950 event &= ~BNXT_TX_EVENT; 2951 } 2952 2953 cpr->cp_raw_cons = raw_cons; 2954 bnapi->events |= event; 2955 return rx_pkts; 2956 } 2957 2958 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2959 int budget) 2960 { 2961 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 2962 bnapi->tx_int(bp, bnapi, budget); 2963 2964 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2965 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2966 2967 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2968 bnapi->events &= ~BNXT_RX_EVENT; 2969 } 2970 if (bnapi->events & BNXT_AGG_EVENT) { 2971 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2972 2973 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2974 bnapi->events &= ~BNXT_AGG_EVENT; 2975 } 2976 } 2977 2978 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2979 int budget) 2980 { 2981 struct bnxt_napi *bnapi = cpr->bnapi; 2982 int rx_pkts; 2983 2984 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2985 2986 /* ACK completion ring before freeing tx ring and producing new 2987 * buffers in rx/agg rings to prevent overflowing the completion 2988 * ring. 
2989 */ 2990 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2991 2992 __bnxt_poll_work_done(bp, bnapi, budget); 2993 return rx_pkts; 2994 } 2995 2996 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2997 { 2998 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2999 struct bnxt *bp = bnapi->bp; 3000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3001 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3002 struct tx_cmp *txcmp; 3003 struct rx_cmp_ext *rxcmp1; 3004 u32 cp_cons, tmp_raw_cons; 3005 u32 raw_cons = cpr->cp_raw_cons; 3006 bool flush_xdp = false; 3007 u32 rx_pkts = 0; 3008 u8 event = 0; 3009 3010 while (1) { 3011 int rc; 3012 3013 cp_cons = RING_CMP(raw_cons); 3014 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3015 3016 if (!TX_CMP_VALID(txcmp, raw_cons)) 3017 break; 3018 3019 /* The valid test of the entry must be done first before 3020 * reading any further. 3021 */ 3022 dma_rmb(); 3023 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 3024 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 3025 cp_cons = RING_CMP(tmp_raw_cons); 3026 rxcmp1 = (struct rx_cmp_ext *) 3027 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3028 3029 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 3030 break; 3031 3032 /* force an error to recycle the buffer */ 3033 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 3034 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 3035 3036 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3037 if (likely(rc == -EIO) && budget) 3038 rx_pkts++; 3039 else if (rc == -EBUSY) /* partial completion */ 3040 break; 3041 if (event & BNXT_REDIRECT_EVENT) 3042 flush_xdp = true; 3043 } else if (unlikely(TX_CMP_TYPE(txcmp) == 3044 CMPL_BASE_TYPE_HWRM_DONE)) { 3045 bnxt_hwrm_handler(bp, txcmp); 3046 } else { 3047 netdev_err(bp->dev, 3048 "Invalid completion received on special ring\n"); 3049 } 3050 raw_cons = NEXT_RAW_CMP(raw_cons); 3051 3052 if (rx_pkts == budget) 3053 break; 3054 } 3055 3056 cpr->cp_raw_cons = raw_cons; 3057 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 3058 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3059 3060 if (event & BNXT_AGG_EVENT) 3061 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3062 if (flush_xdp) 3063 xdp_do_flush(); 3064 3065 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 3066 napi_complete_done(napi, rx_pkts); 3067 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3068 } 3069 return rx_pkts; 3070 } 3071 3072 static int bnxt_poll(struct napi_struct *napi, int budget) 3073 { 3074 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3075 struct bnxt *bp = bnapi->bp; 3076 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3077 int work_done = 0; 3078 3079 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3080 napi_complete(napi); 3081 return 0; 3082 } 3083 while (1) { 3084 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3085 3086 if (work_done >= budget) { 3087 if (!budget) 3088 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3089 break; 3090 } 3091 3092 if (!bnxt_has_work(bp, cpr)) { 3093 if (napi_complete_done(napi, work_done)) 3094 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3095 break; 3096 } 3097 } 3098 if (bp->flags & BNXT_FLAG_DIM) { 3099 struct dim_sample dim_sample = {}; 3100 3101 dim_update_sample(cpr->event_ctr, 3102 cpr->rx_packets, 3103 cpr->rx_bytes, 3104 &dim_sample); 3105 net_dim(&cpr->dim, dim_sample); 3106 } 3107 return work_done; 3108 } 3109 3110 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3111 { 3112 struct bnxt_cp_ring_info *cpr = 
&bnapi->cp_ring; 3113 int i, work_done = 0; 3114 3115 for (i = 0; i < cpr->cp_ring_count; i++) { 3116 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3117 3118 if (cpr2->had_nqe_notify) { 3119 work_done += __bnxt_poll_work(bp, cpr2, 3120 budget - work_done); 3121 cpr->has_more_work |= cpr2->has_more_work; 3122 } 3123 } 3124 return work_done; 3125 } 3126 3127 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3128 u64 dbr_type, int budget) 3129 { 3130 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3131 int i; 3132 3133 for (i = 0; i < cpr->cp_ring_count; i++) { 3134 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3135 struct bnxt_db_info *db; 3136 3137 if (cpr2->had_work_done) { 3138 u32 tgl = 0; 3139 3140 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3141 cpr2->had_nqe_notify = 0; 3142 tgl = cpr2->toggle; 3143 } 3144 db = &cpr2->cp_db; 3145 bnxt_writeq(bp, 3146 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3147 DB_RING_IDX(db, cpr2->cp_raw_cons), 3148 db->doorbell); 3149 cpr2->had_work_done = 0; 3150 } 3151 } 3152 __bnxt_poll_work_done(bp, bnapi, budget); 3153 } 3154 3155 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3156 { 3157 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3158 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3159 struct bnxt_cp_ring_info *cpr_rx; 3160 u32 raw_cons = cpr->cp_raw_cons; 3161 struct bnxt *bp = bnapi->bp; 3162 struct nqe_cn *nqcmp; 3163 int work_done = 0; 3164 u32 cons; 3165 3166 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3167 napi_complete(napi); 3168 return 0; 3169 } 3170 if (cpr->has_more_work) { 3171 cpr->has_more_work = 0; 3172 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3173 } 3174 while (1) { 3175 u16 type; 3176 3177 cons = RING_CMP(raw_cons); 3178 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3179 3180 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3181 if (cpr->has_more_work) 3182 break; 3183 3184 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3185 budget); 3186 cpr->cp_raw_cons = raw_cons; 3187 if (napi_complete_done(napi, work_done)) 3188 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3189 cpr->cp_raw_cons); 3190 goto poll_done; 3191 } 3192 3193 /* The valid test of the entry must be done first before 3194 * reading any further. 
3195 */ 3196 dma_rmb(); 3197 3198 type = le16_to_cpu(nqcmp->type); 3199 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3200 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3201 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3202 struct bnxt_cp_ring_info *cpr2; 3203 3204 /* No more budget for RX work */ 3205 if (budget && work_done >= budget && 3206 cq_type == BNXT_NQ_HDL_TYPE_RX) 3207 break; 3208 3209 idx = BNXT_NQ_HDL_IDX(idx); 3210 cpr2 = &cpr->cp_ring_arr[idx]; 3211 cpr2->had_nqe_notify = 1; 3212 cpr2->toggle = NQE_CN_TOGGLE(type); 3213 work_done += __bnxt_poll_work(bp, cpr2, 3214 budget - work_done); 3215 cpr->has_more_work |= cpr2->has_more_work; 3216 } else { 3217 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3218 } 3219 raw_cons = NEXT_RAW_CMP(raw_cons); 3220 } 3221 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3222 if (raw_cons != cpr->cp_raw_cons) { 3223 cpr->cp_raw_cons = raw_cons; 3224 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3225 } 3226 poll_done: 3227 cpr_rx = &cpr->cp_ring_arr[0]; 3228 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3229 (bp->flags & BNXT_FLAG_DIM)) { 3230 struct dim_sample dim_sample = {}; 3231 3232 dim_update_sample(cpr->event_ctr, 3233 cpr_rx->rx_packets, 3234 cpr_rx->rx_bytes, 3235 &dim_sample); 3236 net_dim(&cpr->dim, dim_sample); 3237 } 3238 return work_done; 3239 } 3240 3241 static void bnxt_free_tx_skbs(struct bnxt *bp) 3242 { 3243 int i, max_idx; 3244 struct pci_dev *pdev = bp->pdev; 3245 3246 if (!bp->tx_ring) 3247 return; 3248 3249 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3250 for (i = 0; i < bp->tx_nr_rings; i++) { 3251 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3252 int j; 3253 3254 if (!txr->tx_buf_ring) 3255 continue; 3256 3257 for (j = 0; j < max_idx;) { 3258 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 3259 struct sk_buff *skb; 3260 int k, last; 3261 3262 if (i < bp->tx_nr_rings_xdp && 3263 tx_buf->action == XDP_REDIRECT) { 3264 dma_unmap_single(&pdev->dev, 3265 dma_unmap_addr(tx_buf, mapping), 3266 dma_unmap_len(tx_buf, len), 3267 DMA_TO_DEVICE); 3268 xdp_return_frame(tx_buf->xdpf); 3269 tx_buf->action = 0; 3270 tx_buf->xdpf = NULL; 3271 j++; 3272 continue; 3273 } 3274 3275 skb = tx_buf->skb; 3276 if (!skb) { 3277 j++; 3278 continue; 3279 } 3280 3281 tx_buf->skb = NULL; 3282 3283 if (tx_buf->is_push) { 3284 dev_kfree_skb(skb); 3285 j += 2; 3286 continue; 3287 } 3288 3289 dma_unmap_single(&pdev->dev, 3290 dma_unmap_addr(tx_buf, mapping), 3291 skb_headlen(skb), 3292 DMA_TO_DEVICE); 3293 3294 last = tx_buf->nr_frags; 3295 j += 2; 3296 for (k = 0; k < last; k++, j++) { 3297 int ring_idx = j & bp->tx_ring_mask; 3298 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 3299 3300 tx_buf = &txr->tx_buf_ring[ring_idx]; 3301 dma_unmap_page( 3302 &pdev->dev, 3303 dma_unmap_addr(tx_buf, mapping), 3304 skb_frag_size(frag), DMA_TO_DEVICE); 3305 } 3306 dev_kfree_skb(skb); 3307 } 3308 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 3309 } 3310 } 3311 3312 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3313 { 3314 struct pci_dev *pdev = bp->pdev; 3315 int i, max_idx; 3316 3317 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3318 3319 for (i = 0; i < max_idx; i++) { 3320 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3321 dma_addr_t mapping = rx_buf->mapping; 3322 void *data = rx_buf->data; 3323 3324 if (!data) 3325 continue; 3326 3327 rx_buf->data = NULL; 3328 if (BNXT_RX_PAGE_MODE(bp)) { 3329 page_pool_recycle_direct(rxr->page_pool, data); 3330 } else { 3331 dma_unmap_single_attrs(&pdev->dev, mapping, 
3332 bp->rx_buf_use_size, bp->rx_dir, 3333 DMA_ATTR_WEAK_ORDERING); 3334 skb_free_frag(data); 3335 } 3336 } 3337 } 3338 3339 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3340 { 3341 int i, max_idx; 3342 3343 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3344 3345 for (i = 0; i < max_idx; i++) { 3346 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3347 struct page *page = rx_agg_buf->page; 3348 3349 if (!page) 3350 continue; 3351 3352 rx_agg_buf->page = NULL; 3353 __clear_bit(i, rxr->rx_agg_bmap); 3354 3355 page_pool_recycle_direct(rxr->page_pool, page); 3356 } 3357 } 3358 3359 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 3360 { 3361 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3362 struct pci_dev *pdev = bp->pdev; 3363 struct bnxt_tpa_idx_map *map; 3364 int i; 3365 3366 if (!rxr->rx_tpa) 3367 goto skip_rx_tpa_free; 3368 3369 for (i = 0; i < bp->max_tpa; i++) { 3370 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3371 u8 *data = tpa_info->data; 3372 3373 if (!data) 3374 continue; 3375 3376 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 3377 bp->rx_buf_use_size, bp->rx_dir, 3378 DMA_ATTR_WEAK_ORDERING); 3379 3380 tpa_info->data = NULL; 3381 3382 skb_free_frag(data); 3383 } 3384 3385 skip_rx_tpa_free: 3386 if (!rxr->rx_buf_ring) 3387 goto skip_rx_buf_free; 3388 3389 bnxt_free_one_rx_ring(bp, rxr); 3390 3391 skip_rx_buf_free: 3392 if (!rxr->rx_agg_ring) 3393 goto skip_rx_agg_free; 3394 3395 bnxt_free_one_rx_agg_ring(bp, rxr); 3396 3397 skip_rx_agg_free: 3398 map = rxr->rx_tpa_idx_map; 3399 if (map) 3400 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3401 } 3402 3403 static void bnxt_free_rx_skbs(struct bnxt *bp) 3404 { 3405 int i; 3406 3407 if (!bp->rx_ring) 3408 return; 3409 3410 for (i = 0; i < bp->rx_nr_rings; i++) 3411 bnxt_free_one_rx_ring_skbs(bp, i); 3412 } 3413 3414 static void bnxt_free_skbs(struct bnxt *bp) 3415 { 3416 bnxt_free_tx_skbs(bp); 3417 bnxt_free_rx_skbs(bp); 3418 } 3419 3420 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3421 { 3422 u8 init_val = ctxm->init_value; 3423 u16 offset = ctxm->init_offset; 3424 u8 *p2 = p; 3425 int i; 3426 3427 if (!init_val) 3428 return; 3429 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3430 memset(p, init_val, len); 3431 return; 3432 } 3433 for (i = 0; i < len; i += ctxm->entry_size) 3434 *(p2 + i + offset) = init_val; 3435 } 3436 3437 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3438 { 3439 struct pci_dev *pdev = bp->pdev; 3440 int i; 3441 3442 if (!rmem->pg_arr) 3443 goto skip_pages; 3444 3445 for (i = 0; i < rmem->nr_pages; i++) { 3446 if (!rmem->pg_arr[i]) 3447 continue; 3448 3449 dma_free_coherent(&pdev->dev, rmem->page_size, 3450 rmem->pg_arr[i], rmem->dma_arr[i]); 3451 3452 rmem->pg_arr[i] = NULL; 3453 } 3454 skip_pages: 3455 if (rmem->pg_tbl) { 3456 size_t pg_tbl_size = rmem->nr_pages * 8; 3457 3458 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3459 pg_tbl_size = rmem->page_size; 3460 dma_free_coherent(&pdev->dev, pg_tbl_size, 3461 rmem->pg_tbl, rmem->pg_tbl_map); 3462 rmem->pg_tbl = NULL; 3463 } 3464 if (rmem->vmem_size && *rmem->vmem) { 3465 vfree(*rmem->vmem); 3466 *rmem->vmem = NULL; 3467 } 3468 } 3469 3470 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3471 { 3472 struct pci_dev *pdev = bp->pdev; 3473 u64 valid_bit = 0; 3474 int i; 3475 3476 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3477 valid_bit = 
PTU_PTE_VALID; 3478 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3479 size_t pg_tbl_size = rmem->nr_pages * 8; 3480 3481 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3482 pg_tbl_size = rmem->page_size; 3483 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3484 &rmem->pg_tbl_map, 3485 GFP_KERNEL); 3486 if (!rmem->pg_tbl) 3487 return -ENOMEM; 3488 } 3489 3490 for (i = 0; i < rmem->nr_pages; i++) { 3491 u64 extra_bits = valid_bit; 3492 3493 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3494 rmem->page_size, 3495 &rmem->dma_arr[i], 3496 GFP_KERNEL); 3497 if (!rmem->pg_arr[i]) 3498 return -ENOMEM; 3499 3500 if (rmem->ctx_mem) 3501 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3502 rmem->page_size); 3503 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3504 if (i == rmem->nr_pages - 2 && 3505 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3506 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3507 else if (i == rmem->nr_pages - 1 && 3508 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3509 extra_bits |= PTU_PTE_LAST; 3510 rmem->pg_tbl[i] = 3511 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3512 } 3513 } 3514 3515 if (rmem->vmem_size) { 3516 *rmem->vmem = vzalloc(rmem->vmem_size); 3517 if (!(*rmem->vmem)) 3518 return -ENOMEM; 3519 } 3520 return 0; 3521 } 3522 3523 static void bnxt_free_tpa_info(struct bnxt *bp) 3524 { 3525 int i, j; 3526 3527 for (i = 0; i < bp->rx_nr_rings; i++) { 3528 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3529 3530 kfree(rxr->rx_tpa_idx_map); 3531 rxr->rx_tpa_idx_map = NULL; 3532 if (rxr->rx_tpa) { 3533 for (j = 0; j < bp->max_tpa; j++) { 3534 kfree(rxr->rx_tpa[j].agg_arr); 3535 rxr->rx_tpa[j].agg_arr = NULL; 3536 } 3537 } 3538 kfree(rxr->rx_tpa); 3539 rxr->rx_tpa = NULL; 3540 } 3541 } 3542 3543 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3544 { 3545 int i, j; 3546 3547 bp->max_tpa = MAX_TPA; 3548 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3549 if (!bp->max_tpa_v2) 3550 return 0; 3551 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3552 } 3553 3554 for (i = 0; i < bp->rx_nr_rings; i++) { 3555 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3556 struct rx_agg_cmp *agg; 3557 3558 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3559 GFP_KERNEL); 3560 if (!rxr->rx_tpa) 3561 return -ENOMEM; 3562 3563 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3564 continue; 3565 for (j = 0; j < bp->max_tpa; j++) { 3566 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3567 if (!agg) 3568 return -ENOMEM; 3569 rxr->rx_tpa[j].agg_arr = agg; 3570 } 3571 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3572 GFP_KERNEL); 3573 if (!rxr->rx_tpa_idx_map) 3574 return -ENOMEM; 3575 } 3576 return 0; 3577 } 3578 3579 static void bnxt_free_rx_rings(struct bnxt *bp) 3580 { 3581 int i; 3582 3583 if (!bp->rx_ring) 3584 return; 3585 3586 bnxt_free_tpa_info(bp); 3587 for (i = 0; i < bp->rx_nr_rings; i++) { 3588 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3589 struct bnxt_ring_struct *ring; 3590 3591 if (rxr->xdp_prog) 3592 bpf_prog_put(rxr->xdp_prog); 3593 3594 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3595 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3596 3597 page_pool_destroy(rxr->page_pool); 3598 rxr->page_pool = NULL; 3599 3600 kfree(rxr->rx_agg_bmap); 3601 rxr->rx_agg_bmap = NULL; 3602 3603 ring = &rxr->rx_ring_struct; 3604 bnxt_free_ring(bp, &ring->ring_mem); 3605 3606 ring = &rxr->rx_agg_ring_struct; 3607 bnxt_free_ring(bp, &ring->ring_mem); 3608 } 3609 } 3610 3611 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3612 struct bnxt_rx_ring_info *rxr, 3613 
int numa_node) 3614 { 3615 struct page_pool_params pp = { 0 }; 3616 3617 pp.pool_size = bp->rx_agg_ring_size; 3618 if (BNXT_RX_PAGE_MODE(bp)) 3619 pp.pool_size += bp->rx_ring_size; 3620 pp.nid = numa_node; 3621 pp.napi = &rxr->bnapi->napi; 3622 pp.netdev = bp->dev; 3623 pp.dev = &bp->pdev->dev; 3624 pp.dma_dir = bp->rx_dir; 3625 pp.max_len = PAGE_SIZE; 3626 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3627 3628 rxr->page_pool = page_pool_create(&pp); 3629 if (IS_ERR(rxr->page_pool)) { 3630 int err = PTR_ERR(rxr->page_pool); 3631 3632 rxr->page_pool = NULL; 3633 return err; 3634 } 3635 return 0; 3636 } 3637 3638 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3639 { 3640 int numa_node = dev_to_node(&bp->pdev->dev); 3641 int i, rc = 0, agg_rings = 0, cpu; 3642 3643 if (!bp->rx_ring) 3644 return -ENOMEM; 3645 3646 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3647 agg_rings = 1; 3648 3649 for (i = 0; i < bp->rx_nr_rings; i++) { 3650 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3651 struct bnxt_ring_struct *ring; 3652 int cpu_node; 3653 3654 ring = &rxr->rx_ring_struct; 3655 3656 cpu = cpumask_local_spread(i, numa_node); 3657 cpu_node = cpu_to_node(cpu); 3658 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", 3659 i, cpu_node); 3660 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3661 if (rc) 3662 return rc; 3663 3664 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3665 if (rc < 0) 3666 return rc; 3667 3668 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3669 MEM_TYPE_PAGE_POOL, 3670 rxr->page_pool); 3671 if (rc) { 3672 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3673 return rc; 3674 } 3675 3676 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3677 if (rc) 3678 return rc; 3679 3680 ring->grp_idx = i; 3681 if (agg_rings) { 3682 u16 mem_size; 3683 3684 ring = &rxr->rx_agg_ring_struct; 3685 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3686 if (rc) 3687 return rc; 3688 3689 ring->grp_idx = i; 3690 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3691 mem_size = rxr->rx_agg_bmap_size / 8; 3692 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3693 if (!rxr->rx_agg_bmap) 3694 return -ENOMEM; 3695 } 3696 } 3697 if (bp->flags & BNXT_FLAG_TPA) 3698 rc = bnxt_alloc_tpa_info(bp); 3699 return rc; 3700 } 3701 3702 static void bnxt_free_tx_rings(struct bnxt *bp) 3703 { 3704 int i; 3705 struct pci_dev *pdev = bp->pdev; 3706 3707 if (!bp->tx_ring) 3708 return; 3709 3710 for (i = 0; i < bp->tx_nr_rings; i++) { 3711 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3712 struct bnxt_ring_struct *ring; 3713 3714 if (txr->tx_push) { 3715 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3716 txr->tx_push, txr->tx_push_mapping); 3717 txr->tx_push = NULL; 3718 } 3719 3720 ring = &txr->tx_ring_struct; 3721 3722 bnxt_free_ring(bp, &ring->ring_mem); 3723 } 3724 } 3725 3726 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3727 ((tc) * (bp)->tx_nr_rings_per_tc) 3728 3729 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3730 ((tx) % (bp)->tx_nr_rings_per_tc) 3731 3732 #define BNXT_RING_TO_TC(bp, tx) \ 3733 ((tx) / (bp)->tx_nr_rings_per_tc) 3734 3735 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3736 { 3737 int i, j, rc; 3738 struct pci_dev *pdev = bp->pdev; 3739 3740 bp->tx_push_size = 0; 3741 if (bp->tx_push_thresh) { 3742 int push_size; 3743 3744 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3745 bp->tx_push_thresh); 3746 3747 if (push_size > 256) { 3748 push_size = 0; 3749 bp->tx_push_thresh = 0; 3750 } 3751 3752 bp->tx_push_size = push_size; 3753 } 3754 3755 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3756 
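/* For each TX ring: allocate descriptor ring memory, set up the optional push buffer, and map the ring to its hardware queue via the TC-to-qidx table. */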
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3757 struct bnxt_ring_struct *ring; 3758 u8 qidx; 3759 3760 ring = &txr->tx_ring_struct; 3761 3762 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3763 if (rc) 3764 return rc; 3765 3766 ring->grp_idx = txr->bnapi->index; 3767 if (bp->tx_push_size) { 3768 dma_addr_t mapping; 3769 3770 /* One pre-allocated DMA buffer to backup 3771 * TX push operation 3772 */ 3773 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3774 bp->tx_push_size, 3775 &txr->tx_push_mapping, 3776 GFP_KERNEL); 3777 3778 if (!txr->tx_push) 3779 return -ENOMEM; 3780 3781 mapping = txr->tx_push_mapping + 3782 sizeof(struct tx_push_bd); 3783 txr->data_mapping = cpu_to_le64(mapping); 3784 } 3785 qidx = bp->tc_to_qidx[j]; 3786 ring->queue_id = bp->q_info[qidx].queue_id; 3787 spin_lock_init(&txr->xdp_tx_lock); 3788 if (i < bp->tx_nr_rings_xdp) 3789 continue; 3790 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3791 j++; 3792 } 3793 return 0; 3794 } 3795 3796 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3797 { 3798 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3799 3800 kfree(cpr->cp_desc_ring); 3801 cpr->cp_desc_ring = NULL; 3802 ring->ring_mem.pg_arr = NULL; 3803 kfree(cpr->cp_desc_mapping); 3804 cpr->cp_desc_mapping = NULL; 3805 ring->ring_mem.dma_arr = NULL; 3806 } 3807 3808 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3809 { 3810 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3811 if (!cpr->cp_desc_ring) 3812 return -ENOMEM; 3813 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3814 GFP_KERNEL); 3815 if (!cpr->cp_desc_mapping) 3816 return -ENOMEM; 3817 return 0; 3818 } 3819 3820 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3821 { 3822 int i; 3823 3824 if (!bp->bnapi) 3825 return; 3826 for (i = 0; i < bp->cp_nr_rings; i++) { 3827 struct bnxt_napi *bnapi = bp->bnapi[i]; 3828 3829 if (!bnapi) 3830 continue; 3831 bnxt_free_cp_arrays(&bnapi->cp_ring); 3832 } 3833 } 3834 3835 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 3836 { 3837 int i, n = bp->cp_nr_pages; 3838 3839 for (i = 0; i < bp->cp_nr_rings; i++) { 3840 struct bnxt_napi *bnapi = bp->bnapi[i]; 3841 int rc; 3842 3843 if (!bnapi) 3844 continue; 3845 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 3846 if (rc) 3847 return rc; 3848 } 3849 return 0; 3850 } 3851 3852 static void bnxt_free_cp_rings(struct bnxt *bp) 3853 { 3854 int i; 3855 3856 if (!bp->bnapi) 3857 return; 3858 3859 for (i = 0; i < bp->cp_nr_rings; i++) { 3860 struct bnxt_napi *bnapi = bp->bnapi[i]; 3861 struct bnxt_cp_ring_info *cpr; 3862 struct bnxt_ring_struct *ring; 3863 int j; 3864 3865 if (!bnapi) 3866 continue; 3867 3868 cpr = &bnapi->cp_ring; 3869 ring = &cpr->cp_ring_struct; 3870 3871 bnxt_free_ring(bp, &ring->ring_mem); 3872 3873 if (!cpr->cp_ring_arr) 3874 continue; 3875 3876 for (j = 0; j < cpr->cp_ring_count; j++) { 3877 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3878 3879 ring = &cpr2->cp_ring_struct; 3880 bnxt_free_ring(bp, &ring->ring_mem); 3881 bnxt_free_cp_arrays(cpr2); 3882 } 3883 kfree(cpr->cp_ring_arr); 3884 cpr->cp_ring_arr = NULL; 3885 cpr->cp_ring_count = 0; 3886 } 3887 } 3888 3889 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 3890 struct bnxt_cp_ring_info *cpr) 3891 { 3892 struct bnxt_ring_mem_info *rmem; 3893 struct bnxt_ring_struct *ring; 3894 int rc; 3895 3896 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 3897 if (rc) { 3898 bnxt_free_cp_arrays(cpr); 3899 return -ENOMEM; 3900 } 3901 ring = 
&cpr->cp_ring_struct; 3902 rmem = &ring->ring_mem; 3903 rmem->nr_pages = bp->cp_nr_pages; 3904 rmem->page_size = HW_CMPD_RING_SIZE; 3905 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3906 rmem->dma_arr = cpr->cp_desc_mapping; 3907 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3908 rc = bnxt_alloc_ring(bp, rmem); 3909 if (rc) { 3910 bnxt_free_ring(bp, rmem); 3911 bnxt_free_cp_arrays(cpr); 3912 } 3913 return rc; 3914 } 3915 3916 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3917 { 3918 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3919 int i, j, rc, ulp_msix; 3920 int tcs = bp->num_tc; 3921 3922 if (!tcs) 3923 tcs = 1; 3924 ulp_msix = bnxt_get_ulp_msix_num(bp); 3925 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 3926 struct bnxt_napi *bnapi = bp->bnapi[i]; 3927 struct bnxt_cp_ring_info *cpr, *cpr2; 3928 struct bnxt_ring_struct *ring; 3929 int cp_count = 0, k; 3930 int rx = 0, tx = 0; 3931 3932 if (!bnapi) 3933 continue; 3934 3935 cpr = &bnapi->cp_ring; 3936 cpr->bnapi = bnapi; 3937 ring = &cpr->cp_ring_struct; 3938 3939 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3940 if (rc) 3941 return rc; 3942 3943 ring->map_idx = ulp_msix + i; 3944 3945 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3946 continue; 3947 3948 if (i < bp->rx_nr_rings) { 3949 cp_count++; 3950 rx = 1; 3951 } 3952 if (i < bp->tx_nr_rings_xdp) { 3953 cp_count++; 3954 tx = 1; 3955 } else if ((sh && i < bp->tx_nr_rings) || 3956 (!sh && i >= bp->rx_nr_rings)) { 3957 cp_count += tcs; 3958 tx = 1; 3959 } 3960 3961 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 3962 GFP_KERNEL); 3963 if (!cpr->cp_ring_arr) 3964 return -ENOMEM; 3965 cpr->cp_ring_count = cp_count; 3966 3967 for (k = 0; k < cp_count; k++) { 3968 cpr2 = &cpr->cp_ring_arr[k]; 3969 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 3970 if (rc) 3971 return rc; 3972 cpr2->bnapi = bnapi; 3973 cpr2->sw_stats = cpr->sw_stats; 3974 cpr2->cp_idx = k; 3975 if (!k && rx) { 3976 bp->rx_ring[i].rx_cpr = cpr2; 3977 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 3978 } else { 3979 int n, tc = k - rx; 3980 3981 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 3982 bp->tx_ring[n].tx_cpr = cpr2; 3983 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 3984 } 3985 } 3986 if (tx) 3987 j++; 3988 } 3989 return 0; 3990 } 3991 3992 static void bnxt_init_rx_ring_struct(struct bnxt *bp, 3993 struct bnxt_rx_ring_info *rxr) 3994 { 3995 struct bnxt_ring_mem_info *rmem; 3996 struct bnxt_ring_struct *ring; 3997 3998 ring = &rxr->rx_ring_struct; 3999 rmem = &ring->ring_mem; 4000 rmem->nr_pages = bp->rx_nr_pages; 4001 rmem->page_size = HW_RXBD_RING_SIZE; 4002 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4003 rmem->dma_arr = rxr->rx_desc_mapping; 4004 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4005 rmem->vmem = (void **)&rxr->rx_buf_ring; 4006 4007 ring = &rxr->rx_agg_ring_struct; 4008 rmem = &ring->ring_mem; 4009 rmem->nr_pages = bp->rx_agg_nr_pages; 4010 rmem->page_size = HW_RXBD_RING_SIZE; 4011 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4012 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4013 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4014 rmem->vmem = (void **)&rxr->rx_agg_ring; 4015 } 4016 4017 static void bnxt_reset_rx_ring_struct(struct bnxt *bp, 4018 struct bnxt_rx_ring_info *rxr) 4019 { 4020 struct bnxt_ring_mem_info *rmem; 4021 struct bnxt_ring_struct *ring; 4022 int i; 4023 4024 rxr->page_pool->p.napi = NULL; 4025 rxr->page_pool = NULL; 4026 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); 4027 4028 ring = &rxr->rx_ring_struct; 4029 rmem = &ring->ring_mem; 4030 rmem->pg_tbl = NULL; 4031 
rmem->pg_tbl_map = 0; 4032 for (i = 0; i < rmem->nr_pages; i++) { 4033 rmem->pg_arr[i] = NULL; 4034 rmem->dma_arr[i] = 0; 4035 } 4036 *rmem->vmem = NULL; 4037 4038 ring = &rxr->rx_agg_ring_struct; 4039 rmem = &ring->ring_mem; 4040 rmem->pg_tbl = NULL; 4041 rmem->pg_tbl_map = 0; 4042 for (i = 0; i < rmem->nr_pages; i++) { 4043 rmem->pg_arr[i] = NULL; 4044 rmem->dma_arr[i] = 0; 4045 } 4046 *rmem->vmem = NULL; 4047 } 4048 4049 static void bnxt_init_ring_struct(struct bnxt *bp) 4050 { 4051 int i, j; 4052 4053 for (i = 0; i < bp->cp_nr_rings; i++) { 4054 struct bnxt_napi *bnapi = bp->bnapi[i]; 4055 struct bnxt_ring_mem_info *rmem; 4056 struct bnxt_cp_ring_info *cpr; 4057 struct bnxt_rx_ring_info *rxr; 4058 struct bnxt_tx_ring_info *txr; 4059 struct bnxt_ring_struct *ring; 4060 4061 if (!bnapi) 4062 continue; 4063 4064 cpr = &bnapi->cp_ring; 4065 ring = &cpr->cp_ring_struct; 4066 rmem = &ring->ring_mem; 4067 rmem->nr_pages = bp->cp_nr_pages; 4068 rmem->page_size = HW_CMPD_RING_SIZE; 4069 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4070 rmem->dma_arr = cpr->cp_desc_mapping; 4071 rmem->vmem_size = 0; 4072 4073 rxr = bnapi->rx_ring; 4074 if (!rxr) 4075 goto skip_rx; 4076 4077 ring = &rxr->rx_ring_struct; 4078 rmem = &ring->ring_mem; 4079 rmem->nr_pages = bp->rx_nr_pages; 4080 rmem->page_size = HW_RXBD_RING_SIZE; 4081 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4082 rmem->dma_arr = rxr->rx_desc_mapping; 4083 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4084 rmem->vmem = (void **)&rxr->rx_buf_ring; 4085 4086 ring = &rxr->rx_agg_ring_struct; 4087 rmem = &ring->ring_mem; 4088 rmem->nr_pages = bp->rx_agg_nr_pages; 4089 rmem->page_size = HW_RXBD_RING_SIZE; 4090 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4091 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4092 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4093 rmem->vmem = (void **)&rxr->rx_agg_ring; 4094 4095 skip_rx: 4096 bnxt_for_each_napi_tx(j, bnapi, txr) { 4097 ring = &txr->tx_ring_struct; 4098 rmem = &ring->ring_mem; 4099 rmem->nr_pages = bp->tx_nr_pages; 4100 rmem->page_size = HW_TXBD_RING_SIZE; 4101 rmem->pg_arr = (void **)txr->tx_desc_ring; 4102 rmem->dma_arr = txr->tx_desc_mapping; 4103 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 4104 rmem->vmem = (void **)&txr->tx_buf_ring; 4105 } 4106 } 4107 } 4108 4109 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 4110 { 4111 int i; 4112 u32 prod; 4113 struct rx_bd **rx_buf_ring; 4114 4115 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 4116 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 4117 int j; 4118 struct rx_bd *rxbd; 4119 4120 rxbd = rx_buf_ring[i]; 4121 if (!rxbd) 4122 continue; 4123 4124 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 4125 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 4126 rxbd->rx_bd_opaque = prod; 4127 } 4128 } 4129 } 4130 4131 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, 4132 struct bnxt_rx_ring_info *rxr, 4133 int ring_nr) 4134 { 4135 u32 prod; 4136 int i; 4137 4138 prod = rxr->rx_prod; 4139 for (i = 0; i < bp->rx_ring_size; i++) { 4140 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 4141 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", 4142 ring_nr, i, bp->rx_ring_size); 4143 break; 4144 } 4145 prod = NEXT_RX(prod); 4146 } 4147 rxr->rx_prod = prod; 4148 } 4149 4150 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, 4151 struct bnxt_rx_ring_info *rxr, 4152 int ring_nr) 4153 { 4154 u32 prod; 4155 int i; 4156 4157 prod = rxr->rx_agg_prod; 4158 for (i = 0; i 
< bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
}

static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	int i;

	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
	}
	return 0;
}

static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
				       struct bnxt_rx_ring_info *rxr)
{
	struct bnxt_ring_struct *ring;
	u32 type;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}

static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
					   struct bnxt_rx_ring_info *rxr)
{
	struct bnxt_ring_struct *ring;
	u32 type;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;
	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnxt_init_rxbd_pages(ring, type);
	}
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr;

	rxr = &bp->rx_ring[ring_nr];
	bnxt_init_one_rx_ring_rxbd(bp, rxr);

	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
			     &rxr->bnapi->napi);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		bpf_prog_add(bp->xdp_prog, 1);
		rxr->xdp_prog = bp->xdp_prog;
	}

	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);

	return bnxt_alloc_one_rx_ring(bp, ring_nr);
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		if (!cpr->cp_ring_arr)
			continue;
		for (j = 0; j < cpr->cp_ring_count; j++) {
			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset =
BNXT_RX_DMA_OFFSET; 4285 } 4286 4287 for (i = 0; i < bp->rx_nr_rings; i++) { 4288 rc = bnxt_init_one_rx_ring(bp, i); 4289 if (rc) 4290 break; 4291 } 4292 4293 return rc; 4294 } 4295 4296 static int bnxt_init_tx_rings(struct bnxt *bp) 4297 { 4298 u16 i; 4299 4300 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4301 BNXT_MIN_TX_DESC_CNT); 4302 4303 for (i = 0; i < bp->tx_nr_rings; i++) { 4304 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4305 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4306 4307 ring->fw_ring_id = INVALID_HW_RING_ID; 4308 4309 if (i >= bp->tx_nr_rings_xdp) 4310 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4311 NETDEV_QUEUE_TYPE_TX, 4312 &txr->bnapi->napi); 4313 } 4314 4315 return 0; 4316 } 4317 4318 static void bnxt_free_ring_grps(struct bnxt *bp) 4319 { 4320 kfree(bp->grp_info); 4321 bp->grp_info = NULL; 4322 } 4323 4324 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4325 { 4326 int i; 4327 4328 if (irq_re_init) { 4329 bp->grp_info = kcalloc(bp->cp_nr_rings, 4330 sizeof(struct bnxt_ring_grp_info), 4331 GFP_KERNEL); 4332 if (!bp->grp_info) 4333 return -ENOMEM; 4334 } 4335 for (i = 0; i < bp->cp_nr_rings; i++) { 4336 if (irq_re_init) 4337 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4338 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4339 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4340 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4341 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4342 } 4343 return 0; 4344 } 4345 4346 static void bnxt_free_vnics(struct bnxt *bp) 4347 { 4348 kfree(bp->vnic_info); 4349 bp->vnic_info = NULL; 4350 bp->nr_vnics = 0; 4351 } 4352 4353 static int bnxt_alloc_vnics(struct bnxt *bp) 4354 { 4355 int num_vnics = 1; 4356 4357 #ifdef CONFIG_RFS_ACCEL 4358 if (bp->flags & BNXT_FLAG_RFS) { 4359 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4360 num_vnics++; 4361 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4362 num_vnics += bp->rx_nr_rings; 4363 } 4364 #endif 4365 4366 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4367 num_vnics++; 4368 4369 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4370 GFP_KERNEL); 4371 if (!bp->vnic_info) 4372 return -ENOMEM; 4373 4374 bp->nr_vnics = num_vnics; 4375 return 0; 4376 } 4377 4378 static void bnxt_init_vnics(struct bnxt *bp) 4379 { 4380 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4381 int i; 4382 4383 for (i = 0; i < bp->nr_vnics; i++) { 4384 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4385 int j; 4386 4387 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4388 vnic->vnic_id = i; 4389 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4390 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4391 4392 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4393 4394 if (bp->vnic_info[i].rss_hash_key) { 4395 if (i == BNXT_VNIC_DEFAULT) { 4396 u8 *key = (void *)vnic->rss_hash_key; 4397 int k; 4398 4399 if (!bp->rss_hash_key_valid && 4400 !bp->rss_hash_key_updated) { 4401 get_random_bytes(bp->rss_hash_key, 4402 HW_HASH_KEY_SIZE); 4403 bp->rss_hash_key_updated = true; 4404 } 4405 4406 memcpy(vnic->rss_hash_key, bp->rss_hash_key, 4407 HW_HASH_KEY_SIZE); 4408 4409 if (!bp->rss_hash_key_updated) 4410 continue; 4411 4412 bp->rss_hash_key_updated = false; 4413 bp->rss_hash_key_valid = true; 4414 4415 bp->toeplitz_prefix = 0; 4416 for (k = 0; k < 8; k++) { 4417 bp->toeplitz_prefix <<= 8; 4418 bp->toeplitz_prefix |= key[k]; 4419 } 4420 } else { 4421 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4422 HW_HASH_KEY_SIZE); 4423 } 4424 } 4425 } 4426 } 4427 4428 static int 
bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4429 { 4430 int pages; 4431 4432 pages = ring_size / desc_per_pg; 4433 4434 if (!pages) 4435 return 1; 4436 4437 pages++; 4438 4439 while (pages & (pages - 1)) 4440 pages++; 4441 4442 return pages; 4443 } 4444 4445 void bnxt_set_tpa_flags(struct bnxt *bp) 4446 { 4447 bp->flags &= ~BNXT_FLAG_TPA; 4448 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4449 return; 4450 if (bp->dev->features & NETIF_F_LRO) 4451 bp->flags |= BNXT_FLAG_LRO; 4452 else if (bp->dev->features & NETIF_F_GRO_HW) 4453 bp->flags |= BNXT_FLAG_GRO; 4454 } 4455 4456 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4457 * be set on entry. 4458 */ 4459 void bnxt_set_ring_params(struct bnxt *bp) 4460 { 4461 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4462 u32 agg_factor = 0, agg_ring_size = 0; 4463 4464 /* 8 for CRC and VLAN */ 4465 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4466 4467 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4468 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4469 4470 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 4471 ring_size = bp->rx_ring_size; 4472 bp->rx_agg_ring_size = 0; 4473 bp->rx_agg_nr_pages = 0; 4474 4475 if (bp->flags & BNXT_FLAG_TPA) 4476 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4477 4478 bp->flags &= ~BNXT_FLAG_JUMBO; 4479 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4480 u32 jumbo_factor; 4481 4482 bp->flags |= BNXT_FLAG_JUMBO; 4483 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4484 if (jumbo_factor > agg_factor) 4485 agg_factor = jumbo_factor; 4486 } 4487 if (agg_factor) { 4488 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4489 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4490 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4491 bp->rx_ring_size, ring_size); 4492 bp->rx_ring_size = ring_size; 4493 } 4494 agg_ring_size = ring_size * agg_factor; 4495 4496 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4497 RX_DESC_CNT); 4498 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4499 u32 tmp = agg_ring_size; 4500 4501 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4502 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4503 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4504 tmp, agg_ring_size); 4505 } 4506 bp->rx_agg_ring_size = agg_ring_size; 4507 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4508 4509 if (BNXT_RX_PAGE_MODE(bp)) { 4510 rx_space = PAGE_SIZE; 4511 rx_size = PAGE_SIZE - 4512 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4513 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4514 } else { 4515 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 4516 rx_space = rx_size + NET_SKB_PAD + 4517 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4518 } 4519 } 4520 4521 bp->rx_buf_use_size = rx_size; 4522 bp->rx_buf_size = rx_space; 4523 4524 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4525 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4526 4527 ring_size = bp->tx_ring_size; 4528 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4529 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4530 4531 max_rx_cmpl = bp->rx_ring_size; 4532 /* MAX TPA needs to be added because TPA_START completions are 4533 * immediately recycled, so the TPA completions are not bound by 4534 * the RX ring size. 
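	 * Up to bp->max_tpa aggregations can be outstanding at once, and
	 * each one holds a TPA completion of its own, so budget one extra
	 * completion per TPA slot on top of the RX ring size.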
4535 */ 4536 if (bp->flags & BNXT_FLAG_TPA) 4537 max_rx_cmpl += bp->max_tpa; 4538 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4539 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4540 bp->cp_ring_size = ring_size; 4541 4542 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4543 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4544 bp->cp_nr_pages = MAX_CP_PAGES; 4545 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4546 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4547 ring_size, bp->cp_ring_size); 4548 } 4549 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4550 bp->cp_ring_mask = bp->cp_bit - 1; 4551 } 4552 4553 /* Changing allocation mode of RX rings. 4554 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4555 */ 4556 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4557 { 4558 struct net_device *dev = bp->dev; 4559 4560 if (page_mode) { 4561 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 4562 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4563 4564 if (bp->xdp_prog->aux->xdp_has_frags) 4565 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4566 else 4567 dev->max_mtu = 4568 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4569 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4570 bp->flags |= BNXT_FLAG_JUMBO; 4571 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4572 } else { 4573 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4574 bp->rx_skb_func = bnxt_rx_page_skb; 4575 } 4576 bp->rx_dir = DMA_BIDIRECTIONAL; 4577 /* Disable LRO or GRO_HW */ 4578 netdev_update_features(dev); 4579 } else { 4580 dev->max_mtu = bp->max_mtu; 4581 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4582 bp->rx_dir = DMA_FROM_DEVICE; 4583 bp->rx_skb_func = bnxt_rx_skb; 4584 } 4585 return 0; 4586 } 4587 4588 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4589 { 4590 int i; 4591 struct bnxt_vnic_info *vnic; 4592 struct pci_dev *pdev = bp->pdev; 4593 4594 if (!bp->vnic_info) 4595 return; 4596 4597 for (i = 0; i < bp->nr_vnics; i++) { 4598 vnic = &bp->vnic_info[i]; 4599 4600 kfree(vnic->fw_grp_ids); 4601 vnic->fw_grp_ids = NULL; 4602 4603 kfree(vnic->uc_list); 4604 vnic->uc_list = NULL; 4605 4606 if (vnic->mc_list) { 4607 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4608 vnic->mc_list, vnic->mc_list_mapping); 4609 vnic->mc_list = NULL; 4610 } 4611 4612 if (vnic->rss_table) { 4613 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4614 vnic->rss_table, 4615 vnic->rss_table_dma_addr); 4616 vnic->rss_table = NULL; 4617 } 4618 4619 vnic->rss_hash_key = NULL; 4620 vnic->flags = 0; 4621 } 4622 } 4623 4624 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4625 { 4626 int i, rc = 0, size; 4627 struct bnxt_vnic_info *vnic; 4628 struct pci_dev *pdev = bp->pdev; 4629 int max_rings; 4630 4631 for (i = 0; i < bp->nr_vnics; i++) { 4632 vnic = &bp->vnic_info[i]; 4633 4634 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4635 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4636 4637 if (mem_size > 0) { 4638 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4639 if (!vnic->uc_list) { 4640 rc = -ENOMEM; 4641 goto out; 4642 } 4643 } 4644 } 4645 4646 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4647 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4648 vnic->mc_list = 4649 dma_alloc_coherent(&pdev->dev, 4650 vnic->mc_list_size, 4651 &vnic->mc_list_mapping, 4652 GFP_KERNEL); 4653 if (!vnic->mc_list) { 4654 rc = -ENOMEM; 4655 goto out; 4656 } 4657 } 4658 4659 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4660 goto vnic_skip_grps; 4661 4662 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4663 max_rings = 
bp->rx_nr_rings; 4664 else 4665 max_rings = 1; 4666 4667 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4668 if (!vnic->fw_grp_ids) { 4669 rc = -ENOMEM; 4670 goto out; 4671 } 4672 vnic_skip_grps: 4673 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4674 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4675 continue; 4676 4677 /* Allocate rss table and hash key */ 4678 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4679 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4680 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4681 4682 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4683 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4684 vnic->rss_table_size, 4685 &vnic->rss_table_dma_addr, 4686 GFP_KERNEL); 4687 if (!vnic->rss_table) { 4688 rc = -ENOMEM; 4689 goto out; 4690 } 4691 4692 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4693 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4694 } 4695 return 0; 4696 4697 out: 4698 return rc; 4699 } 4700 4701 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4702 { 4703 struct bnxt_hwrm_wait_token *token; 4704 4705 dma_pool_destroy(bp->hwrm_dma_pool); 4706 bp->hwrm_dma_pool = NULL; 4707 4708 rcu_read_lock(); 4709 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4710 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4711 rcu_read_unlock(); 4712 } 4713 4714 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4715 { 4716 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4717 BNXT_HWRM_DMA_SIZE, 4718 BNXT_HWRM_DMA_ALIGN, 0); 4719 if (!bp->hwrm_dma_pool) 4720 return -ENOMEM; 4721 4722 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4723 4724 return 0; 4725 } 4726 4727 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4728 { 4729 kfree(stats->hw_masks); 4730 stats->hw_masks = NULL; 4731 kfree(stats->sw_stats); 4732 stats->sw_stats = NULL; 4733 if (stats->hw_stats) { 4734 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4735 stats->hw_stats_map); 4736 stats->hw_stats = NULL; 4737 } 4738 } 4739 4740 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4741 bool alloc_masks) 4742 { 4743 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4744 &stats->hw_stats_map, GFP_KERNEL); 4745 if (!stats->hw_stats) 4746 return -ENOMEM; 4747 4748 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4749 if (!stats->sw_stats) 4750 goto stats_mem_err; 4751 4752 if (alloc_masks) { 4753 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4754 if (!stats->hw_masks) 4755 goto stats_mem_err; 4756 } 4757 return 0; 4758 4759 stats_mem_err: 4760 bnxt_free_stats_mem(bp, stats); 4761 return -ENOMEM; 4762 } 4763 4764 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4765 { 4766 int i; 4767 4768 for (i = 0; i < count; i++) 4769 mask_arr[i] = mask; 4770 } 4771 4772 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4773 { 4774 int i; 4775 4776 for (i = 0; i < count; i++) 4777 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4778 } 4779 4780 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4781 struct bnxt_stats_mem *stats) 4782 { 4783 struct hwrm_func_qstats_ext_output *resp; 4784 struct hwrm_func_qstats_ext_input *req; 4785 __le64 *hw_masks; 4786 int rc; 4787 4788 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 4789 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4790 return -EOPNOTSUPP; 4791 4792 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 4793 if (rc) 4794 return rc; 4795 4796 req->fid = 
cpu_to_le16(0xffff); 4797 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4798 4799 resp = hwrm_req_hold(bp, req); 4800 rc = hwrm_req_send(bp, req); 4801 if (!rc) { 4802 hw_masks = &resp->rx_ucast_pkts; 4803 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 4804 } 4805 hwrm_req_drop(bp, req); 4806 return rc; 4807 } 4808 4809 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 4810 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 4811 4812 static void bnxt_init_stats(struct bnxt *bp) 4813 { 4814 struct bnxt_napi *bnapi = bp->bnapi[0]; 4815 struct bnxt_cp_ring_info *cpr; 4816 struct bnxt_stats_mem *stats; 4817 __le64 *rx_stats, *tx_stats; 4818 int rc, rx_count, tx_count; 4819 u64 *rx_masks, *tx_masks; 4820 u64 mask; 4821 u8 flags; 4822 4823 cpr = &bnapi->cp_ring; 4824 stats = &cpr->stats; 4825 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 4826 if (rc) { 4827 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4828 mask = (1ULL << 48) - 1; 4829 else 4830 mask = -1ULL; 4831 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 4832 } 4833 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4834 stats = &bp->port_stats; 4835 rx_stats = stats->hw_stats; 4836 rx_masks = stats->hw_masks; 4837 rx_count = sizeof(struct rx_port_stats) / 8; 4838 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4839 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4840 tx_count = sizeof(struct tx_port_stats) / 8; 4841 4842 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 4843 rc = bnxt_hwrm_port_qstats(bp, flags); 4844 if (rc) { 4845 mask = (1ULL << 40) - 1; 4846 4847 bnxt_fill_masks(rx_masks, mask, rx_count); 4848 bnxt_fill_masks(tx_masks, mask, tx_count); 4849 } else { 4850 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4851 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 4852 bnxt_hwrm_port_qstats(bp, 0); 4853 } 4854 } 4855 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 4856 stats = &bp->rx_port_stats_ext; 4857 rx_stats = stats->hw_stats; 4858 rx_masks = stats->hw_masks; 4859 rx_count = sizeof(struct rx_port_stats_ext) / 8; 4860 stats = &bp->tx_port_stats_ext; 4861 tx_stats = stats->hw_stats; 4862 tx_masks = stats->hw_masks; 4863 tx_count = sizeof(struct tx_port_stats_ext) / 8; 4864 4865 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4866 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 4867 if (rc) { 4868 mask = (1ULL << 40) - 1; 4869 4870 bnxt_fill_masks(rx_masks, mask, rx_count); 4871 if (tx_stats) 4872 bnxt_fill_masks(tx_masks, mask, tx_count); 4873 } else { 4874 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4875 if (tx_stats) 4876 bnxt_copy_hw_masks(tx_masks, tx_stats, 4877 tx_count); 4878 bnxt_hwrm_port_qstats_ext(bp, 0); 4879 } 4880 } 4881 } 4882 4883 static void bnxt_free_port_stats(struct bnxt *bp) 4884 { 4885 bp->flags &= ~BNXT_FLAG_PORT_STATS; 4886 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 4887 4888 bnxt_free_stats_mem(bp, &bp->port_stats); 4889 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 4890 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 4891 } 4892 4893 static void bnxt_free_ring_stats(struct bnxt *bp) 4894 { 4895 int i; 4896 4897 if (!bp->bnapi) 4898 return; 4899 4900 for (i = 0; i < bp->cp_nr_rings; i++) { 4901 struct bnxt_napi *bnapi = bp->bnapi[i]; 4902 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4903 4904 bnxt_free_stats_mem(bp, &cpr->stats); 4905 4906 kfree(cpr->sw_stats); 4907 cpr->sw_stats = NULL; 4908 } 4909 } 4910 4911 static int bnxt_alloc_stats(struct bnxt *bp) 4912 { 4913 u32 size, i; 4914 int rc; 4915 4916 size = bp->hw_ring_stats_size; 4917 4918 
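	/* Per completion ring: a block of software-only counters
	 * (cpr->sw_stats) plus a stats context of bp->hw_ring_stats_size
	 * bytes that the firmware DMAs counters into.  Only ring 0 also gets
	 * the hw_masks array, which bnxt_init_stats() fills with
	 * counter-width masks (e.g. 48-bit on P5+ chips when the FW mask
	 * query is unavailable).
	 */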
for (i = 0; i < bp->cp_nr_rings; i++) { 4919 struct bnxt_napi *bnapi = bp->bnapi[i]; 4920 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4921 4922 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); 4923 if (!cpr->sw_stats) 4924 return -ENOMEM; 4925 4926 cpr->stats.len = size; 4927 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4928 if (rc) 4929 return rc; 4930 4931 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4932 } 4933 4934 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4935 return 0; 4936 4937 if (bp->port_stats.hw_stats) 4938 goto alloc_ext_stats; 4939 4940 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4941 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4942 if (rc) 4943 return rc; 4944 4945 bp->flags |= BNXT_FLAG_PORT_STATS; 4946 4947 alloc_ext_stats: 4948 /* Display extended statistics only if FW supports it */ 4949 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4950 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4951 return 0; 4952 4953 if (bp->rx_port_stats_ext.hw_stats) 4954 goto alloc_tx_ext_stats; 4955 4956 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4957 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4958 /* Extended stats are optional */ 4959 if (rc) 4960 return 0; 4961 4962 alloc_tx_ext_stats: 4963 if (bp->tx_port_stats_ext.hw_stats) 4964 return 0; 4965 4966 if (bp->hwrm_spec_code >= 0x10902 || 4967 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4968 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4969 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4970 /* Extended stats are optional */ 4971 if (rc) 4972 return 0; 4973 } 4974 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4975 return 0; 4976 } 4977 4978 static void bnxt_clear_ring_indices(struct bnxt *bp) 4979 { 4980 int i, j; 4981 4982 if (!bp->bnapi) 4983 return; 4984 4985 for (i = 0; i < bp->cp_nr_rings; i++) { 4986 struct bnxt_napi *bnapi = bp->bnapi[i]; 4987 struct bnxt_cp_ring_info *cpr; 4988 struct bnxt_rx_ring_info *rxr; 4989 struct bnxt_tx_ring_info *txr; 4990 4991 if (!bnapi) 4992 continue; 4993 4994 cpr = &bnapi->cp_ring; 4995 cpr->cp_raw_cons = 0; 4996 4997 bnxt_for_each_napi_tx(j, bnapi, txr) { 4998 txr->tx_prod = 0; 4999 txr->tx_cons = 0; 5000 txr->tx_hw_cons = 0; 5001 } 5002 5003 rxr = bnapi->rx_ring; 5004 if (rxr) { 5005 rxr->rx_prod = 0; 5006 rxr->rx_agg_prod = 0; 5007 rxr->rx_sw_agg_prod = 0; 5008 rxr->rx_next_cons = 0; 5009 } 5010 bnapi->events = 0; 5011 } 5012 } 5013 5014 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5015 { 5016 u8 type = fltr->type, flags = fltr->flags; 5017 5018 INIT_LIST_HEAD(&fltr->list); 5019 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || 5020 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) 5021 list_add_tail(&fltr->list, &bp->usr_fltr_list); 5022 } 5023 5024 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5025 { 5026 if (!list_empty(&fltr->list)) 5027 list_del_init(&fltr->list); 5028 } 5029 5030 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) 5031 { 5032 struct bnxt_filter_base *usr_fltr, *tmp; 5033 5034 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 5035 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) 5036 continue; 5037 bnxt_del_one_usr_fltr(bp, usr_fltr); 5038 } 5039 } 5040 5041 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5042 { 5043 hlist_del(&fltr->hash); 5044 bnxt_del_one_usr_fltr(bp, fltr); 5045 if (fltr->flags) { 5046 
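		/* A non-zero flags value means the filter owns a bit in the
		 * ntuple filter bitmap (see bnxt_init_l2_filter()); return
		 * the bit and drop the filter count.
		 */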
clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 5047 bp->ntp_fltr_count--; 5048 } 5049 kfree(fltr); 5050 } 5051 5052 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 5053 { 5054 int i; 5055 5056 /* Under rtnl_lock and all our NAPIs have been disabled. It's 5057 * safe to delete the hash table. 5058 */ 5059 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 5060 struct hlist_head *head; 5061 struct hlist_node *tmp; 5062 struct bnxt_ntuple_filter *fltr; 5063 5064 head = &bp->ntp_fltr_hash_tbl[i]; 5065 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5066 bnxt_del_l2_filter(bp, fltr->l2_fltr); 5067 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5068 !list_empty(&fltr->base.list))) 5069 continue; 5070 bnxt_del_fltr(bp, &fltr->base); 5071 } 5072 } 5073 if (!all) 5074 return; 5075 5076 bitmap_free(bp->ntp_fltr_bmap); 5077 bp->ntp_fltr_bmap = NULL; 5078 bp->ntp_fltr_count = 0; 5079 } 5080 5081 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 5082 { 5083 int i, rc = 0; 5084 5085 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 5086 return 0; 5087 5088 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 5089 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 5090 5091 bp->ntp_fltr_count = 0; 5092 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); 5093 5094 if (!bp->ntp_fltr_bmap) 5095 rc = -ENOMEM; 5096 5097 return rc; 5098 } 5099 5100 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 5101 { 5102 int i; 5103 5104 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 5105 struct hlist_head *head; 5106 struct hlist_node *tmp; 5107 struct bnxt_l2_filter *fltr; 5108 5109 head = &bp->l2_fltr_hash_tbl[i]; 5110 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5111 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5112 !list_empty(&fltr->base.list))) 5113 continue; 5114 bnxt_del_fltr(bp, &fltr->base); 5115 } 5116 } 5117 } 5118 5119 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 5120 { 5121 int i; 5122 5123 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 5124 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 5125 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 5126 } 5127 5128 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 5129 { 5130 bnxt_free_vnic_attributes(bp); 5131 bnxt_free_tx_rings(bp); 5132 bnxt_free_rx_rings(bp); 5133 bnxt_free_cp_rings(bp); 5134 bnxt_free_all_cp_arrays(bp); 5135 bnxt_free_ntp_fltrs(bp, false); 5136 bnxt_free_l2_filters(bp, false); 5137 if (irq_re_init) { 5138 bnxt_free_ring_stats(bp); 5139 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 5140 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 5141 bnxt_free_port_stats(bp); 5142 bnxt_free_ring_grps(bp); 5143 bnxt_free_vnics(bp); 5144 kfree(bp->tx_ring_map); 5145 bp->tx_ring_map = NULL; 5146 kfree(bp->tx_ring); 5147 bp->tx_ring = NULL; 5148 kfree(bp->rx_ring); 5149 bp->rx_ring = NULL; 5150 kfree(bp->bnapi); 5151 bp->bnapi = NULL; 5152 } else { 5153 bnxt_clear_ring_indices(bp); 5154 } 5155 } 5156 5157 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5158 { 5159 int i, j, rc, size, arr_size; 5160 void *bnapi; 5161 5162 if (irq_re_init) { 5163 /* Allocate bnapi mem pointer array and mem block for 5164 * all queues 5165 */ 5166 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 5167 bp->cp_nr_rings); 5168 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 5169 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 5170 if (!bnapi) 5171 return -ENOMEM; 5172 5173 bp->bnapi = bnapi; 5174 bnapi += arr_size; 5175 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += 
size) { 5176 bp->bnapi[i] = bnapi; 5177 bp->bnapi[i]->index = i; 5178 bp->bnapi[i]->bp = bp; 5179 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5180 struct bnxt_cp_ring_info *cpr = 5181 &bp->bnapi[i]->cp_ring; 5182 5183 cpr->cp_ring_struct.ring_mem.flags = 5184 BNXT_RMEM_RING_PTE_FLAG; 5185 } 5186 } 5187 5188 bp->rx_ring = kcalloc(bp->rx_nr_rings, 5189 sizeof(struct bnxt_rx_ring_info), 5190 GFP_KERNEL); 5191 if (!bp->rx_ring) 5192 return -ENOMEM; 5193 5194 for (i = 0; i < bp->rx_nr_rings; i++) { 5195 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5196 5197 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5198 rxr->rx_ring_struct.ring_mem.flags = 5199 BNXT_RMEM_RING_PTE_FLAG; 5200 rxr->rx_agg_ring_struct.ring_mem.flags = 5201 BNXT_RMEM_RING_PTE_FLAG; 5202 } else { 5203 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 5204 } 5205 rxr->bnapi = bp->bnapi[i]; 5206 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 5207 } 5208 5209 bp->tx_ring = kcalloc(bp->tx_nr_rings, 5210 sizeof(struct bnxt_tx_ring_info), 5211 GFP_KERNEL); 5212 if (!bp->tx_ring) 5213 return -ENOMEM; 5214 5215 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 5216 GFP_KERNEL); 5217 5218 if (!bp->tx_ring_map) 5219 return -ENOMEM; 5220 5221 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5222 j = 0; 5223 else 5224 j = bp->rx_nr_rings; 5225 5226 for (i = 0; i < bp->tx_nr_rings; i++) { 5227 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5228 struct bnxt_napi *bnapi2; 5229 5230 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5231 txr->tx_ring_struct.ring_mem.flags = 5232 BNXT_RMEM_RING_PTE_FLAG; 5233 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 5234 if (i >= bp->tx_nr_rings_xdp) { 5235 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 5236 5237 bnapi2 = bp->bnapi[k]; 5238 txr->txq_index = i - bp->tx_nr_rings_xdp; 5239 txr->tx_napi_idx = 5240 BNXT_RING_TO_TC(bp, txr->txq_index); 5241 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 5242 bnapi2->tx_int = bnxt_tx_int; 5243 } else { 5244 bnapi2 = bp->bnapi[j]; 5245 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5246 bnapi2->tx_ring[0] = txr; 5247 bnapi2->tx_int = bnxt_tx_int_xdp; 5248 j++; 5249 } 5250 txr->bnapi = bnapi2; 5251 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5252 txr->tx_cpr = &bnapi2->cp_ring; 5253 } 5254 5255 rc = bnxt_alloc_stats(bp); 5256 if (rc) 5257 goto alloc_mem_err; 5258 bnxt_init_stats(bp); 5259 5260 rc = bnxt_alloc_ntp_fltrs(bp); 5261 if (rc) 5262 goto alloc_mem_err; 5263 5264 rc = bnxt_alloc_vnics(bp); 5265 if (rc) 5266 goto alloc_mem_err; 5267 } 5268 5269 rc = bnxt_alloc_all_cp_arrays(bp); 5270 if (rc) 5271 goto alloc_mem_err; 5272 5273 bnxt_init_ring_struct(bp); 5274 5275 rc = bnxt_alloc_rx_rings(bp); 5276 if (rc) 5277 goto alloc_mem_err; 5278 5279 rc = bnxt_alloc_tx_rings(bp); 5280 if (rc) 5281 goto alloc_mem_err; 5282 5283 rc = bnxt_alloc_cp_rings(bp); 5284 if (rc) 5285 goto alloc_mem_err; 5286 5287 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | 5288 BNXT_VNIC_MCAST_FLAG | 5289 BNXT_VNIC_UCAST_FLAG; 5290 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5291 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5292 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5293 5294 rc = bnxt_alloc_vnic_attributes(bp); 5295 if (rc) 5296 goto alloc_mem_err; 5297 return 0; 5298 5299 alloc_mem_err: 5300 bnxt_free_mem(bp, true); 5301 return rc; 5302 } 5303 5304 static void bnxt_disable_int(struct bnxt *bp) 5305 { 5306 int i; 5307 5308 if (!bp->bnapi) 5309 return; 5310 5311 for (i = 0; i < bp->cp_nr_rings; i++) { 5312 struct bnxt_napi *bnapi = bp->bnapi[i]; 5313 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5314 struct 
bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5315 5316 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5317 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5318 } 5319 } 5320 5321 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5322 { 5323 struct bnxt_napi *bnapi = bp->bnapi[n]; 5324 struct bnxt_cp_ring_info *cpr; 5325 5326 cpr = &bnapi->cp_ring; 5327 return cpr->cp_ring_struct.map_idx; 5328 } 5329 5330 static void bnxt_disable_int_sync(struct bnxt *bp) 5331 { 5332 int i; 5333 5334 if (!bp->irq_tbl) 5335 return; 5336 5337 atomic_inc(&bp->intr_sem); 5338 5339 bnxt_disable_int(bp); 5340 for (i = 0; i < bp->cp_nr_rings; i++) { 5341 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5342 5343 synchronize_irq(bp->irq_tbl[map_idx].vector); 5344 } 5345 } 5346 5347 static void bnxt_enable_int(struct bnxt *bp) 5348 { 5349 int i; 5350 5351 atomic_set(&bp->intr_sem, 0); 5352 for (i = 0; i < bp->cp_nr_rings; i++) { 5353 struct bnxt_napi *bnapi = bp->bnapi[i]; 5354 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5355 5356 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5357 } 5358 } 5359 5360 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5361 bool async_only) 5362 { 5363 DECLARE_BITMAP(async_events_bmap, 256); 5364 u32 *events = (u32 *)async_events_bmap; 5365 struct hwrm_func_drv_rgtr_output *resp; 5366 struct hwrm_func_drv_rgtr_input *req; 5367 u32 flags; 5368 int rc, i; 5369 5370 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5371 if (rc) 5372 return rc; 5373 5374 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5375 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5376 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5377 5378 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5379 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5380 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5381 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5382 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5383 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5384 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5385 req->flags = cpu_to_le32(flags); 5386 req->ver_maj_8b = DRV_VER_MAJ; 5387 req->ver_min_8b = DRV_VER_MIN; 5388 req->ver_upd_8b = DRV_VER_UPD; 5389 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5390 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5391 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5392 5393 if (BNXT_PF(bp)) { 5394 u32 data[8]; 5395 int i; 5396 5397 memset(data, 0, sizeof(data)); 5398 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5399 u16 cmd = bnxt_vf_req_snif[i]; 5400 unsigned int bit, idx; 5401 5402 idx = cmd / 32; 5403 bit = cmd % 32; 5404 data[idx] |= 1 << bit; 5405 } 5406 5407 for (i = 0; i < 8; i++) 5408 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5409 5410 req->enables |= 5411 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5412 } 5413 5414 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5415 req->flags |= cpu_to_le32( 5416 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5417 5418 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 5419 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5420 u16 event_id = bnxt_async_events_arr[i]; 5421 5422 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5423 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5424 continue; 5425 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5426 !bp->ptp_cfg) 5427 continue; 5428 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5429 } 5430 if (bmap && bmap_size) { 5431 for (i = 0; i < bmap_size; i++) { 5432 if (test_bit(i, bmap)) 5433 __set_bit(i, 
async_events_bmap); 5434 } 5435 } 5436 for (i = 0; i < 8; i++) 5437 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5438 5439 if (async_only) 5440 req->enables = 5441 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5442 5443 resp = hwrm_req_hold(bp, req); 5444 rc = hwrm_req_send(bp, req); 5445 if (!rc) { 5446 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5447 if (resp->flags & 5448 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5449 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5450 } 5451 hwrm_req_drop(bp, req); 5452 return rc; 5453 } 5454 5455 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5456 { 5457 struct hwrm_func_drv_unrgtr_input *req; 5458 int rc; 5459 5460 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5461 return 0; 5462 5463 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5464 if (rc) 5465 return rc; 5466 return hwrm_req_send(bp, req); 5467 } 5468 5469 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5470 5471 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5472 { 5473 struct hwrm_tunnel_dst_port_free_input *req; 5474 int rc; 5475 5476 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5477 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5478 return 0; 5479 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5480 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5481 return 0; 5482 5483 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5484 if (rc) 5485 return rc; 5486 5487 req->tunnel_type = tunnel_type; 5488 5489 switch (tunnel_type) { 5490 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5491 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5492 bp->vxlan_port = 0; 5493 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5494 break; 5495 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5496 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5497 bp->nge_port = 0; 5498 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5499 break; 5500 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5501 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5502 bp->vxlan_gpe_port = 0; 5503 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5504 break; 5505 default: 5506 break; 5507 } 5508 5509 rc = hwrm_req_send(bp, req); 5510 if (rc) 5511 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 5512 rc); 5513 if (bp->flags & BNXT_FLAG_TPA) 5514 bnxt_set_tpa(bp, true); 5515 return rc; 5516 } 5517 5518 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5519 u8 tunnel_type) 5520 { 5521 struct hwrm_tunnel_dst_port_alloc_output *resp; 5522 struct hwrm_tunnel_dst_port_alloc_input *req; 5523 int rc; 5524 5525 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5526 if (rc) 5527 return rc; 5528 5529 req->tunnel_type = tunnel_type; 5530 req->tunnel_dst_port_val = port; 5531 5532 resp = hwrm_req_hold(bp, req); 5533 rc = hwrm_req_send(bp, req); 5534 if (rc) { 5535 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 5536 rc); 5537 goto err_out; 5538 } 5539 5540 switch (tunnel_type) { 5541 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5542 bp->vxlan_port = port; 5543 bp->vxlan_fw_dst_port_id = 5544 le16_to_cpu(resp->tunnel_dst_port_id); 5545 break; 5546 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5547 bp->nge_port = port; 5548 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5549 break; 5550 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5551 bp->vxlan_gpe_port = port; 5552 bp->vxlan_gpe_fw_dst_port_id = 5553 le16_to_cpu(resp->tunnel_dst_port_id); 5554 break; 5555 default: 5556 break; 5557 } 5558 if (bp->flags & BNXT_FLAG_TPA) 5559 bnxt_set_tpa(bp, true); 5560 5561 err_out: 5562 hwrm_req_drop(bp, req); 5563 return rc; 5564 } 5565 5566 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5567 { 5568 struct hwrm_cfa_l2_set_rx_mask_input *req; 5569 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5570 int rc; 5571 5572 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5573 if (rc) 5574 return rc; 5575 5576 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5577 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5578 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5579 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5580 } 5581 req->mask = cpu_to_le32(vnic->rx_mask); 5582 return hwrm_req_send_silent(bp, req); 5583 } 5584 5585 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5586 { 5587 if (!atomic_dec_and_test(&fltr->refcnt)) 5588 return; 5589 spin_lock_bh(&bp->ntp_fltr_lock); 5590 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5591 spin_unlock_bh(&bp->ntp_fltr_lock); 5592 return; 5593 } 5594 hlist_del_rcu(&fltr->base.hash); 5595 bnxt_del_one_usr_fltr(bp, &fltr->base); 5596 if (fltr->base.flags) { 5597 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5598 bp->ntp_fltr_count--; 5599 } 5600 spin_unlock_bh(&bp->ntp_fltr_lock); 5601 kfree_rcu(fltr, base.rcu); 5602 } 5603 5604 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5605 struct bnxt_l2_key *key, 5606 u32 idx) 5607 { 5608 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5609 struct bnxt_l2_filter *fltr; 5610 5611 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5612 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5613 5614 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5615 l2_key->vlan == key->vlan) 5616 return fltr; 5617 } 5618 return NULL; 5619 } 5620 5621 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5622 struct bnxt_l2_key *key, 5623 u32 idx) 5624 { 5625 struct bnxt_l2_filter *fltr = NULL; 5626 5627 rcu_read_lock(); 5628 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5629 if (fltr) 5630 atomic_inc(&fltr->refcnt); 5631 rcu_read_unlock(); 5632 return fltr; 5633 } 5634 5635 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5636 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5637 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5638 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5639 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5640 5641 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5642 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5643 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5644 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5645 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5646 5647 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5648 { 5649 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 
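		/* 4-tuple hashing feeds the IPv4 address pair plus the L4
		 * ports into the Toeplitz hash; otherwise only the addresses
		 * are hashed.
		 */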
5650 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5651 return sizeof(fkeys->addrs.v4addrs) + 5652 sizeof(fkeys->ports); 5653 5654 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5655 return sizeof(fkeys->addrs.v4addrs); 5656 } 5657 5658 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5659 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5660 return sizeof(fkeys->addrs.v6addrs) + 5661 sizeof(fkeys->ports); 5662 5663 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5664 return sizeof(fkeys->addrs.v6addrs); 5665 } 5666 5667 return 0; 5668 } 5669 5670 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5671 const unsigned char *key) 5672 { 5673 u64 prefix = bp->toeplitz_prefix, hash = 0; 5674 struct bnxt_ipv4_tuple tuple4; 5675 struct bnxt_ipv6_tuple tuple6; 5676 int i, j, len = 0; 5677 u8 *four_tuple; 5678 5679 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5680 if (!len) 5681 return 0; 5682 5683 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5684 tuple4.v4addrs = fkeys->addrs.v4addrs; 5685 tuple4.ports = fkeys->ports; 5686 four_tuple = (unsigned char *)&tuple4; 5687 } else { 5688 tuple6.v6addrs = fkeys->addrs.v6addrs; 5689 tuple6.ports = fkeys->ports; 5690 four_tuple = (unsigned char *)&tuple6; 5691 } 5692 5693 for (i = 0, j = 8; i < len; i++, j++) { 5694 u8 byte = four_tuple[i]; 5695 int bit; 5696 5697 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5698 if (byte & 0x80) 5699 hash ^= prefix; 5700 } 5701 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5702 } 5703 5704 /* The valid part of the hash is in the upper 32 bits. */ 5705 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5706 } 5707 5708 #ifdef CONFIG_RFS_ACCEL 5709 static struct bnxt_l2_filter * 5710 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5711 { 5712 struct bnxt_l2_filter *fltr; 5713 u32 idx; 5714 5715 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5716 BNXT_L2_FLTR_HASH_MASK; 5717 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5718 return fltr; 5719 } 5720 #endif 5721 5722 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 5723 struct bnxt_l2_key *key, u32 idx) 5724 { 5725 struct hlist_head *head; 5726 5727 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 5728 fltr->l2_key.vlan = key->vlan; 5729 fltr->base.type = BNXT_FLTR_TYPE_L2; 5730 if (fltr->base.flags) { 5731 int bit_id; 5732 5733 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5734 bp->max_fltr, 0); 5735 if (bit_id < 0) 5736 return -ENOMEM; 5737 fltr->base.sw_id = (u16)bit_id; 5738 bp->ntp_fltr_count++; 5739 } 5740 head = &bp->l2_fltr_hash_tbl[idx]; 5741 hlist_add_head_rcu(&fltr->base.hash, head); 5742 bnxt_insert_usr_fltr(bp, &fltr->base); 5743 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 5744 atomic_set(&fltr->refcnt, 1); 5745 return 0; 5746 } 5747 5748 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 5749 struct bnxt_l2_key *key, 5750 gfp_t gfp) 5751 { 5752 struct bnxt_l2_filter *fltr; 5753 u32 idx; 5754 int rc; 5755 5756 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5757 BNXT_L2_FLTR_HASH_MASK; 5758 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5759 if (fltr) 5760 return fltr; 5761 5762 fltr = kzalloc(sizeof(*fltr), gfp); 5763 if (!fltr) 5764 return ERR_PTR(-ENOMEM); 5765 spin_lock_bh(&bp->ntp_fltr_lock); 5766 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5767 spin_unlock_bh(&bp->ntp_fltr_lock); 5768 if (rc) { 5769 bnxt_del_l2_filter(bp, fltr); 5770 fltr = ERR_PTR(rc); 5771 } 5772 return fltr; 5773 } 5774 5775 struct bnxt_l2_filter 
*bnxt_alloc_new_l2_filter(struct bnxt *bp, 5776 struct bnxt_l2_key *key, 5777 u16 flags) 5778 { 5779 struct bnxt_l2_filter *fltr; 5780 u32 idx; 5781 int rc; 5782 5783 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5784 BNXT_L2_FLTR_HASH_MASK; 5785 spin_lock_bh(&bp->ntp_fltr_lock); 5786 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5787 if (fltr) { 5788 fltr = ERR_PTR(-EEXIST); 5789 goto l2_filter_exit; 5790 } 5791 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); 5792 if (!fltr) { 5793 fltr = ERR_PTR(-ENOMEM); 5794 goto l2_filter_exit; 5795 } 5796 fltr->base.flags = flags; 5797 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5798 if (rc) { 5799 spin_unlock_bh(&bp->ntp_fltr_lock); 5800 bnxt_del_l2_filter(bp, fltr); 5801 return ERR_PTR(rc); 5802 } 5803 5804 l2_filter_exit: 5805 spin_unlock_bh(&bp->ntp_fltr_lock); 5806 return fltr; 5807 } 5808 5809 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 5810 { 5811 #ifdef CONFIG_BNXT_SRIOV 5812 struct bnxt_vf_info *vf = &pf->vf[vf_idx]; 5813 5814 return vf->fw_fid; 5815 #else 5816 return INVALID_HW_RING_ID; 5817 #endif 5818 } 5819 5820 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5821 { 5822 struct hwrm_cfa_l2_filter_free_input *req; 5823 u16 target_id = 0xffff; 5824 int rc; 5825 5826 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5827 struct bnxt_pf_info *pf = &bp->pf; 5828 5829 if (fltr->base.vf_idx >= pf->active_vfs) 5830 return -EINVAL; 5831 5832 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5833 if (target_id == INVALID_HW_RING_ID) 5834 return -EINVAL; 5835 } 5836 5837 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 5838 if (rc) 5839 return rc; 5840 5841 req->target_id = cpu_to_le16(target_id); 5842 req->l2_filter_id = fltr->base.filter_id; 5843 return hwrm_req_send(bp, req); 5844 } 5845 5846 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5847 { 5848 struct hwrm_cfa_l2_filter_alloc_output *resp; 5849 struct hwrm_cfa_l2_filter_alloc_input *req; 5850 u16 target_id = 0xffff; 5851 int rc; 5852 5853 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5854 struct bnxt_pf_info *pf = &bp->pf; 5855 5856 if (fltr->base.vf_idx >= pf->active_vfs) 5857 return -EINVAL; 5858 5859 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5860 } 5861 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 5862 if (rc) 5863 return rc; 5864 5865 req->target_id = cpu_to_le16(target_id); 5866 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 5867 5868 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 5869 req->flags |= 5870 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 5871 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 5872 req->enables = 5873 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 5874 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 5875 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 5876 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 5877 eth_broadcast_addr(req->l2_addr_mask); 5878 5879 if (fltr->l2_key.vlan) { 5880 req->enables |= 5881 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 5882 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 5883 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 5884 req->num_vlans = 1; 5885 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 5886 req->l2_ivlan_mask = cpu_to_le16(0xfff); 5887 } 5888 5889 resp = hwrm_req_hold(bp, req); 5890 rc = hwrm_req_send(bp, req); 5891 if (!rc) { 5892 fltr->base.filter_id = resp->l2_filter_id; 5893 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 5894 } 5895 hwrm_req_drop(bp, req); 5896 return 
rc; 5897 } 5898 5899 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 5900 struct bnxt_ntuple_filter *fltr) 5901 { 5902 struct hwrm_cfa_ntuple_filter_free_input *req; 5903 int rc; 5904 5905 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 5906 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 5907 if (rc) 5908 return rc; 5909 5910 req->ntuple_filter_id = fltr->base.filter_id; 5911 return hwrm_req_send(bp, req); 5912 } 5913 5914 #define BNXT_NTP_FLTR_FLAGS \ 5915 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 5916 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 5917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 5918 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 5919 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 5920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 5921 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 5922 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 5923 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 5924 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 5925 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 5926 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 5927 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 5928 5929 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 5930 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 5931 5932 void bnxt_fill_ipv6_mask(__be32 mask[4]) 5933 { 5934 int i; 5935 5936 for (i = 0; i < 4; i++) 5937 mask[i] = cpu_to_be32(~0); 5938 } 5939 5940 static void 5941 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 5942 struct hwrm_cfa_ntuple_filter_alloc_input *req, 5943 struct bnxt_ntuple_filter *fltr) 5944 { 5945 u16 rxq = fltr->base.rxq; 5946 5947 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 5948 struct ethtool_rxfh_context *ctx; 5949 struct bnxt_rss_ctx *rss_ctx; 5950 struct bnxt_vnic_info *vnic; 5951 5952 ctx = xa_load(&bp->dev->ethtool->rss_ctx, 5953 fltr->base.fw_vnic_id); 5954 if (ctx) { 5955 rss_ctx = ethtool_rxfh_context_priv(ctx); 5956 vnic = &rss_ctx->vnic; 5957 5958 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5959 } 5960 return; 5961 } 5962 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 5963 struct bnxt_vnic_info *vnic; 5964 u32 enables; 5965 5966 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 5967 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5968 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 5969 req->enables |= cpu_to_le32(enables); 5970 req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 5971 } else { 5972 u32 flags; 5973 5974 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5975 req->flags |= cpu_to_le32(flags); 5976 req->dst_id = cpu_to_le16(rxq); 5977 } 5978 } 5979 5980 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5981 struct bnxt_ntuple_filter *fltr) 5982 { 5983 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 5984 struct hwrm_cfa_ntuple_filter_alloc_input *req; 5985 struct bnxt_flow_masks *masks = &fltr->fmasks; 5986 struct flow_keys *keys = &fltr->fkeys; 5987 struct bnxt_l2_filter *l2_fltr; 5988 struct bnxt_vnic_info *vnic; 5989 int rc; 5990 5991 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 5992 if (rc) 5993 return rc; 5994 5995 l2_fltr = fltr->l2_fltr; 5996 req->l2_filter_id = l2_fltr->base.filter_id; 5997 5998 if (fltr->base.flags & BNXT_ACT_DROP) { 5999 req->flags = 6000 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 6001 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 6002 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); 6003 } else { 6004 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 6005 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6006 } 
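/* Note: the if/else chain above only selects where matching packets are
 * steered (dropped, an RFS ring table index when the firmware advertises
 * BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2, or the per-RX-ring VNIC on older
 * firmware).  The fields below describe the match itself:
 * BNXT_NTP_FLTR_FLAGS enables the address, port and protocol fields in one
 * shot, and the per-field masks come from fltr->fmasks, so a field whose
 * mask is left at zero is presumably treated as a wildcard by the firmware.
 */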
6007 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 6008 6009 req->ethertype = htons(ETH_P_IP); 6010 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 6011 req->ip_protocol = keys->basic.ip_proto; 6012 6013 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 6014 req->ethertype = htons(ETH_P_IPV6); 6015 req->ip_addr_type = 6016 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 6017 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; 6018 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; 6019 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; 6020 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; 6021 } else { 6022 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 6023 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; 6024 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 6025 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; 6026 } 6027 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 6028 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 6029 req->tunnel_type = 6030 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 6031 } 6032 6033 req->src_port = keys->ports.src; 6034 req->src_port_mask = masks->ports.src; 6035 req->dst_port = keys->ports.dst; 6036 req->dst_port_mask = masks->ports.dst; 6037 6038 resp = hwrm_req_hold(bp, req); 6039 rc = hwrm_req_send(bp, req); 6040 if (!rc) 6041 fltr->base.filter_id = resp->ntuple_filter_id; 6042 hwrm_req_drop(bp, req); 6043 return rc; 6044 } 6045 6046 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 6047 const u8 *mac_addr) 6048 { 6049 struct bnxt_l2_filter *fltr; 6050 struct bnxt_l2_key key; 6051 int rc; 6052 6053 ether_addr_copy(key.dst_mac_addr, mac_addr); 6054 key.vlan = 0; 6055 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 6056 if (IS_ERR(fltr)) 6057 return PTR_ERR(fltr); 6058 6059 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; 6060 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 6061 if (rc) 6062 bnxt_del_l2_filter(bp, fltr); 6063 else 6064 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; 6065 return rc; 6066 } 6067 6068 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 6069 { 6070 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 6071 6072 /* Any associated ntuple filters will also be cleared by firmware. 
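 * Only the L2 filters are freed explicitly in the loop below;
 * bnxt_del_l2_filter() then releases the corresponding driver-side entry.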
*/ 6073 for (i = 0; i < num_of_vnics; i++) { 6074 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6075 6076 for (j = 0; j < vnic->uc_filter_count; j++) { 6077 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; 6078 6079 bnxt_hwrm_l2_filter_free(bp, fltr); 6080 bnxt_del_l2_filter(bp, fltr); 6081 } 6082 vnic->uc_filter_count = 0; 6083 } 6084 } 6085 6086 #define BNXT_DFLT_TUNL_TPA_BMAP \ 6087 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ 6088 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ 6089 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) 6090 6091 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, 6092 struct hwrm_vnic_tpa_cfg_input *req) 6093 { 6094 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; 6095 6096 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) 6097 return; 6098 6099 if (bp->vxlan_port) 6100 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; 6101 if (bp->vxlan_gpe_port) 6102 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; 6103 if (bp->nge_port) 6104 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; 6105 6106 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); 6107 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); 6108 } 6109 6110 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6111 u32 tpa_flags) 6112 { 6113 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 6114 struct hwrm_vnic_tpa_cfg_input *req; 6115 int rc; 6116 6117 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 6118 return 0; 6119 6120 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 6121 if (rc) 6122 return rc; 6123 6124 if (tpa_flags) { 6125 u16 mss = bp->dev->mtu - 40; 6126 u32 nsegs, n, segs = 0, flags; 6127 6128 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 6129 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 6130 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 6131 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 6132 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 6133 if (tpa_flags & BNXT_FLAG_GRO) 6134 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 6135 6136 req->flags = cpu_to_le32(flags); 6137 6138 req->enables = 6139 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 6140 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 6141 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 6142 6143 /* The number of aggregation segments is in log2 units, and the first 6144 * packet is not included in these units.
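 * For example, assuming a 1500-byte MTU (mss = 1460), a 4K
 * BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS of 17: n = 4096 / 1460 = 2,
 * nsegs = (17 - 1) * 2 = 32, and ilog2(32) = 5 is what gets programmed
 * as max_agg_segs on pre-P5 chips (P5 and newer use MAX_TPA_SEGS_P5
 * instead).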
6145 */ 6146 if (mss <= BNXT_RX_PAGE_SIZE) { 6147 n = BNXT_RX_PAGE_SIZE / mss; 6148 nsegs = (MAX_SKB_FRAGS - 1) * n; 6149 } else { 6150 n = mss / BNXT_RX_PAGE_SIZE; 6151 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 6152 n++; 6153 nsegs = (MAX_SKB_FRAGS - n) / n; 6154 } 6155 6156 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6157 segs = MAX_TPA_SEGS_P5; 6158 max_aggs = bp->max_tpa; 6159 } else { 6160 segs = ilog2(nsegs); 6161 } 6162 req->max_agg_segs = cpu_to_le16(segs); 6163 req->max_aggs = cpu_to_le16(max_aggs); 6164 6165 req->min_agg_len = cpu_to_le32(512); 6166 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 6167 } 6168 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6169 6170 return hwrm_req_send(bp, req); 6171 } 6172 6173 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 6174 { 6175 struct bnxt_ring_grp_info *grp_info; 6176 6177 grp_info = &bp->grp_info[ring->grp_idx]; 6178 return grp_info->cp_fw_ring_id; 6179 } 6180 6181 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 6182 { 6183 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6184 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 6185 else 6186 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 6187 } 6188 6189 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 6190 { 6191 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6192 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 6193 else 6194 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 6195 } 6196 6197 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 6198 { 6199 int entries; 6200 6201 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6202 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 6203 else 6204 entries = HW_HASH_INDEX_SIZE; 6205 6206 bp->rss_indir_tbl_entries = entries; 6207 bp->rss_indir_tbl = 6208 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); 6209 if (!bp->rss_indir_tbl) 6210 return -ENOMEM; 6211 6212 return 0; 6213 } 6214 6215 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, 6216 struct ethtool_rxfh_context *rss_ctx) 6217 { 6218 u16 max_rings, max_entries, pad, i; 6219 u32 *rss_indir_tbl; 6220 6221 if (!bp->rx_nr_rings) 6222 return; 6223 6224 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6225 max_rings = bp->rx_nr_rings - 1; 6226 else 6227 max_rings = bp->rx_nr_rings; 6228 6229 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 6230 if (rss_ctx) 6231 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); 6232 else 6233 rss_indir_tbl = &bp->rss_indir_tbl[0]; 6234 6235 for (i = 0; i < max_entries; i++) 6236 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 6237 6238 pad = bp->rss_indir_tbl_entries - max_entries; 6239 if (pad) 6240 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); 6241 } 6242 6243 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 6244 { 6245 u32 i, tbl_size, max_ring = 0; 6246 6247 if (!bp->rss_indir_tbl) 6248 return 0; 6249 6250 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6251 for (i = 0; i < tbl_size; i++) 6252 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 6253 return max_ring; 6254 } 6255 6256 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 6257 { 6258 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6259 if (!rx_rings) 6260 return 0; 6261 return bnxt_calc_nr_ring_pages(rx_rings - 1, 6262 BNXT_RSS_TABLE_ENTRIES_P5); 6263 } 6264 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6265 return 2; 6266 return 1; 6267 } 6268 6269 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6270 { 6271 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 6272 u16 i, j; 6273 6274 /* Fill the RSS 
indirection table with ring group ids */ 6275 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 6276 if (!no_rss) 6277 j = bp->rss_indir_tbl[i]; 6278 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 6279 } 6280 } 6281 6282 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 6283 struct bnxt_vnic_info *vnic) 6284 { 6285 __le16 *ring_tbl = vnic->rss_table; 6286 struct bnxt_rx_ring_info *rxr; 6287 u16 tbl_size, i; 6288 6289 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6290 6291 for (i = 0; i < tbl_size; i++) { 6292 u16 ring_id, j; 6293 6294 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6295 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6296 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 6297 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; 6298 else 6299 j = bp->rss_indir_tbl[i]; 6300 rxr = &bp->rx_ring[j]; 6301 6302 ring_id = rxr->rx_ring_struct.fw_ring_id; 6303 *ring_tbl++ = cpu_to_le16(ring_id); 6304 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6305 *ring_tbl++ = cpu_to_le16(ring_id); 6306 } 6307 } 6308 6309 static void 6310 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 6311 struct bnxt_vnic_info *vnic) 6312 { 6313 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6314 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 6315 if (bp->flags & BNXT_FLAG_CHIP_P7) 6316 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; 6317 } else { 6318 bnxt_fill_hw_rss_tbl(bp, vnic); 6319 } 6320 6321 if (bp->rss_hash_delta) { 6322 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 6323 if (bp->rss_hash_cfg & bp->rss_hash_delta) 6324 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 6325 else 6326 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 6327 } else { 6328 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 6329 } 6330 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 6331 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6332 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6333 } 6334 6335 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6336 bool set_rss) 6337 { 6338 struct hwrm_vnic_rss_cfg_input *req; 6339 int rc; 6340 6341 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6342 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6343 return 0; 6344 6345 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6346 if (rc) 6347 return rc; 6348 6349 if (set_rss) 6350 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6351 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6352 return hwrm_req_send(bp, req); 6353 } 6354 6355 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, 6356 struct bnxt_vnic_info *vnic, bool set_rss) 6357 { 6358 struct hwrm_vnic_rss_cfg_input *req; 6359 dma_addr_t ring_tbl_map; 6360 u32 i, nr_ctxs; 6361 int rc; 6362 6363 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6364 if (rc) 6365 return rc; 6366 6367 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6368 if (!set_rss) 6369 return hwrm_req_send(bp, req); 6370 6371 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6372 ring_tbl_map = vnic->rss_table_dma_addr; 6373 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6374 6375 hwrm_req_hold(bp, req); 6376 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6377 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6378 req->ring_table_pair_index = i; 6379 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6380 rc = hwrm_req_send(bp, req); 6381 if (rc) 6382 goto exit; 6383 } 6384 6385 exit: 6386 hwrm_req_drop(bp, req); 6387 return rc; 6388 } 6389 6390 static void 
bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6391 { 6392 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6393 struct hwrm_vnic_rss_qcfg_output *resp; 6394 struct hwrm_vnic_rss_qcfg_input *req; 6395 6396 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6397 return; 6398 6399 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6400 /* all contexts configured to same hash_type, zero always exists */ 6401 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6402 resp = hwrm_req_hold(bp, req); 6403 if (!hwrm_req_send(bp, req)) { 6404 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6405 bp->rss_hash_delta = 0; 6406 } 6407 hwrm_req_drop(bp, req); 6408 } 6409 6410 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6411 { 6412 struct hwrm_vnic_plcmodes_cfg_input *req; 6413 int rc; 6414 6415 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6416 if (rc) 6417 return rc; 6418 6419 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6420 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6421 6422 if (BNXT_RX_PAGE_MODE(bp)) { 6423 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6424 } else { 6425 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6426 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6427 req->enables |= 6428 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6429 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 6430 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 6431 } 6432 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6433 return hwrm_req_send(bp, req); 6434 } 6435 6436 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, 6437 struct bnxt_vnic_info *vnic, 6438 u16 ctx_idx) 6439 { 6440 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6441 6442 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6443 return; 6444 6445 req->rss_cos_lb_ctx_id = 6446 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); 6447 6448 hwrm_req_send(bp, req); 6449 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6450 } 6451 6452 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6453 { 6454 int i, j; 6455 6456 for (i = 0; i < bp->nr_vnics; i++) { 6457 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6458 6459 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6460 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6461 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); 6462 } 6463 } 6464 bp->rsscos_nr_ctxs = 0; 6465 } 6466 6467 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, 6468 struct bnxt_vnic_info *vnic, u16 ctx_idx) 6469 { 6470 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6471 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6472 int rc; 6473 6474 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6475 if (rc) 6476 return rc; 6477 6478 resp = hwrm_req_hold(bp, req); 6479 rc = hwrm_req_send(bp, req); 6480 if (!rc) 6481 vnic->fw_rss_cos_lb_ctx[ctx_idx] = 6482 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6483 hwrm_req_drop(bp, req); 6484 6485 return rc; 6486 } 6487 6488 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6489 { 6490 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6491 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6492 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6493 } 6494 6495 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6496 { 6497 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6498 struct hwrm_vnic_cfg_input *req; 6499 unsigned int ring = 0, grp_idx; 6500 u16 def_vlan = 
0; 6501 int rc; 6502 6503 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6504 if (rc) 6505 return rc; 6506 6507 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6508 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6509 6510 req->default_rx_ring_id = 6511 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6512 req->default_cmpl_ring_id = 6513 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6514 req->enables = 6515 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6516 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6517 goto vnic_mru; 6518 } 6519 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6520 /* Only RSS support for now TBD: COS & LB */ 6521 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6522 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6523 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6524 VNIC_CFG_REQ_ENABLES_MRU); 6525 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6526 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6527 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6528 VNIC_CFG_REQ_ENABLES_MRU); 6529 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6530 } else { 6531 req->rss_rule = cpu_to_le16(0xffff); 6532 } 6533 6534 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6535 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6536 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6537 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6538 } else { 6539 req->cos_rule = cpu_to_le16(0xffff); 6540 } 6541 6542 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6543 ring = 0; 6544 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6545 ring = vnic->vnic_id - 1; 6546 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6547 ring = bp->rx_nr_rings - 1; 6548 6549 grp_idx = bp->rx_ring[ring].bnapi->index; 6550 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6551 req->lb_rule = cpu_to_le16(0xffff); 6552 vnic_mru: 6553 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 6554 req->mru = cpu_to_le16(vnic->mru); 6555 6556 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6557 #ifdef CONFIG_BNXT_SRIOV 6558 if (BNXT_VF(bp)) 6559 def_vlan = bp->vf.vlan; 6560 #endif 6561 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6562 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6563 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) 6564 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6565 6566 return hwrm_req_send(bp, req); 6567 } 6568 6569 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, 6570 struct bnxt_vnic_info *vnic) 6571 { 6572 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { 6573 struct hwrm_vnic_free_input *req; 6574 6575 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6576 return; 6577 6578 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6579 6580 hwrm_req_send(bp, req); 6581 vnic->fw_vnic_id = INVALID_HW_RING_ID; 6582 } 6583 } 6584 6585 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6586 { 6587 u16 i; 6588 6589 for (i = 0; i < bp->nr_vnics; i++) 6590 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); 6591 } 6592 6593 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6594 unsigned int start_rx_ring_idx, 6595 unsigned int nr_rings) 6596 { 6597 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6598 struct hwrm_vnic_alloc_output *resp; 6599 struct hwrm_vnic_alloc_input *req; 6600 int rc; 6601 6602 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6603 if (rc) 6604 return rc; 6605 6606 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6607 goto 
vnic_no_ring_grps; 6608 6609 /* map ring groups to this vnic */ 6610 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6611 grp_idx = bp->rx_ring[i].bnapi->index; 6612 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6613 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6614 j, nr_rings); 6615 break; 6616 } 6617 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6618 } 6619 6620 vnic_no_ring_grps: 6621 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6622 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6623 if (vnic->vnic_id == BNXT_VNIC_DEFAULT) 6624 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6625 6626 resp = hwrm_req_hold(bp, req); 6627 rc = hwrm_req_send(bp, req); 6628 if (!rc) 6629 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6630 hwrm_req_drop(bp, req); 6631 return rc; 6632 } 6633 6634 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6635 { 6636 struct hwrm_vnic_qcaps_output *resp; 6637 struct hwrm_vnic_qcaps_input *req; 6638 int rc; 6639 6640 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6641 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6642 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6643 if (bp->hwrm_spec_code < 0x10600) 6644 return 0; 6645 6646 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6647 if (rc) 6648 return rc; 6649 6650 resp = hwrm_req_hold(bp, req); 6651 rc = hwrm_req_send(bp, req); 6652 if (!rc) { 6653 u32 flags = le32_to_cpu(resp->flags); 6654 6655 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6656 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6657 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6658 if (flags & 6659 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6660 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6661 6662 /* Older P5 fw before EXT_HW_STATS support did not set 6663 * VLAN_STRIP_CAP properly. 
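 * The check below therefore also treats P5 chips without
 * BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED as capable of VLAN RX stripping
 * (BNXT_FW_CAP_VLAN_RX_STRIP).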
6664 */ 6665 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6666 (BNXT_CHIP_P5(bp) && 6667 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6668 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6669 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6670 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6671 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6672 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6673 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6674 if (bp->max_tpa_v2) { 6675 if (BNXT_CHIP_P5(bp)) 6676 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6677 else 6678 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6679 } 6680 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6681 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6682 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) 6683 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; 6684 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) 6685 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; 6686 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) 6687 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; 6688 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) 6689 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; 6690 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) 6691 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; 6692 } 6693 hwrm_req_drop(bp, req); 6694 return rc; 6695 } 6696 6697 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6698 { 6699 struct hwrm_ring_grp_alloc_output *resp; 6700 struct hwrm_ring_grp_alloc_input *req; 6701 int rc; 6702 u16 i; 6703 6704 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6705 return 0; 6706 6707 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6708 if (rc) 6709 return rc; 6710 6711 resp = hwrm_req_hold(bp, req); 6712 for (i = 0; i < bp->rx_nr_rings; i++) { 6713 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6714 6715 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6716 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6717 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6718 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6719 6720 rc = hwrm_req_send(bp, req); 6721 6722 if (rc) 6723 break; 6724 6725 bp->grp_info[grp_idx].fw_grp_id = 6726 le32_to_cpu(resp->ring_group_id); 6727 } 6728 hwrm_req_drop(bp, req); 6729 return rc; 6730 } 6731 6732 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6733 { 6734 struct hwrm_ring_grp_free_input *req; 6735 u16 i; 6736 6737 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6738 return; 6739 6740 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6741 return; 6742 6743 hwrm_req_hold(bp, req); 6744 for (i = 0; i < bp->cp_nr_rings; i++) { 6745 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6746 continue; 6747 req->ring_group_id = 6748 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6749 6750 hwrm_req_send(bp, req); 6751 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6752 } 6753 hwrm_req_drop(bp, req); 6754 } 6755 6756 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 6757 struct bnxt_ring_struct *ring, 6758 u32 ring_type, u32 map_index) 6759 { 6760 struct hwrm_ring_alloc_output *resp; 6761 struct hwrm_ring_alloc_input *req; 6762 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 6763 struct bnxt_ring_grp_info *grp_info; 6764 int rc, err = 0; 6765 u16 ring_id; 6766 6767 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 6768 if (rc) 6769 goto exit; 6770 6771 req->enables = 0; 6772 if (rmem->nr_pages > 1) { 6773 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 6774 /* Page 
size is in log2 units */ 6775 req->page_size = BNXT_PAGE_SHIFT; 6776 req->page_tbl_depth = 1; 6777 } else { 6778 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 6779 } 6780 req->fbo = 0; 6781 /* Association of ring index with doorbell index and MSIX number */ 6782 req->logical_id = cpu_to_le16(map_index); 6783 6784 switch (ring_type) { 6785 case HWRM_RING_ALLOC_TX: { 6786 struct bnxt_tx_ring_info *txr; 6787 u16 flags = 0; 6788 6789 txr = container_of(ring, struct bnxt_tx_ring_info, 6790 tx_ring_struct); 6791 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 6792 /* Association of transmit ring with completion ring */ 6793 grp_info = &bp->grp_info[ring->grp_idx]; 6794 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 6795 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 6796 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6797 req->queue_id = cpu_to_le16(ring->queue_id); 6798 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 6799 req->cmpl_coal_cnt = 6800 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 6801 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) 6802 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; 6803 req->flags = cpu_to_le16(flags); 6804 break; 6805 } 6806 case HWRM_RING_ALLOC_RX: 6807 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6808 req->length = cpu_to_le32(bp->rx_ring_mask + 1); 6809 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6810 u16 flags = 0; 6811 6812 /* Association of rx ring with stats context */ 6813 grp_info = &bp->grp_info[ring->grp_idx]; 6814 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6815 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6816 req->enables |= cpu_to_le32( 6817 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6818 if (NET_IP_ALIGN == 2) 6819 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 6820 req->flags = cpu_to_le16(flags); 6821 } 6822 break; 6823 case HWRM_RING_ALLOC_AGG: 6824 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6825 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6826 /* Association of agg ring with rx ring */ 6827 grp_info = &bp->grp_info[ring->grp_idx]; 6828 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6829 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6830 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6831 req->enables |= cpu_to_le32( 6832 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 6833 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6834 } else { 6835 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6836 } 6837 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 6838 break; 6839 case HWRM_RING_ALLOC_CMPL: 6840 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 6841 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6842 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6843 /* Association of cp ring with nq */ 6844 grp_info = &bp->grp_info[map_index]; 6845 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6846 req->cq_handle = cpu_to_le64(ring->handle); 6847 req->enables |= cpu_to_le32( 6848 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 6849 } else { 6850 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6851 } 6852 break; 6853 case HWRM_RING_ALLOC_NQ: 6854 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 6855 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6856 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6857 break; 6858 default: 6859 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 6860 ring_type); 6861 return -1; 6862 } 6863 6864 resp = hwrm_req_hold(bp, req); 6865 rc = hwrm_req_send(bp, req); 6866 err = le16_to_cpu(resp->error_code); 6867 ring_id = le16_to_cpu(resp->ring_id); 6868 hwrm_req_drop(bp, 
req); 6869 6870 exit: 6871 if (rc || err) { 6872 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 6873 ring_type, rc, err); 6874 return -EIO; 6875 } 6876 ring->fw_ring_id = ring_id; 6877 return rc; 6878 } 6879 6880 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 6881 { 6882 int rc; 6883 6884 if (BNXT_PF(bp)) { 6885 struct hwrm_func_cfg_input *req; 6886 6887 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 6888 if (rc) 6889 return rc; 6890 6891 req->fid = cpu_to_le16(0xffff); 6892 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6893 req->async_event_cr = cpu_to_le16(idx); 6894 return hwrm_req_send(bp, req); 6895 } else { 6896 struct hwrm_func_vf_cfg_input *req; 6897 6898 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 6899 if (rc) 6900 return rc; 6901 6902 req->enables = 6903 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6904 req->async_event_cr = cpu_to_le16(idx); 6905 return hwrm_req_send(bp, req); 6906 } 6907 } 6908 6909 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 6910 u32 ring_type) 6911 { 6912 switch (ring_type) { 6913 case HWRM_RING_ALLOC_TX: 6914 db->db_ring_mask = bp->tx_ring_mask; 6915 break; 6916 case HWRM_RING_ALLOC_RX: 6917 db->db_ring_mask = bp->rx_ring_mask; 6918 break; 6919 case HWRM_RING_ALLOC_AGG: 6920 db->db_ring_mask = bp->rx_agg_ring_mask; 6921 break; 6922 case HWRM_RING_ALLOC_CMPL: 6923 case HWRM_RING_ALLOC_NQ: 6924 db->db_ring_mask = bp->cp_ring_mask; 6925 break; 6926 } 6927 if (bp->flags & BNXT_FLAG_CHIP_P7) { 6928 db->db_epoch_mask = db->db_ring_mask + 1; 6929 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 6930 } 6931 } 6932 6933 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 6934 u32 map_idx, u32 xid) 6935 { 6936 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6937 switch (ring_type) { 6938 case HWRM_RING_ALLOC_TX: 6939 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 6940 break; 6941 case HWRM_RING_ALLOC_RX: 6942 case HWRM_RING_ALLOC_AGG: 6943 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 6944 break; 6945 case HWRM_RING_ALLOC_CMPL: 6946 db->db_key64 = DBR_PATH_L2; 6947 break; 6948 case HWRM_RING_ALLOC_NQ: 6949 db->db_key64 = DBR_PATH_L2; 6950 break; 6951 } 6952 db->db_key64 |= (u64)xid << DBR_XID_SFT; 6953 6954 if (bp->flags & BNXT_FLAG_CHIP_P7) 6955 db->db_key64 |= DBR_VALID; 6956 6957 db->doorbell = bp->bar1 + bp->db_offset; 6958 } else { 6959 db->doorbell = bp->bar1 + map_idx * 0x80; 6960 switch (ring_type) { 6961 case HWRM_RING_ALLOC_TX: 6962 db->db_key32 = DB_KEY_TX; 6963 break; 6964 case HWRM_RING_ALLOC_RX: 6965 case HWRM_RING_ALLOC_AGG: 6966 db->db_key32 = DB_KEY_RX; 6967 break; 6968 case HWRM_RING_ALLOC_CMPL: 6969 db->db_key32 = DB_KEY_CP; 6970 break; 6971 } 6972 } 6973 bnxt_set_db_mask(bp, db, ring_type); 6974 } 6975 6976 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, 6977 struct bnxt_rx_ring_info *rxr) 6978 { 6979 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6980 struct bnxt_napi *bnapi = rxr->bnapi; 6981 u32 type = HWRM_RING_ALLOC_RX; 6982 u32 map_idx = bnapi->index; 6983 int rc; 6984 6985 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6986 if (rc) 6987 return rc; 6988 6989 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 6990 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 6991 6992 return 0; 6993 } 6994 6995 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, 6996 struct bnxt_rx_ring_info *rxr) 6997 { 6998 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 6999 u32 type = 
HWRM_RING_ALLOC_AGG; 7000 u32 grp_idx = ring->grp_idx; 7001 u32 map_idx; 7002 int rc; 7003 7004 map_idx = grp_idx + bp->rx_nr_rings; 7005 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7006 if (rc) 7007 return rc; 7008 7009 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 7010 ring->fw_ring_id); 7011 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 7012 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7013 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 7014 7015 return 0; 7016 } 7017 7018 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 7019 { 7020 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 7021 int i, rc = 0; 7022 u32 type; 7023 7024 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7025 type = HWRM_RING_ALLOC_NQ; 7026 else 7027 type = HWRM_RING_ALLOC_CMPL; 7028 for (i = 0; i < bp->cp_nr_rings; i++) { 7029 struct bnxt_napi *bnapi = bp->bnapi[i]; 7030 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7031 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7032 u32 map_idx = ring->map_idx; 7033 unsigned int vector; 7034 7035 vector = bp->irq_tbl[map_idx].vector; 7036 disable_irq_nosync(vector); 7037 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7038 if (rc) { 7039 enable_irq(vector); 7040 goto err_out; 7041 } 7042 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 7043 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7044 enable_irq(vector); 7045 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 7046 7047 if (!i) { 7048 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 7049 if (rc) 7050 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 7051 } 7052 } 7053 7054 type = HWRM_RING_ALLOC_TX; 7055 for (i = 0; i < bp->tx_nr_rings; i++) { 7056 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7057 struct bnxt_ring_struct *ring; 7058 u32 map_idx; 7059 7060 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7061 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; 7062 struct bnxt_napi *bnapi = txr->bnapi; 7063 u32 type2 = HWRM_RING_ALLOC_CMPL; 7064 7065 ring = &cpr2->cp_ring_struct; 7066 ring->handle = BNXT_SET_NQ_HDL(cpr2); 7067 map_idx = bnapi->index; 7068 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 7069 if (rc) 7070 goto err_out; 7071 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 7072 ring->fw_ring_id); 7073 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 7074 } 7075 ring = &txr->tx_ring_struct; 7076 map_idx = i; 7077 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7078 if (rc) 7079 goto err_out; 7080 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 7081 } 7082 7083 for (i = 0; i < bp->rx_nr_rings; i++) { 7084 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7085 7086 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 7087 if (rc) 7088 goto err_out; 7089 /* If we have agg rings, post agg buffers first. 
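 * The RX doorbell is therefore only written here when there is no agg
 * ring; with agg rings, bnxt_hwrm_rx_agg_ring_alloc() below writes the
 * agg doorbell and then the RX doorbell once the agg ring exists.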
*/ 7090 if (!agg_rings) 7091 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7092 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7093 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; 7094 struct bnxt_napi *bnapi = rxr->bnapi; 7095 u32 type2 = HWRM_RING_ALLOC_CMPL; 7096 struct bnxt_ring_struct *ring; 7097 u32 map_idx = bnapi->index; 7098 7099 ring = &cpr2->cp_ring_struct; 7100 ring->handle = BNXT_SET_NQ_HDL(cpr2); 7101 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 7102 if (rc) 7103 goto err_out; 7104 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 7105 ring->fw_ring_id); 7106 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 7107 } 7108 } 7109 7110 if (agg_rings) { 7111 for (i = 0; i < bp->rx_nr_rings; i++) { 7112 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); 7113 if (rc) 7114 goto err_out; 7115 } 7116 } 7117 err_out: 7118 return rc; 7119 } 7120 7121 static int hwrm_ring_free_send_msg(struct bnxt *bp, 7122 struct bnxt_ring_struct *ring, 7123 u32 ring_type, int cmpl_ring_id) 7124 { 7125 struct hwrm_ring_free_output *resp; 7126 struct hwrm_ring_free_input *req; 7127 u16 error_code = 0; 7128 int rc; 7129 7130 if (BNXT_NO_FW_ACCESS(bp)) 7131 return 0; 7132 7133 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 7134 if (rc) 7135 goto exit; 7136 7137 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 7138 req->ring_type = ring_type; 7139 req->ring_id = cpu_to_le16(ring->fw_ring_id); 7140 7141 resp = hwrm_req_hold(bp, req); 7142 rc = hwrm_req_send(bp, req); 7143 error_code = le16_to_cpu(resp->error_code); 7144 hwrm_req_drop(bp, req); 7145 exit: 7146 if (rc || error_code) { 7147 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 7148 ring_type, rc, error_code); 7149 return -EIO; 7150 } 7151 return 0; 7152 } 7153 7154 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, 7155 struct bnxt_rx_ring_info *rxr, 7156 bool close_path) 7157 { 7158 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7159 u32 grp_idx = rxr->bnapi->index; 7160 u32 cmpl_ring_id; 7161 7162 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7163 return; 7164 7165 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7166 hwrm_ring_free_send_msg(bp, ring, 7167 RING_FREE_REQ_RING_TYPE_RX, 7168 close_path ? cmpl_ring_id : 7169 INVALID_HW_RING_ID); 7170 ring->fw_ring_id = INVALID_HW_RING_ID; 7171 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; 7172 } 7173 7174 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, 7175 struct bnxt_rx_ring_info *rxr, 7176 bool close_path) 7177 { 7178 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7179 u32 grp_idx = rxr->bnapi->index; 7180 u32 type, cmpl_ring_id; 7181 7182 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7183 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 7184 else 7185 type = RING_FREE_REQ_RING_TYPE_RX; 7186 7187 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7188 return; 7189 7190 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7191 hwrm_ring_free_send_msg(bp, ring, type, 7192 close_path ? 
cmpl_ring_id : 7193 INVALID_HW_RING_ID); 7194 ring->fw_ring_id = INVALID_HW_RING_ID; 7195 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; 7196 } 7197 7198 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 7199 { 7200 u32 type; 7201 int i; 7202 7203 if (!bp->bnapi) 7204 return; 7205 7206 for (i = 0; i < bp->tx_nr_rings; i++) { 7207 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7208 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7209 7210 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7211 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 7212 7213 hwrm_ring_free_send_msg(bp, ring, 7214 RING_FREE_REQ_RING_TYPE_TX, 7215 close_path ? cmpl_ring_id : 7216 INVALID_HW_RING_ID); 7217 ring->fw_ring_id = INVALID_HW_RING_ID; 7218 } 7219 } 7220 7221 for (i = 0; i < bp->rx_nr_rings; i++) { 7222 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); 7223 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); 7224 } 7225 7226 /* The completion rings are about to be freed. After that the 7227 * IRQ doorbell will not work anymore. So we need to disable 7228 * IRQ here. 7229 */ 7230 bnxt_disable_int_sync(bp); 7231 7232 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7233 type = RING_FREE_REQ_RING_TYPE_NQ; 7234 else 7235 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 7236 for (i = 0; i < bp->cp_nr_rings; i++) { 7237 struct bnxt_napi *bnapi = bp->bnapi[i]; 7238 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7239 struct bnxt_ring_struct *ring; 7240 int j; 7241 7242 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { 7243 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 7244 7245 ring = &cpr2->cp_ring_struct; 7246 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7247 continue; 7248 hwrm_ring_free_send_msg(bp, ring, 7249 RING_FREE_REQ_RING_TYPE_L2_CMPL, 7250 INVALID_HW_RING_ID); 7251 ring->fw_ring_id = INVALID_HW_RING_ID; 7252 } 7253 ring = &cpr->cp_ring_struct; 7254 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7255 hwrm_ring_free_send_msg(bp, ring, type, 7256 INVALID_HW_RING_ID); 7257 ring->fw_ring_id = INVALID_HW_RING_ID; 7258 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 7259 } 7260 } 7261 } 7262 7263 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7264 bool shared); 7265 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7266 bool shared); 7267 7268 static int bnxt_hwrm_get_rings(struct bnxt *bp) 7269 { 7270 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7271 struct hwrm_func_qcfg_output *resp; 7272 struct hwrm_func_qcfg_input *req; 7273 int rc; 7274 7275 if (bp->hwrm_spec_code < 0x10601) 7276 return 0; 7277 7278 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7279 if (rc) 7280 return rc; 7281 7282 req->fid = cpu_to_le16(0xffff); 7283 resp = hwrm_req_hold(bp, req); 7284 rc = hwrm_req_send(bp, req); 7285 if (rc) { 7286 hwrm_req_drop(bp, req); 7287 return rc; 7288 } 7289 7290 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7291 if (BNXT_NEW_RM(bp)) { 7292 u16 cp, stats; 7293 7294 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 7295 hw_resc->resv_hw_ring_grps = 7296 le32_to_cpu(resp->alloc_hw_ring_grps); 7297 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7298 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7299 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7300 stats = le16_to_cpu(resp->alloc_stat_ctx); 7301 hw_resc->resv_irqs = cp; 7302 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7303 int rx = hw_resc->resv_rx_rings; 7304 int tx = hw_resc->resv_tx_rings; 7305 7306 if (bp->flags & 
BNXT_FLAG_AGG_RINGS) 7307 rx >>= 1; 7308 if (cp < (rx + tx)) { 7309 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 7310 if (rc) 7311 goto get_rings_exit; 7312 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7313 rx <<= 1; 7314 hw_resc->resv_rx_rings = rx; 7315 hw_resc->resv_tx_rings = tx; 7316 } 7317 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 7318 hw_resc->resv_hw_ring_grps = rx; 7319 } 7320 hw_resc->resv_cp_rings = cp; 7321 hw_resc->resv_stat_ctxs = stats; 7322 } 7323 get_rings_exit: 7324 hwrm_req_drop(bp, req); 7325 return rc; 7326 } 7327 7328 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 7329 { 7330 struct hwrm_func_qcfg_output *resp; 7331 struct hwrm_func_qcfg_input *req; 7332 int rc; 7333 7334 if (bp->hwrm_spec_code < 0x10601) 7335 return 0; 7336 7337 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7338 if (rc) 7339 return rc; 7340 7341 req->fid = cpu_to_le16(fid); 7342 resp = hwrm_req_hold(bp, req); 7343 rc = hwrm_req_send(bp, req); 7344 if (!rc) 7345 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7346 7347 hwrm_req_drop(bp, req); 7348 return rc; 7349 } 7350 7351 static bool bnxt_rfs_supported(struct bnxt *bp); 7352 7353 static struct hwrm_func_cfg_input * 7354 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7355 { 7356 struct hwrm_func_cfg_input *req; 7357 u32 enables = 0; 7358 7359 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 7360 return NULL; 7361 7362 req->fid = cpu_to_le16(0xffff); 7363 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7364 req->num_tx_rings = cpu_to_le16(hwr->tx); 7365 if (BNXT_NEW_RM(bp)) { 7366 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7367 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7368 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7369 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7370 enables |= hwr->cp_p5 ? 7371 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7372 } else { 7373 enables |= hwr->cp ? 7374 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7375 enables |= hwr->grp ? 7376 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7377 } 7378 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7379 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7380 0; 7381 req->num_rx_rings = cpu_to_le16(hwr->rx); 7382 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7383 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7384 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7385 req->num_msix = cpu_to_le16(hwr->cp); 7386 } else { 7387 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7388 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7389 } 7390 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7391 req->num_vnics = cpu_to_le16(hwr->vnic); 7392 } 7393 req->enables = cpu_to_le32(enables); 7394 return req; 7395 } 7396 7397 static struct hwrm_func_vf_cfg_input * 7398 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7399 { 7400 struct hwrm_func_vf_cfg_input *req; 7401 u32 enables = 0; 7402 7403 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7404 return NULL; 7405 7406 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7407 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7408 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7409 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7410 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7411 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7412 enables |= hwr->cp_p5 ? 7413 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7414 } else { 7415 enables |= hwr->cp ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7416 enables |= hwr->grp ? 7417 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7418 } 7419 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7420 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7421 7422 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7423 req->num_tx_rings = cpu_to_le16(hwr->tx); 7424 req->num_rx_rings = cpu_to_le16(hwr->rx); 7425 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7426 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7427 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7428 } else { 7429 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7430 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7431 } 7432 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7433 req->num_vnics = cpu_to_le16(hwr->vnic); 7434 7435 req->enables = cpu_to_le32(enables); 7436 return req; 7437 } 7438 7439 static int 7440 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7441 { 7442 struct hwrm_func_cfg_input *req; 7443 int rc; 7444 7445 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7446 if (!req) 7447 return -ENOMEM; 7448 7449 if (!req->enables) { 7450 hwrm_req_drop(bp, req); 7451 return 0; 7452 } 7453 7454 rc = hwrm_req_send(bp, req); 7455 if (rc) 7456 return rc; 7457 7458 if (bp->hwrm_spec_code < 0x10601) 7459 bp->hw_resc.resv_tx_rings = hwr->tx; 7460 7461 return bnxt_hwrm_get_rings(bp); 7462 } 7463 7464 static int 7465 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7466 { 7467 struct hwrm_func_vf_cfg_input *req; 7468 int rc; 7469 7470 if (!BNXT_NEW_RM(bp)) { 7471 bp->hw_resc.resv_tx_rings = hwr->tx; 7472 return 0; 7473 } 7474 7475 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7476 if (!req) 7477 return -ENOMEM; 7478 7479 rc = hwrm_req_send(bp, req); 7480 if (rc) 7481 return rc; 7482 7483 return bnxt_hwrm_get_rings(bp); 7484 } 7485 7486 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7487 { 7488 if (BNXT_PF(bp)) 7489 return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7490 else 7491 return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7492 } 7493 7494 int bnxt_nq_rings_in_use(struct bnxt *bp) 7495 { 7496 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); 7497 } 7498 7499 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7500 { 7501 int cp; 7502 7503 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7504 return bnxt_nq_rings_in_use(bp); 7505 7506 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7507 return cp; 7508 } 7509 7510 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7511 { 7512 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); 7513 } 7514 7515 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7516 { 7517 if (!hwr->grp) 7518 return 0; 7519 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7520 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7521 7522 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7523 rss_ctx *= hwr->vnic; 7524 return rss_ctx; 7525 } 7526 if (BNXT_VF(bp)) 7527 return BNXT_VF_MAX_RSS_CTX; 7528 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7529 return hwr->grp + 1; 7530 return 1; 7531 } 7532 7533 /* Check if a default RSS map needs to be setup. This function is only 7534 * used on older firmware that does not require reserving RX rings. 
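 * With resource-manager (BNXT_NEW_RM) firmware, __bnxt_reserve_rings()
 * takes care of this instead, refreshing the default indirection table
 * only after the RX ring reservation has changed resv_rx_rings.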
7535 */ 7536 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7537 { 7538 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7539 7540 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7541 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7542 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7543 if (!netif_is_rxfh_configured(bp->dev)) 7544 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7545 } 7546 } 7547 7548 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7549 { 7550 if (bp->flags & BNXT_FLAG_RFS) { 7551 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7552 return 2 + bp->num_rss_ctx; 7553 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7554 return rx_rings + 1; 7555 } 7556 return 1; 7557 } 7558 7559 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7560 { 7561 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7562 int cp = bnxt_cp_rings_in_use(bp); 7563 int nq = bnxt_nq_rings_in_use(bp); 7564 int rx = bp->rx_nr_rings, stat; 7565 int vnic, grp = rx; 7566 7567 /* Old firmware does not need RX ring reservations but we still 7568 * need to setup a default RSS map when needed. With new firmware 7569 * we go through RX ring reservations first and then set up the 7570 * RSS map for the successfully reserved RX rings when needed. 7571 */ 7572 if (!BNXT_NEW_RM(bp)) 7573 bnxt_check_rss_tbl_no_rmgr(bp); 7574 7575 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7576 bp->hwrm_spec_code >= 0x10601) 7577 return true; 7578 7579 if (!BNXT_NEW_RM(bp)) 7580 return false; 7581 7582 vnic = bnxt_get_total_vnics(bp, rx); 7583 7584 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7585 rx <<= 1; 7586 stat = bnxt_get_func_stat_ctxs(bp); 7587 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7588 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7589 (hw_resc->resv_hw_ring_grps != grp && 7590 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7591 return true; 7592 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7593 hw_resc->resv_irqs != nq) 7594 return true; 7595 return false; 7596 } 7597 7598 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7599 { 7600 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7601 7602 hwr->tx = hw_resc->resv_tx_rings; 7603 if (BNXT_NEW_RM(bp)) { 7604 hwr->rx = hw_resc->resv_rx_rings; 7605 hwr->cp = hw_resc->resv_irqs; 7606 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7607 hwr->cp_p5 = hw_resc->resv_cp_rings; 7608 hwr->grp = hw_resc->resv_hw_ring_grps; 7609 hwr->vnic = hw_resc->resv_vnics; 7610 hwr->stat = hw_resc->resv_stat_ctxs; 7611 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7612 } 7613 } 7614 7615 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7616 { 7617 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7618 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7619 } 7620 7621 static int bnxt_get_avail_msix(struct bnxt *bp, int num); 7622 7623 static int __bnxt_reserve_rings(struct bnxt *bp) 7624 { 7625 struct bnxt_hw_rings hwr = {0}; 7626 int rx_rings, old_rx_rings, rc; 7627 int cp = bp->cp_nr_rings; 7628 int ulp_msix = 0; 7629 bool sh = false; 7630 int tx_cp; 7631 7632 if (!bnxt_need_reserve_rings(bp)) 7633 return 0; 7634 7635 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 7636 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 7637 if (!ulp_msix) 7638 bnxt_set_ulp_stat_ctxs(bp, 0); 7639 7640 if (ulp_msix > bp->ulp_num_msix_want) 7641 ulp_msix = bp->ulp_num_msix_want; 7642 hwr.cp = cp + ulp_msix; 7643 } else { 7644 hwr.cp = bnxt_nq_rings_in_use(bp); 7645 } 7646 7647 hwr.tx 
= bp->tx_nr_rings; 7648 hwr.rx = bp->rx_nr_rings; 7649 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7650 sh = true; 7651 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7652 hwr.cp_p5 = hwr.rx + hwr.tx; 7653 7654 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7655 7656 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7657 hwr.rx <<= 1; 7658 hwr.grp = bp->rx_nr_rings; 7659 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7660 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7661 old_rx_rings = bp->hw_resc.resv_rx_rings; 7662 7663 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7664 if (rc) 7665 return rc; 7666 7667 bnxt_copy_reserved_rings(bp, &hwr); 7668 7669 rx_rings = hwr.rx; 7670 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7671 if (hwr.rx >= 2) { 7672 rx_rings = hwr.rx >> 1; 7673 } else { 7674 if (netif_running(bp->dev)) 7675 return -ENOMEM; 7676 7677 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7678 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7679 bp->dev->hw_features &= ~NETIF_F_LRO; 7680 bp->dev->features &= ~NETIF_F_LRO; 7681 bnxt_set_ring_params(bp); 7682 } 7683 } 7684 rx_rings = min_t(int, rx_rings, hwr.grp); 7685 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 7686 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 7687 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 7688 hwr.cp = min_t(int, hwr.cp, hwr.stat); 7689 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 7690 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7691 hwr.rx = rx_rings << 1; 7692 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 7693 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7694 bp->tx_nr_rings = hwr.tx; 7695 7696 /* If we cannot reserve all the RX rings, reset the RSS map only 7697 * if absolutely necessary 7698 */ 7699 if (rx_rings != bp->rx_nr_rings) { 7700 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 7701 rx_rings, bp->rx_nr_rings); 7702 if (netif_is_rxfh_configured(bp->dev) && 7703 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 7704 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 7705 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 7706 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 7707 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 7708 } 7709 } 7710 bp->rx_nr_rings = rx_rings; 7711 bp->cp_nr_rings = hwr.cp; 7712 7713 if (!bnxt_rings_ok(bp, &hwr)) 7714 return -ENOMEM; 7715 7716 if (old_rx_rings != bp->hw_resc.resv_rx_rings && 7717 !netif_is_rxfh_configured(bp->dev)) 7718 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7719 7720 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { 7721 int resv_msix, resv_ctx, ulp_ctxs; 7722 struct bnxt_hw_resc *hw_resc; 7723 7724 hw_resc = &bp->hw_resc; 7725 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; 7726 ulp_msix = min_t(int, resv_msix, ulp_msix); 7727 bnxt_set_ulp_msix_num(bp, ulp_msix); 7728 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; 7729 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); 7730 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); 7731 } 7732 7733 return rc; 7734 } 7735 7736 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7737 { 7738 struct hwrm_func_vf_cfg_input *req; 7739 u32 flags; 7740 7741 if (!BNXT_NEW_RM(bp)) 7742 return 0; 7743 7744 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7745 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 7746 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7747 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7748 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7749 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 7750 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 7751 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7752 flags |= 
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7753 7754 req->flags = cpu_to_le32(flags); 7755 return hwrm_req_send_silent(bp, req); 7756 } 7757 7758 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7759 { 7760 struct hwrm_func_cfg_input *req; 7761 u32 flags; 7762 7763 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7764 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 7765 if (BNXT_NEW_RM(bp)) { 7766 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7767 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7768 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7769 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 7770 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7771 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 7772 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 7773 else 7774 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7775 } 7776 7777 req->flags = cpu_to_le32(flags); 7778 return hwrm_req_send_silent(bp, req); 7779 } 7780 7781 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7782 { 7783 if (bp->hwrm_spec_code < 0x10801) 7784 return 0; 7785 7786 if (BNXT_PF(bp)) 7787 return bnxt_hwrm_check_pf_rings(bp, hwr); 7788 7789 return bnxt_hwrm_check_vf_rings(bp, hwr); 7790 } 7791 7792 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 7793 { 7794 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7795 struct hwrm_ring_aggint_qcaps_output *resp; 7796 struct hwrm_ring_aggint_qcaps_input *req; 7797 int rc; 7798 7799 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 7800 coal_cap->num_cmpl_dma_aggr_max = 63; 7801 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 7802 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 7803 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 7804 coal_cap->int_lat_tmr_min_max = 65535; 7805 coal_cap->int_lat_tmr_max_max = 65535; 7806 coal_cap->num_cmpl_aggr_int_max = 65535; 7807 coal_cap->timer_units = 80; 7808 7809 if (bp->hwrm_spec_code < 0x10902) 7810 return; 7811 7812 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 7813 return; 7814 7815 resp = hwrm_req_hold(bp, req); 7816 rc = hwrm_req_send_silent(bp, req); 7817 if (!rc) { 7818 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 7819 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 7820 coal_cap->num_cmpl_dma_aggr_max = 7821 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 7822 coal_cap->num_cmpl_dma_aggr_during_int_max = 7823 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 7824 coal_cap->cmpl_aggr_dma_tmr_max = 7825 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 7826 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 7827 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 7828 coal_cap->int_lat_tmr_min_max = 7829 le16_to_cpu(resp->int_lat_tmr_min_max); 7830 coal_cap->int_lat_tmr_max_max = 7831 le16_to_cpu(resp->int_lat_tmr_max_max); 7832 coal_cap->num_cmpl_aggr_int_max = 7833 le16_to_cpu(resp->num_cmpl_aggr_int_max); 7834 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 7835 } 7836 hwrm_req_drop(bp, req); 7837 } 7838 7839 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 7840 { 7841 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7842 7843 return usec * 1000 / coal_cap->timer_units; 7844 } 7845 7846 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 7847 struct bnxt_coal *hw_coal, 7848 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7849 { 7850 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7851 u16 val, tmr, max, flags = hw_coal->flags; 7852 u32 cmpl_params = coal_cap->cmpl_params; 7853 7854 max = hw_coal->bufs_per_record * 128; 7855 if (hw_coal->budget) 7856 max = 
hw_coal->bufs_per_record * hw_coal->budget; 7857 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 7858 7859 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 7860 req->num_cmpl_aggr_int = cpu_to_le16(val); 7861 7862 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 7863 req->num_cmpl_dma_aggr = cpu_to_le16(val); 7864 7865 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 7866 coal_cap->num_cmpl_dma_aggr_during_int_max); 7867 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 7868 7869 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 7870 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 7871 req->int_lat_tmr_max = cpu_to_le16(tmr); 7872 7873 /* min timer set to 1/2 of interrupt timer */ 7874 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 7875 val = tmr / 2; 7876 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 7877 req->int_lat_tmr_min = cpu_to_le16(val); 7878 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7879 } 7880 7881 /* buf timer set to 1/4 of interrupt timer */ 7882 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 7883 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 7884 7885 if (cmpl_params & 7886 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 7887 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 7888 val = clamp_t(u16, tmr, 1, 7889 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 7890 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 7891 req->enables |= 7892 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 7893 } 7894 7895 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 7896 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 7897 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 7898 req->flags = cpu_to_le16(flags); 7899 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 7900 } 7901 7902 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 7903 struct bnxt_coal *hw_coal) 7904 { 7905 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 7906 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7907 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7908 u32 nq_params = coal_cap->nq_params; 7909 u16 tmr; 7910 int rc; 7911 7912 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 7913 return 0; 7914 7915 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7916 if (rc) 7917 return rc; 7918 7919 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 7920 req->flags = 7921 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 7922 7923 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 7924 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 7925 req->int_lat_tmr_min = cpu_to_le16(tmr); 7926 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7927 return hwrm_req_send(bp, req); 7928 } 7929 7930 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 7931 { 7932 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 7933 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7934 struct bnxt_coal coal; 7935 int rc; 7936 7937 /* Tick values in micro seconds. 7938 * 1 coal_buf x bufs_per_record = 1 completion record. 
7939 */ 7940 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 7941 7942 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 7943 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 7944 7945 if (!bnapi->rx_ring) 7946 return -ENODEV; 7947 7948 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7949 if (rc) 7950 return rc; 7951 7952 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 7953 7954 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 7955 7956 return hwrm_req_send(bp, req_rx); 7957 } 7958 7959 static int 7960 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7961 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7962 { 7963 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 7964 7965 req->ring_id = cpu_to_le16(ring_id); 7966 return hwrm_req_send(bp, req); 7967 } 7968 7969 static int 7970 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7971 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7972 { 7973 struct bnxt_tx_ring_info *txr; 7974 int i, rc; 7975 7976 bnxt_for_each_napi_tx(i, bnapi, txr) { 7977 u16 ring_id; 7978 7979 ring_id = bnxt_cp_ring_for_tx(bp, txr); 7980 req->ring_id = cpu_to_le16(ring_id); 7981 rc = hwrm_req_send(bp, req); 7982 if (rc) 7983 return rc; 7984 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7985 return 0; 7986 } 7987 return 0; 7988 } 7989 7990 int bnxt_hwrm_set_coal(struct bnxt *bp) 7991 { 7992 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 7993 int i, rc; 7994 7995 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7996 if (rc) 7997 return rc; 7998 7999 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8000 if (rc) { 8001 hwrm_req_drop(bp, req_rx); 8002 return rc; 8003 } 8004 8005 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 8006 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 8007 8008 hwrm_req_hold(bp, req_rx); 8009 hwrm_req_hold(bp, req_tx); 8010 for (i = 0; i < bp->cp_nr_rings; i++) { 8011 struct bnxt_napi *bnapi = bp->bnapi[i]; 8012 struct bnxt_coal *hw_coal; 8013 8014 if (!bnapi->rx_ring) 8015 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8016 else 8017 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 8018 if (rc) 8019 break; 8020 8021 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8022 continue; 8023 8024 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 8025 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8026 if (rc) 8027 break; 8028 } 8029 if (bnapi->rx_ring) 8030 hw_coal = &bp->rx_coal; 8031 else 8032 hw_coal = &bp->tx_coal; 8033 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 8034 } 8035 hwrm_req_drop(bp, req_rx); 8036 hwrm_req_drop(bp, req_tx); 8037 return rc; 8038 } 8039 8040 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 8041 { 8042 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 8043 struct hwrm_stat_ctx_free_input *req; 8044 int i; 8045 8046 if (!bp->bnapi) 8047 return; 8048 8049 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8050 return; 8051 8052 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 8053 return; 8054 if (BNXT_FW_MAJ(bp) <= 20) { 8055 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 8056 hwrm_req_drop(bp, req); 8057 return; 8058 } 8059 hwrm_req_hold(bp, req0); 8060 } 8061 hwrm_req_hold(bp, req); 8062 for (i = 0; i < bp->cp_nr_rings; i++) { 8063 struct bnxt_napi *bnapi = bp->bnapi[i]; 8064 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8065 8066 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 8067 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 8068 if (req0) { 8069 
req0->stat_ctx_id = req->stat_ctx_id; 8070 hwrm_req_send(bp, req0); 8071 } 8072 hwrm_req_send(bp, req); 8073 8074 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 8075 } 8076 } 8077 hwrm_req_drop(bp, req); 8078 if (req0) 8079 hwrm_req_drop(bp, req0); 8080 } 8081 8082 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 8083 { 8084 struct hwrm_stat_ctx_alloc_output *resp; 8085 struct hwrm_stat_ctx_alloc_input *req; 8086 int rc, i; 8087 8088 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8089 return 0; 8090 8091 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 8092 if (rc) 8093 return rc; 8094 8095 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 8096 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 8097 8098 resp = hwrm_req_hold(bp, req); 8099 for (i = 0; i < bp->cp_nr_rings; i++) { 8100 struct bnxt_napi *bnapi = bp->bnapi[i]; 8101 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8102 8103 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 8104 8105 rc = hwrm_req_send(bp, req); 8106 if (rc) 8107 break; 8108 8109 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 8110 8111 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 8112 } 8113 hwrm_req_drop(bp, req); 8114 return rc; 8115 } 8116 8117 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 8118 { 8119 struct hwrm_func_qcfg_output *resp; 8120 struct hwrm_func_qcfg_input *req; 8121 u16 flags; 8122 int rc; 8123 8124 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 8125 if (rc) 8126 return rc; 8127 8128 req->fid = cpu_to_le16(0xffff); 8129 resp = hwrm_req_hold(bp, req); 8130 rc = hwrm_req_send(bp, req); 8131 if (rc) 8132 goto func_qcfg_exit; 8133 8134 #ifdef CONFIG_BNXT_SRIOV 8135 if (BNXT_VF(bp)) { 8136 struct bnxt_vf_info *vf = &bp->vf; 8137 8138 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 8139 } else { 8140 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 8141 } 8142 #endif 8143 flags = le16_to_cpu(resp->flags); 8144 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 8145 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 8146 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 8147 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 8148 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 8149 } 8150 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 8151 bp->flags |= BNXT_FLAG_MULTI_HOST; 8152 8153 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 8154 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 8155 8156 switch (resp->port_partition_type) { 8157 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 8158 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 8159 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 8160 bp->port_partition_type = resp->port_partition_type; 8161 break; 8162 } 8163 if (bp->hwrm_spec_code < 0x10707 || 8164 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 8165 bp->br_mode = BRIDGE_MODE_VEB; 8166 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 8167 bp->br_mode = BRIDGE_MODE_VEPA; 8168 else 8169 bp->br_mode = BRIDGE_MODE_UNDEF; 8170 8171 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 8172 if (!bp->max_mtu) 8173 bp->max_mtu = BNXT_MAX_MTU; 8174 8175 if (bp->db_size) 8176 goto func_qcfg_exit; 8177 8178 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 8179 if (BNXT_CHIP_P5(bp)) { 8180 if (BNXT_PF(bp)) 8181 bp->db_offset = DB_PF_OFFSET_P5; 8182 else 8183 bp->db_offset = DB_VF_OFFSET_P5; 8184 } 8185 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 8186 1024); 8187 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 8188 bp->db_size <= 
bp->db_offset) 8189 bp->db_size = pci_resource_len(bp->pdev, 2); 8190 8191 func_qcfg_exit: 8192 hwrm_req_drop(bp, req); 8193 return rc; 8194 } 8195 8196 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 8197 u8 init_val, u8 init_offset, 8198 bool init_mask_set) 8199 { 8200 ctxm->init_value = init_val; 8201 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 8202 if (init_mask_set) 8203 ctxm->init_offset = init_offset * 4; 8204 else 8205 ctxm->init_value = 0; 8206 } 8207 8208 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 8209 { 8210 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8211 u16 type; 8212 8213 for (type = 0; type < ctx_max; type++) { 8214 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8215 int n = 1; 8216 8217 if (!ctxm->max_entries) 8218 continue; 8219 8220 if (ctxm->instance_bmap) 8221 n = hweight32(ctxm->instance_bmap); 8222 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 8223 if (!ctxm->pg_info) 8224 return -ENOMEM; 8225 } 8226 return 0; 8227 } 8228 8229 #define BNXT_CTX_INIT_VALID(flags) \ 8230 (!!((flags) & \ 8231 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 8232 8233 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 8234 { 8235 struct hwrm_func_backing_store_qcaps_v2_output *resp; 8236 struct hwrm_func_backing_store_qcaps_v2_input *req; 8237 struct bnxt_ctx_mem_info *ctx; 8238 u16 type; 8239 int rc; 8240 8241 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 8242 if (rc) 8243 return rc; 8244 8245 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8246 if (!ctx) 8247 return -ENOMEM; 8248 bp->ctx = ctx; 8249 8250 resp = hwrm_req_hold(bp, req); 8251 8252 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 8253 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8254 u8 init_val, init_off, i; 8255 __le32 *p; 8256 u32 flags; 8257 8258 req->type = cpu_to_le16(type); 8259 rc = hwrm_req_send(bp, req); 8260 if (rc) 8261 goto ctx_done; 8262 flags = le32_to_cpu(resp->flags); 8263 type = le16_to_cpu(resp->next_valid_type); 8264 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) 8265 continue; 8266 8267 ctxm->type = le16_to_cpu(resp->type); 8268 ctxm->entry_size = le16_to_cpu(resp->entry_size); 8269 ctxm->flags = flags; 8270 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 8271 ctxm->entry_multiple = resp->entry_multiple; 8272 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); 8273 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 8274 init_val = resp->ctx_init_value; 8275 init_off = resp->ctx_init_offset; 8276 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 8277 BNXT_CTX_INIT_VALID(flags)); 8278 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 8279 BNXT_MAX_SPLIT_ENTRY); 8280 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 8281 i++, p++) 8282 ctxm->split[i] = le32_to_cpu(*p); 8283 } 8284 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 8285 8286 ctx_done: 8287 hwrm_req_drop(bp, req); 8288 return rc; 8289 } 8290 8291 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 8292 { 8293 struct hwrm_func_backing_store_qcaps_output *resp; 8294 struct hwrm_func_backing_store_qcaps_input *req; 8295 int rc; 8296 8297 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 8298 return 0; 8299 8300 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8301 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 8302 8303 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 8304 if (rc) 8305 return rc; 8306 8307 resp = hwrm_req_hold(bp, 
req); 8308 rc = hwrm_req_send_silent(bp, req); 8309 if (!rc) { 8310 struct bnxt_ctx_mem_type *ctxm; 8311 struct bnxt_ctx_mem_info *ctx; 8312 u8 init_val, init_idx = 0; 8313 u16 init_mask; 8314 8315 ctx = bp->ctx; 8316 if (!ctx) { 8317 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8318 if (!ctx) { 8319 rc = -ENOMEM; 8320 goto ctx_err; 8321 } 8322 bp->ctx = ctx; 8323 } 8324 init_val = resp->ctx_kind_initializer; 8325 init_mask = le16_to_cpu(resp->ctx_init_mask); 8326 8327 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8328 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 8329 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 8330 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 8331 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 8332 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 8333 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 8334 (init_mask & (1 << init_idx++)) != 0); 8335 8336 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8337 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 8338 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 8339 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 8340 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 8341 (init_mask & (1 << init_idx++)) != 0); 8342 8343 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8344 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 8345 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 8346 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 8347 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 8348 (init_mask & (1 << init_idx++)) != 0); 8349 8350 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8351 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 8352 ctxm->max_entries = ctxm->vnic_entries + 8353 le16_to_cpu(resp->vnic_max_ring_table_entries); 8354 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 8355 bnxt_init_ctx_initializer(ctxm, init_val, 8356 resp->vnic_init_offset, 8357 (init_mask & (1 << init_idx++)) != 0); 8358 8359 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8360 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 8361 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 8362 bnxt_init_ctx_initializer(ctxm, init_val, 8363 resp->stat_init_offset, 8364 (init_mask & (1 << init_idx++)) != 0); 8365 8366 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8367 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 8368 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 8369 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 8370 ctxm->entry_multiple = resp->tqm_entries_multiple; 8371 if (!ctxm->entry_multiple) 8372 ctxm->entry_multiple = 1; 8373 8374 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 8375 8376 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8377 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 8378 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 8379 ctxm->mrav_num_entries_units = 8380 le16_to_cpu(resp->mrav_num_entries_units); 8381 bnxt_init_ctx_initializer(ctxm, init_val, 8382 resp->mrav_init_offset, 8383 (init_mask & (1 << init_idx++)) != 0); 8384 8385 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8386 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 8387 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 8388 8389 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 8390 if (!ctx->tqm_fp_rings_count) 8391 ctx->tqm_fp_rings_count = bp->max_q; 8392 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 8393 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 8394 
8395 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8396 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 8397 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 8398 8399 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 8400 } else { 8401 rc = 0; 8402 } 8403 ctx_err: 8404 hwrm_req_drop(bp, req); 8405 return rc; 8406 } 8407 8408 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 8409 __le64 *pg_dir) 8410 { 8411 if (!rmem->nr_pages) 8412 return; 8413 8414 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8415 if (rmem->depth >= 1) { 8416 if (rmem->depth == 2) 8417 *pg_attr |= 2; 8418 else 8419 *pg_attr |= 1; 8420 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8421 } else { 8422 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8423 } 8424 } 8425 8426 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8427 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8428 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8429 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8430 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8431 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8432 8433 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8434 { 8435 struct hwrm_func_backing_store_cfg_input *req; 8436 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8437 struct bnxt_ctx_pg_info *ctx_pg; 8438 struct bnxt_ctx_mem_type *ctxm; 8439 void **__req = (void **)&req; 8440 u32 req_len = sizeof(*req); 8441 __le32 *num_entries; 8442 __le64 *pg_dir; 8443 u32 flags = 0; 8444 u8 *pg_attr; 8445 u32 ena; 8446 int rc; 8447 int i; 8448 8449 if (!ctx) 8450 return 0; 8451 8452 if (req_len > bp->hwrm_max_ext_req_len) 8453 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8454 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8455 if (rc) 8456 return rc; 8457 8458 req->enables = cpu_to_le32(enables); 8459 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8460 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8461 ctx_pg = ctxm->pg_info; 8462 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8463 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8464 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8465 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8466 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8467 &req->qpc_pg_size_qpc_lvl, 8468 &req->qpc_page_dir); 8469 8470 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8471 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8472 } 8473 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8474 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8475 ctx_pg = ctxm->pg_info; 8476 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8477 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 8478 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8479 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8480 &req->srq_pg_size_srq_lvl, 8481 &req->srq_page_dir); 8482 } 8483 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8484 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8485 ctx_pg = ctxm->pg_info; 8486 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8487 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8488 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8489 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8490 &req->cq_pg_size_cq_lvl, 8491 &req->cq_page_dir); 8492 } 8493 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8494 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8495 ctx_pg = ctxm->pg_info; 8496 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8497 req->vnic_num_ring_table_entries = 8498 cpu_to_le16(ctxm->max_entries - 
ctxm->vnic_entries); 8499 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8500 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8501 &req->vnic_pg_size_vnic_lvl, 8502 &req->vnic_page_dir); 8503 } 8504 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8505 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8506 ctx_pg = ctxm->pg_info; 8507 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8508 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8509 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8510 &req->stat_pg_size_stat_lvl, 8511 &req->stat_page_dir); 8512 } 8513 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8514 u32 units; 8515 8516 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8517 ctx_pg = ctxm->pg_info; 8518 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8519 units = ctxm->mrav_num_entries_units; 8520 if (units) { 8521 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8522 u32 entries; 8523 8524 num_mr = ctx_pg->entries - num_ah; 8525 entries = ((num_mr / units) << 16) | (num_ah / units); 8526 req->mrav_num_entries = cpu_to_le32(entries); 8527 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8528 } 8529 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8530 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8531 &req->mrav_pg_size_mrav_lvl, 8532 &req->mrav_page_dir); 8533 } 8534 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8535 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8536 ctx_pg = ctxm->pg_info; 8537 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8538 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8539 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8540 &req->tim_pg_size_tim_lvl, 8541 &req->tim_page_dir); 8542 } 8543 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8544 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8545 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8546 pg_dir = &req->tqm_sp_page_dir, 8547 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8548 ctx_pg = ctxm->pg_info; 8549 i < BNXT_MAX_TQM_RINGS; 8550 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8551 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8552 if (!(enables & ena)) 8553 continue; 8554 8555 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8556 *num_entries = cpu_to_le32(ctx_pg->entries); 8557 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8558 } 8559 req->flags = cpu_to_le32(flags); 8560 return hwrm_req_send(bp, req); 8561 } 8562 8563 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8564 struct bnxt_ctx_pg_info *ctx_pg) 8565 { 8566 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8567 8568 rmem->page_size = BNXT_PAGE_SIZE; 8569 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8570 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8571 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 8572 if (rmem->depth >= 1) 8573 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8574 return bnxt_alloc_ring(bp, rmem); 8575 } 8576 8577 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8578 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8579 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8580 { 8581 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8582 int rc; 8583 8584 if (!mem_size) 8585 return -EINVAL; 8586 8587 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8588 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8589 ctx_pg->nr_pages = 0; 8590 return -EINVAL; 8591 } 8592 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8593 int nr_tbls, i; 8594 8595 rmem->depth = 2; 8596 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8597 GFP_KERNEL); 8598 if (!ctx_pg->ctx_pg_tbl) 8599 return -ENOMEM; 8600 nr_tbls 
= DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8601 rmem->nr_pages = nr_tbls; 8602 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8603 if (rc) 8604 return rc; 8605 for (i = 0; i < nr_tbls; i++) { 8606 struct bnxt_ctx_pg_info *pg_tbl; 8607 8608 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8609 if (!pg_tbl) 8610 return -ENOMEM; 8611 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8612 rmem = &pg_tbl->ring_mem; 8613 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8614 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8615 rmem->depth = 1; 8616 rmem->nr_pages = MAX_CTX_PAGES; 8617 rmem->ctx_mem = ctxm; 8618 if (i == (nr_tbls - 1)) { 8619 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8620 8621 if (rem) 8622 rmem->nr_pages = rem; 8623 } 8624 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8625 if (rc) 8626 break; 8627 } 8628 } else { 8629 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8630 if (rmem->nr_pages > 1 || depth) 8631 rmem->depth = 1; 8632 rmem->ctx_mem = ctxm; 8633 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8634 } 8635 return rc; 8636 } 8637 8638 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 8639 struct bnxt_ctx_pg_info *ctx_pg) 8640 { 8641 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8642 8643 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 8644 ctx_pg->ctx_pg_tbl) { 8645 int i, nr_tbls = rmem->nr_pages; 8646 8647 for (i = 0; i < nr_tbls; i++) { 8648 struct bnxt_ctx_pg_info *pg_tbl; 8649 struct bnxt_ring_mem_info *rmem2; 8650 8651 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8652 if (!pg_tbl) 8653 continue; 8654 rmem2 = &pg_tbl->ring_mem; 8655 bnxt_free_ring(bp, rmem2); 8656 ctx_pg->ctx_pg_arr[i] = NULL; 8657 kfree(pg_tbl); 8658 ctx_pg->ctx_pg_tbl[i] = NULL; 8659 } 8660 kfree(ctx_pg->ctx_pg_tbl); 8661 ctx_pg->ctx_pg_tbl = NULL; 8662 } 8663 bnxt_free_ring(bp, rmem); 8664 ctx_pg->nr_pages = 0; 8665 } 8666 8667 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 8668 struct bnxt_ctx_mem_type *ctxm, u32 entries, 8669 u8 pg_lvl) 8670 { 8671 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8672 int i, rc = 0, n = 1; 8673 u32 mem_size; 8674 8675 if (!ctxm->entry_size || !ctx_pg) 8676 return -EINVAL; 8677 if (ctxm->instance_bmap) 8678 n = hweight32(ctxm->instance_bmap); 8679 if (ctxm->entry_multiple) 8680 entries = roundup(entries, ctxm->entry_multiple); 8681 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 8682 mem_size = entries * ctxm->entry_size; 8683 for (i = 0; i < n && !rc; i++) { 8684 ctx_pg[i].entries = entries; 8685 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 8686 ctxm->init_value ? 
ctxm : NULL); 8687 } 8688 return rc; 8689 } 8690 8691 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 8692 struct bnxt_ctx_mem_type *ctxm, 8693 bool last) 8694 { 8695 struct hwrm_func_backing_store_cfg_v2_input *req; 8696 u32 instance_bmap = ctxm->instance_bmap; 8697 int i, j, rc = 0, n = 1; 8698 __le32 *p; 8699 8700 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 8701 return 0; 8702 8703 if (instance_bmap) 8704 n = hweight32(ctxm->instance_bmap); 8705 else 8706 instance_bmap = 1; 8707 8708 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 8709 if (rc) 8710 return rc; 8711 hwrm_req_hold(bp, req); 8712 req->type = cpu_to_le16(ctxm->type); 8713 req->entry_size = cpu_to_le16(ctxm->entry_size); 8714 req->subtype_valid_cnt = ctxm->split_entry_cnt; 8715 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 8716 p[i] = cpu_to_le32(ctxm->split[i]); 8717 for (i = 0, j = 0; j < n && !rc; i++) { 8718 struct bnxt_ctx_pg_info *ctx_pg; 8719 8720 if (!(instance_bmap & (1 << i))) 8721 continue; 8722 req->instance = cpu_to_le16(i); 8723 ctx_pg = &ctxm->pg_info[j++]; 8724 if (!ctx_pg->entries) 8725 continue; 8726 req->num_entries = cpu_to_le32(ctx_pg->entries); 8727 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8728 &req->page_size_pbl_level, 8729 &req->page_dir); 8730 if (last && j == n) 8731 req->flags = 8732 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 8733 rc = hwrm_req_send(bp, req); 8734 } 8735 hwrm_req_drop(bp, req); 8736 return rc; 8737 } 8738 8739 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 8740 { 8741 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8742 struct bnxt_ctx_mem_type *ctxm; 8743 u16 last_type; 8744 int rc = 0; 8745 u16 type; 8746 8747 if (!ena) 8748 return 0; 8749 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 8750 last_type = BNXT_CTX_MAX - 1; 8751 else 8752 last_type = BNXT_CTX_L2_MAX - 1; 8753 ctx->ctx_arr[last_type].last = 1; 8754 8755 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 8756 ctxm = &ctx->ctx_arr[type]; 8757 8758 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 8759 if (rc) 8760 return rc; 8761 } 8762 return 0; 8763 } 8764 8765 void bnxt_free_ctx_mem(struct bnxt *bp) 8766 { 8767 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8768 u16 type; 8769 8770 if (!ctx) 8771 return; 8772 8773 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { 8774 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8775 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8776 int i, n = 1; 8777 8778 if (!ctx_pg) 8779 continue; 8780 if (ctxm->instance_bmap) 8781 n = hweight32(ctxm->instance_bmap); 8782 for (i = 0; i < n; i++) 8783 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 8784 8785 kfree(ctx_pg); 8786 ctxm->pg_info = NULL; 8787 } 8788 8789 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 8790 kfree(ctx); 8791 bp->ctx = NULL; 8792 } 8793 8794 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 8795 { 8796 struct bnxt_ctx_mem_type *ctxm; 8797 struct bnxt_ctx_mem_info *ctx; 8798 u32 l2_qps, qp1_qps, max_qps; 8799 u32 ena, entries_sp, entries; 8800 u32 srqs, max_srqs, min; 8801 u32 num_mr, num_ah; 8802 u32 extra_srqs = 0; 8803 u32 extra_qps = 0; 8804 u32 fast_qpmd_qps; 8805 u8 pg_lvl = 1; 8806 int i, rc; 8807 8808 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 8809 if (rc) { 8810 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 8811 rc); 8812 return rc; 8813 } 8814 ctx = bp->ctx; 8815 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 8816 return 0; 8817 8818 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8819 
l2_qps = ctxm->qp_l2_entries; 8820 qp1_qps = ctxm->qp_qp1_entries; 8821 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; 8822 max_qps = ctxm->max_entries; 8823 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8824 srqs = ctxm->srq_l2_entries; 8825 max_srqs = ctxm->max_entries; 8826 ena = 0; 8827 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 8828 pg_lvl = 2; 8829 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); 8830 /* allocate extra qps if fw supports RoCE fast qp destroy feature */ 8831 extra_qps += fast_qpmd_qps; 8832 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 8833 if (fast_qpmd_qps) 8834 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 8835 } 8836 8837 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8838 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 8839 pg_lvl); 8840 if (rc) 8841 return rc; 8842 8843 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8844 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 8845 if (rc) 8846 return rc; 8847 8848 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8849 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 8850 extra_qps * 2, pg_lvl); 8851 if (rc) 8852 return rc; 8853 8854 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8855 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8856 if (rc) 8857 return rc; 8858 8859 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8860 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8861 if (rc) 8862 return rc; 8863 8864 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 8865 goto skip_rdma; 8866 8867 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8868 /* 128K extra is needed to accommodate static AH context 8869 * allocation by f/w. 8870 */ 8871 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 8872 num_ah = min_t(u32, num_mr, 1024 * 128); 8873 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 8874 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 8875 ctxm->mrav_av_entries = num_ah; 8876 8877 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 8878 if (rc) 8879 return rc; 8880 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 8881 8882 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8883 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 8884 if (rc) 8885 return rc; 8886 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 8887 8888 skip_rdma: 8889 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8890 min = ctxm->min_entries; 8891 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 8892 2 * (extra_qps + qp1_qps) + min; 8893 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 8894 if (rc) 8895 return rc; 8896 8897 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8898 entries = l2_qps + 2 * (extra_qps + qp1_qps); 8899 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 8900 if (rc) 8901 return rc; 8902 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 8903 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 8904 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 8905 8906 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8907 rc = bnxt_backing_store_cfg_v2(bp, ena); 8908 else 8909 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 8910 if (rc) { 8911 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 8912 rc); 8913 return rc; 8914 } 8915 ctx->flags |= BNXT_CTX_FLAG_INITED; 8916 return 0; 8917 } 8918 8919 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) 8920 { 8921 struct hwrm_dbg_crashdump_medium_cfg_input *req; 8922 u16 page_attr; 8923 int rc; 8924 8925 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 8926 return 0; 8927 8928 rc = 
hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); 8929 if (rc) 8930 return rc; 8931 8932 if (BNXT_PAGE_SIZE == 0x2000) 8933 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; 8934 else if (BNXT_PAGE_SIZE == 0x10000) 8935 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; 8936 else 8937 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; 8938 req->pg_size_lvl = cpu_to_le16(page_attr | 8939 bp->fw_crash_mem->ring_mem.depth); 8940 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); 8941 req->size = cpu_to_le32(bp->fw_crash_len); 8942 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); 8943 return hwrm_req_send(bp, req); 8944 } 8945 8946 static void bnxt_free_crash_dump_mem(struct bnxt *bp) 8947 { 8948 if (bp->fw_crash_mem) { 8949 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 8950 kfree(bp->fw_crash_mem); 8951 bp->fw_crash_mem = NULL; 8952 } 8953 } 8954 8955 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) 8956 { 8957 u32 mem_size = 0; 8958 int rc; 8959 8960 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 8961 return 0; 8962 8963 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); 8964 if (rc) 8965 return rc; 8966 8967 mem_size = round_up(mem_size, 4); 8968 8969 /* keep and use the existing pages */ 8970 if (bp->fw_crash_mem && 8971 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) 8972 goto alloc_done; 8973 8974 if (bp->fw_crash_mem) 8975 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 8976 else 8977 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), 8978 GFP_KERNEL); 8979 if (!bp->fw_crash_mem) 8980 return -ENOMEM; 8981 8982 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); 8983 if (rc) { 8984 bnxt_free_crash_dump_mem(bp); 8985 return rc; 8986 } 8987 8988 alloc_done: 8989 bp->fw_crash_len = mem_size; 8990 return 0; 8991 } 8992 8993 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 8994 { 8995 struct hwrm_func_resource_qcaps_output *resp; 8996 struct hwrm_func_resource_qcaps_input *req; 8997 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8998 int rc; 8999 9000 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 9001 if (rc) 9002 return rc; 9003 9004 req->fid = cpu_to_le16(0xffff); 9005 resp = hwrm_req_hold(bp, req); 9006 rc = hwrm_req_send_silent(bp, req); 9007 if (rc) 9008 goto hwrm_func_resc_qcaps_exit; 9009 9010 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 9011 if (!all) 9012 goto hwrm_func_resc_qcaps_exit; 9013 9014 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 9015 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9016 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 9017 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9018 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 9019 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9020 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 9021 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9022 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 9023 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 9024 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 9025 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9026 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 9027 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9028 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 9029 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9030 9031 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 
9032 u16 max_msix = le16_to_cpu(resp->max_msix); 9033 9034 hw_resc->max_nqs = max_msix; 9035 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 9036 } 9037 9038 if (BNXT_PF(bp)) { 9039 struct bnxt_pf_info *pf = &bp->pf; 9040 9041 pf->vf_resv_strategy = 9042 le16_to_cpu(resp->vf_reservation_strategy); 9043 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 9044 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 9045 } 9046 hwrm_func_resc_qcaps_exit: 9047 hwrm_req_drop(bp, req); 9048 return rc; 9049 } 9050 9051 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 9052 { 9053 struct hwrm_port_mac_ptp_qcfg_output *resp; 9054 struct hwrm_port_mac_ptp_qcfg_input *req; 9055 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 9056 bool phc_cfg; 9057 u8 flags; 9058 int rc; 9059 9060 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { 9061 rc = -ENODEV; 9062 goto no_ptp; 9063 } 9064 9065 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 9066 if (rc) 9067 goto no_ptp; 9068 9069 req->port_id = cpu_to_le16(bp->pf.port_id); 9070 resp = hwrm_req_hold(bp, req); 9071 rc = hwrm_req_send(bp, req); 9072 if (rc) 9073 goto exit; 9074 9075 flags = resp->flags; 9076 if (BNXT_CHIP_P5_AND_MINUS(bp) && 9077 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 9078 rc = -ENODEV; 9079 goto exit; 9080 } 9081 if (!ptp) { 9082 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 9083 if (!ptp) { 9084 rc = -ENOMEM; 9085 goto exit; 9086 } 9087 ptp->bp = bp; 9088 bp->ptp_cfg = ptp; 9089 } 9090 9091 if (flags & 9092 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | 9093 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { 9094 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); 9095 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 9096 } else if (BNXT_CHIP_P5(bp)) { 9097 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 9098 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 9099 } else { 9100 rc = -ENODEV; 9101 goto exit; 9102 } 9103 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 9104 rc = bnxt_ptp_init(bp, phc_cfg); 9105 if (rc) 9106 netdev_warn(bp->dev, "PTP initialization failed.\n"); 9107 exit: 9108 hwrm_req_drop(bp, req); 9109 if (!rc) 9110 return 0; 9111 9112 no_ptp: 9113 bnxt_ptp_clear(bp); 9114 kfree(ptp); 9115 bp->ptp_cfg = NULL; 9116 return rc; 9117 } 9118 9119 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 9120 { 9121 struct hwrm_func_qcaps_output *resp; 9122 struct hwrm_func_qcaps_input *req; 9123 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9124 u32 flags, flags_ext, flags_ext2; 9125 int rc; 9126 9127 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 9128 if (rc) 9129 return rc; 9130 9131 req->fid = cpu_to_le16(0xffff); 9132 resp = hwrm_req_hold(bp, req); 9133 rc = hwrm_req_send(bp, req); 9134 if (rc) 9135 goto hwrm_func_qcaps_exit; 9136 9137 flags = le32_to_cpu(resp->flags); 9138 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 9139 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 9140 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 9141 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 9142 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 9143 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 9144 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 9145 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 9146 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 9147 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 9148 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 9149 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 9150 if (flags & 
FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 9151 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 9152 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 9153 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 9154 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 9155 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 9156 9157 flags_ext = le32_to_cpu(resp->flags_ext); 9158 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 9159 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 9160 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 9161 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 9162 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 9163 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 9164 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 9165 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 9166 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 9167 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 9168 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) 9169 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; 9170 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 9171 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 9172 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 9173 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 9174 9175 flags_ext2 = le32_to_cpu(resp->flags_ext2); 9176 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 9177 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 9178 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 9179 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 9180 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) 9181 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; 9182 9183 bp->tx_push_thresh = 0; 9184 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 9185 BNXT_FW_MAJ(bp) > 217) 9186 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 9187 9188 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9189 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9190 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9191 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9192 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 9193 if (!hw_resc->max_hw_ring_grps) 9194 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 9195 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9196 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9197 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9198 9199 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); 9200 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); 9201 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 9202 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 9203 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 9204 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 9205 9206 if (BNXT_PF(bp)) { 9207 struct bnxt_pf_info *pf = &bp->pf; 9208 9209 pf->fw_fid = le16_to_cpu(resp->fid); 9210 pf->port_id = le16_to_cpu(resp->port_id); 9211 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 9212 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 9213 pf->max_vfs = le16_to_cpu(resp->max_vfs); 9214 bp->flags &= ~BNXT_FLAG_WOL_CAP; 9215 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 9216 bp->flags |= BNXT_FLAG_WOL_CAP; 9217 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 9218 bp->fw_cap |= BNXT_FW_CAP_PTP; 9219 } else { 9220 bnxt_ptp_clear(bp); 
9221 kfree(bp->ptp_cfg); 9222 bp->ptp_cfg = NULL; 9223 } 9224 } else { 9225 #ifdef CONFIG_BNXT_SRIOV 9226 struct bnxt_vf_info *vf = &bp->vf; 9227 9228 vf->fw_fid = le16_to_cpu(resp->fid); 9229 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 9230 #endif 9231 } 9232 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); 9233 9234 hwrm_func_qcaps_exit: 9235 hwrm_req_drop(bp, req); 9236 return rc; 9237 } 9238 9239 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 9240 { 9241 struct hwrm_dbg_qcaps_output *resp; 9242 struct hwrm_dbg_qcaps_input *req; 9243 int rc; 9244 9245 bp->fw_dbg_cap = 0; 9246 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 9247 return; 9248 9249 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 9250 if (rc) 9251 return; 9252 9253 req->fid = cpu_to_le16(0xffff); 9254 resp = hwrm_req_hold(bp, req); 9255 rc = hwrm_req_send(bp, req); 9256 if (rc) 9257 goto hwrm_dbg_qcaps_exit; 9258 9259 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 9260 9261 hwrm_dbg_qcaps_exit: 9262 hwrm_req_drop(bp, req); 9263 } 9264 9265 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 9266 9267 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 9268 { 9269 int rc; 9270 9271 rc = __bnxt_hwrm_func_qcaps(bp); 9272 if (rc) 9273 return rc; 9274 9275 bnxt_hwrm_dbg_qcaps(bp); 9276 9277 rc = bnxt_hwrm_queue_qportcfg(bp); 9278 if (rc) { 9279 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 9280 return rc; 9281 } 9282 if (bp->hwrm_spec_code >= 0x10803) { 9283 rc = bnxt_alloc_ctx_mem(bp); 9284 if (rc) 9285 return rc; 9286 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9287 if (!rc) 9288 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 9289 } 9290 return 0; 9291 } 9292 9293 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 9294 { 9295 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 9296 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 9297 u32 flags; 9298 int rc; 9299 9300 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 9301 return 0; 9302 9303 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 9304 if (rc) 9305 return rc; 9306 9307 resp = hwrm_req_hold(bp, req); 9308 rc = hwrm_req_send(bp, req); 9309 if (rc) 9310 goto hwrm_cfa_adv_qcaps_exit; 9311 9312 flags = le32_to_cpu(resp->flags); 9313 if (flags & 9314 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 9315 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 9316 9317 if (flags & 9318 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) 9319 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; 9320 9321 if (flags & 9322 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) 9323 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; 9324 9325 hwrm_cfa_adv_qcaps_exit: 9326 hwrm_req_drop(bp, req); 9327 return rc; 9328 } 9329 9330 static int __bnxt_alloc_fw_health(struct bnxt *bp) 9331 { 9332 if (bp->fw_health) 9333 return 0; 9334 9335 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 9336 if (!bp->fw_health) 9337 return -ENOMEM; 9338 9339 mutex_init(&bp->fw_health->lock); 9340 return 0; 9341 } 9342 9343 static int bnxt_alloc_fw_health(struct bnxt *bp) 9344 { 9345 int rc; 9346 9347 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 9348 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9349 return 0; 9350 9351 rc = __bnxt_alloc_fw_health(bp); 9352 if (rc) { 9353 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 9354 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9355 return rc; 9356 } 9357 9358 return 0; 9359 } 9360 9361 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 9362 { 9363 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 9364 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 9365 BNXT_FW_HEALTH_WIN_MAP_OFF); 9366 } 9367 9368 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 9369 { 9370 struct bnxt_fw_health *fw_health = bp->fw_health; 9371 u32 reg_type; 9372 9373 if (!fw_health) 9374 return; 9375 9376 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 9377 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9378 fw_health->status_reliable = false; 9379 9380 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 9381 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9382 fw_health->resets_reliable = false; 9383 } 9384 9385 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 9386 { 9387 void __iomem *hs; 9388 u32 status_loc; 9389 u32 reg_type; 9390 u32 sig; 9391 9392 if (bp->fw_health) 9393 bp->fw_health->status_reliable = false; 9394 9395 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 9396 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 9397 9398 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 9399 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 9400 if (!bp->chip_num) { 9401 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 9402 bp->chip_num = readl(bp->bar0 + 9403 BNXT_FW_HEALTH_WIN_BASE + 9404 BNXT_GRC_REG_CHIP_NUM); 9405 } 9406 if (!BNXT_CHIP_P5_PLUS(bp)) 9407 return; 9408 9409 status_loc = BNXT_GRC_REG_STATUS_P5 | 9410 BNXT_FW_HEALTH_REG_TYPE_BAR0; 9411 } else { 9412 status_loc = readl(hs + offsetof(struct hcomm_status, 9413 fw_status_loc)); 9414 } 9415 9416 if (__bnxt_alloc_fw_health(bp)) { 9417 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 9418 return; 9419 } 9420 9421 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 9422 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 9423 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 9424 __bnxt_map_fw_health_reg(bp, status_loc); 9425 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 9426 BNXT_FW_HEALTH_WIN_OFF(status_loc); 9427 } 9428 9429 bp->fw_health->status_reliable = true; 9430 } 9431 9432 static int bnxt_map_fw_health_regs(struct bnxt *bp) 9433 { 9434 struct bnxt_fw_health *fw_health = bp->fw_health; 9435 u32 reg_base = 0xffffffff; 9436 int i; 9437 9438 bp->fw_health->status_reliable = false; 9439 bp->fw_health->resets_reliable = false; 9440 /* Only pre-map the monitoring GRC registers using window 3 */ 9441 for (i = 0; i < 4; i++) { 9442 u32 reg = fw_health->regs[i]; 9443 9444 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 9445 continue; 9446 if (reg_base == 0xffffffff) 9447 reg_base = reg & BNXT_GRC_BASE_MASK; 9448 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 9449 return -ERANGE; 9450 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 9451 } 9452 bp->fw_health->status_reliable = true; 9453 bp->fw_health->resets_reliable = true; 9454 if (reg_base == 0xffffffff) 9455 return 0; 9456 9457 __bnxt_map_fw_health_reg(bp, reg_base); 9458 return 0; 9459 } 9460 9461 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 9462 { 9463 if (!bp->fw_health) 9464 return; 9465 9466 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 9467 bp->fw_health->status_reliable = true; 9468 bp->fw_health->resets_reliable = true; 9469 } else { 9470 bnxt_try_map_fw_health_reg(bp); 9471 } 9472 } 9473 9474 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 9475 { 9476 struct bnxt_fw_health *fw_health = bp->fw_health; 9477 struct hwrm_error_recovery_qcfg_output *resp; 9478 struct hwrm_error_recovery_qcfg_input *req; 9479 int rc, i; 9480 9481 if (!(bp->fw_cap & 
BNXT_FW_CAP_ERROR_RECOVERY)) 9482 return 0; 9483 9484 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 9485 if (rc) 9486 return rc; 9487 9488 resp = hwrm_req_hold(bp, req); 9489 rc = hwrm_req_send(bp, req); 9490 if (rc) 9491 goto err_recovery_out; 9492 fw_health->flags = le32_to_cpu(resp->flags); 9493 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 9494 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 9495 rc = -EINVAL; 9496 goto err_recovery_out; 9497 } 9498 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 9499 fw_health->master_func_wait_dsecs = 9500 le32_to_cpu(resp->master_func_wait_period); 9501 fw_health->normal_func_wait_dsecs = 9502 le32_to_cpu(resp->normal_func_wait_period); 9503 fw_health->post_reset_wait_dsecs = 9504 le32_to_cpu(resp->master_func_wait_period_after_reset); 9505 fw_health->post_reset_max_wait_dsecs = 9506 le32_to_cpu(resp->max_bailout_time_after_reset); 9507 fw_health->regs[BNXT_FW_HEALTH_REG] = 9508 le32_to_cpu(resp->fw_health_status_reg); 9509 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 9510 le32_to_cpu(resp->fw_heartbeat_reg); 9511 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 9512 le32_to_cpu(resp->fw_reset_cnt_reg); 9513 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 9514 le32_to_cpu(resp->reset_inprogress_reg); 9515 fw_health->fw_reset_inprog_reg_mask = 9516 le32_to_cpu(resp->reset_inprogress_reg_mask); 9517 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 9518 if (fw_health->fw_reset_seq_cnt >= 16) { 9519 rc = -EINVAL; 9520 goto err_recovery_out; 9521 } 9522 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 9523 fw_health->fw_reset_seq_regs[i] = 9524 le32_to_cpu(resp->reset_reg[i]); 9525 fw_health->fw_reset_seq_vals[i] = 9526 le32_to_cpu(resp->reset_reg_val[i]); 9527 fw_health->fw_reset_seq_delay_msec[i] = 9528 resp->delay_after_reset[i]; 9529 } 9530 err_recovery_out: 9531 hwrm_req_drop(bp, req); 9532 if (!rc) 9533 rc = bnxt_map_fw_health_regs(bp); 9534 if (rc) 9535 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9536 return rc; 9537 } 9538 9539 static int bnxt_hwrm_func_reset(struct bnxt *bp) 9540 { 9541 struct hwrm_func_reset_input *req; 9542 int rc; 9543 9544 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 9545 if (rc) 9546 return rc; 9547 9548 req->enables = 0; 9549 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 9550 return hwrm_req_send(bp, req); 9551 } 9552 9553 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 9554 { 9555 struct hwrm_nvm_get_dev_info_output nvm_info; 9556 9557 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 9558 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 9559 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 9560 nvm_info.nvm_cfg_ver_upd); 9561 } 9562 9563 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 9564 { 9565 struct hwrm_queue_qportcfg_output *resp; 9566 struct hwrm_queue_qportcfg_input *req; 9567 u8 i, j, *qptr; 9568 bool no_rdma; 9569 int rc = 0; 9570 9571 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 9572 if (rc) 9573 return rc; 9574 9575 resp = hwrm_req_hold(bp, req); 9576 rc = hwrm_req_send(bp, req); 9577 if (rc) 9578 goto qportcfg_exit; 9579 9580 if (!resp->max_configurable_queues) { 9581 rc = -EINVAL; 9582 goto qportcfg_exit; 9583 } 9584 bp->max_tc = resp->max_configurable_queues; 9585 bp->max_lltc = resp->max_configurable_lossless_queues; 9586 if (bp->max_tc > BNXT_MAX_QUEUE) 9587 bp->max_tc = BNXT_MAX_QUEUE; 9588 9589 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 9590 qptr = &resp->queue_id0; 9591 for (i = 0, j = 0; i < bp->max_tc; i++) { 9592 bp->q_info[j].queue_id = *qptr; 
9593 bp->q_ids[i] = *qptr++; 9594 bp->q_info[j].queue_profile = *qptr++; 9595 bp->tc_to_qidx[j] = j; 9596 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 9597 (no_rdma && BNXT_PF(bp))) 9598 j++; 9599 } 9600 bp->max_q = bp->max_tc; 9601 bp->max_tc = max_t(u8, j, 1); 9602 9603 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 9604 bp->max_tc = 1; 9605 9606 if (bp->max_lltc > bp->max_tc) 9607 bp->max_lltc = bp->max_tc; 9608 9609 qportcfg_exit: 9610 hwrm_req_drop(bp, req); 9611 return rc; 9612 } 9613 9614 static int bnxt_hwrm_poll(struct bnxt *bp) 9615 { 9616 struct hwrm_ver_get_input *req; 9617 int rc; 9618 9619 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9620 if (rc) 9621 return rc; 9622 9623 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9624 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9625 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9626 9627 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 9628 rc = hwrm_req_send(bp, req); 9629 return rc; 9630 } 9631 9632 static int bnxt_hwrm_ver_get(struct bnxt *bp) 9633 { 9634 struct hwrm_ver_get_output *resp; 9635 struct hwrm_ver_get_input *req; 9636 u16 fw_maj, fw_min, fw_bld, fw_rsv; 9637 u32 dev_caps_cfg, hwrm_ver; 9638 int rc, len; 9639 9640 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9641 if (rc) 9642 return rc; 9643 9644 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 9645 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 9646 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9647 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9648 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9649 9650 resp = hwrm_req_hold(bp, req); 9651 rc = hwrm_req_send(bp, req); 9652 if (rc) 9653 goto hwrm_ver_get_exit; 9654 9655 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 9656 9657 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 9658 resp->hwrm_intf_min_8b << 8 | 9659 resp->hwrm_intf_upd_8b; 9660 if (resp->hwrm_intf_maj_8b < 1) { 9661 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 9662 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9663 resp->hwrm_intf_upd_8b); 9664 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 9665 } 9666 9667 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 9668 HWRM_VERSION_UPDATE; 9669 9670 if (bp->hwrm_spec_code > hwrm_ver) 9671 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9672 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 9673 HWRM_VERSION_UPDATE); 9674 else 9675 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9676 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9677 resp->hwrm_intf_upd_8b); 9678 9679 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 9680 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 9681 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 9682 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 9683 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 9684 len = FW_VER_STR_LEN; 9685 } else { 9686 fw_maj = resp->hwrm_fw_maj_8b; 9687 fw_min = resp->hwrm_fw_min_8b; 9688 fw_bld = resp->hwrm_fw_bld_8b; 9689 fw_rsv = resp->hwrm_fw_rsvd_8b; 9690 len = BC_HWRM_STR_LEN; 9691 } 9692 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 9693 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 9694 fw_rsv); 9695 9696 if (strlen(resp->active_pkg_name)) { 9697 int fw_ver_len = strlen(bp->fw_ver_str); 9698 9699 snprintf(bp->fw_ver_str + fw_ver_len, 9700 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 9701 resp->active_pkg_name); 9702 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 9703 } 9704 9705 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 
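/* Query HWRM_VER_GET: cache the full response in bp->ver_resp, derive the HWRM spec code and firmware version strings, pick up the default/maximum command timeouts and maximum request lengths, and latch the capability bits advertised in dev_caps_cfg. */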
9706 if (!bp->hwrm_cmd_timeout) 9707 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 9708 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 9709 if (!bp->hwrm_cmd_max_timeout) 9710 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 9711 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 9712 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 9713 bp->hwrm_cmd_max_timeout / 1000); 9714 9715 if (resp->hwrm_intf_maj_8b >= 1) { 9716 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 9717 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 9718 } 9719 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 9720 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 9721 9722 bp->chip_num = le16_to_cpu(resp->chip_num); 9723 bp->chip_rev = resp->chip_rev; 9724 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 9725 !resp->chip_metal) 9726 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 9727 9728 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 9729 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 9730 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 9731 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 9732 9733 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 9734 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 9735 9736 if (dev_caps_cfg & 9737 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 9738 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 9739 9740 if (dev_caps_cfg & 9741 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 9742 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 9743 9744 if (dev_caps_cfg & 9745 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 9746 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 9747 9748 hwrm_ver_get_exit: 9749 hwrm_req_drop(bp, req); 9750 return rc; 9751 } 9752 9753 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 9754 { 9755 struct hwrm_fw_set_time_input *req; 9756 struct tm tm; 9757 time64_t now = ktime_get_real_seconds(); 9758 int rc; 9759 9760 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 9761 bp->hwrm_spec_code < 0x10400) 9762 return -EOPNOTSUPP; 9763 9764 time64_to_tm(now, 0, &tm); 9765 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 9766 if (rc) 9767 return rc; 9768 9769 req->year = cpu_to_le16(1900 + tm.tm_year); 9770 req->month = 1 + tm.tm_mon; 9771 req->day = tm.tm_mday; 9772 req->hour = tm.tm_hour; 9773 req->minute = tm.tm_min; 9774 req->second = tm.tm_sec; 9775 return hwrm_req_send(bp, req); 9776 } 9777 9778 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 9779 { 9780 u64 sw_tmp; 9781 9782 hw &= mask; 9783 sw_tmp = (*sw & ~mask) | hw; 9784 if (hw < (*sw & mask)) 9785 sw_tmp += mask + 1; 9786 WRITE_ONCE(*sw, sw_tmp); 9787 } 9788 9789 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 9790 int count, bool ignore_zero) 9791 { 9792 int i; 9793 9794 for (i = 0; i < count; i++) { 9795 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 9796 9797 if (ignore_zero && !hw) 9798 continue; 9799 9800 if (masks[i] == -1ULL) 9801 sw_stats[i] = hw; 9802 else 9803 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 9804 } 9805 } 9806 9807 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 9808 { 9809 if (!stats->hw_stats) 9810 return; 9811 9812 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9813 stats->hw_masks, stats->len / 8, false); 9814 } 9815 9816 static void bnxt_accumulate_all_stats(struct bnxt *bp) 9817 { 9818 struct bnxt_stats_mem *ring0_stats; 9819 bool ignore_zero = false; 9820 int i; 9821 9822 /* Chip bug. 
Counter intermittently becomes 0. */ 9823 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9824 ignore_zero = true; 9825 9826 for (i = 0; i < bp->cp_nr_rings; i++) { 9827 struct bnxt_napi *bnapi = bp->bnapi[i]; 9828 struct bnxt_cp_ring_info *cpr; 9829 struct bnxt_stats_mem *stats; 9830 9831 cpr = &bnapi->cp_ring; 9832 stats = &cpr->stats; 9833 if (!i) 9834 ring0_stats = stats; 9835 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9836 ring0_stats->hw_masks, 9837 ring0_stats->len / 8, ignore_zero); 9838 } 9839 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9840 struct bnxt_stats_mem *stats = &bp->port_stats; 9841 __le64 *hw_stats = stats->hw_stats; 9842 u64 *sw_stats = stats->sw_stats; 9843 u64 *masks = stats->hw_masks; 9844 int cnt; 9845 9846 cnt = sizeof(struct rx_port_stats) / 8; 9847 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9848 9849 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9850 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9851 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9852 cnt = sizeof(struct tx_port_stats) / 8; 9853 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9854 } 9855 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 9856 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 9857 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 9858 } 9859 } 9860 9861 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 9862 { 9863 struct hwrm_port_qstats_input *req; 9864 struct bnxt_pf_info *pf = &bp->pf; 9865 int rc; 9866 9867 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 9868 return 0; 9869 9870 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9871 return -EOPNOTSUPP; 9872 9873 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 9874 if (rc) 9875 return rc; 9876 9877 req->flags = flags; 9878 req->port_id = cpu_to_le16(pf->port_id); 9879 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 9880 BNXT_TX_PORT_STATS_BYTE_OFFSET); 9881 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 9882 return hwrm_req_send(bp, req); 9883 } 9884 9885 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 9886 { 9887 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 9888 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 9889 struct hwrm_port_qstats_ext_output *resp_qs; 9890 struct hwrm_port_qstats_ext_input *req_qs; 9891 struct bnxt_pf_info *pf = &bp->pf; 9892 u32 tx_stat_size; 9893 int rc; 9894 9895 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 9896 return 0; 9897 9898 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9899 return -EOPNOTSUPP; 9900 9901 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 9902 if (rc) 9903 return rc; 9904 9905 req_qs->flags = flags; 9906 req_qs->port_id = cpu_to_le16(pf->port_id); 9907 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 9908 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 9909 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 9910 sizeof(struct tx_port_stats_ext) : 0; 9911 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 9912 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 9913 resp_qs = hwrm_req_hold(bp, req_qs); 9914 rc = hwrm_req_send(bp, req_qs); 9915 if (!rc) { 9916 bp->fw_rx_stats_ext_size = 9917 le16_to_cpu(resp_qs->rx_stat_size) / 8; 9918 if (BNXT_FW_MAJ(bp) < 220 && 9919 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 9920 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 9921 9922 bp->fw_tx_stats_ext_size = tx_stat_size ? 
9923 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 9924 } else { 9925 bp->fw_rx_stats_ext_size = 0; 9926 bp->fw_tx_stats_ext_size = 0; 9927 } 9928 hwrm_req_drop(bp, req_qs); 9929 9930 if (flags) 9931 return rc; 9932 9933 if (bp->fw_tx_stats_ext_size <= 9934 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 9935 bp->pri2cos_valid = 0; 9936 return rc; 9937 } 9938 9939 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 9940 if (rc) 9941 return rc; 9942 9943 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 9944 9945 resp_qc = hwrm_req_hold(bp, req_qc); 9946 rc = hwrm_req_send(bp, req_qc); 9947 if (!rc) { 9948 u8 *pri2cos; 9949 int i, j; 9950 9951 pri2cos = &resp_qc->pri0_cos_queue_id; 9952 for (i = 0; i < 8; i++) { 9953 u8 queue_id = pri2cos[i]; 9954 u8 queue_idx; 9955 9956 /* Per port queue IDs start from 0, 10, 20, etc */ 9957 queue_idx = queue_id % 10; 9958 if (queue_idx > BNXT_MAX_QUEUE) { 9959 bp->pri2cos_valid = false; 9960 hwrm_req_drop(bp, req_qc); 9961 return rc; 9962 } 9963 for (j = 0; j < bp->max_q; j++) { 9964 if (bp->q_ids[j] == queue_id) 9965 bp->pri2cos_idx[i] = queue_idx; 9966 } 9967 } 9968 bp->pri2cos_valid = true; 9969 } 9970 hwrm_req_drop(bp, req_qc); 9971 9972 return rc; 9973 } 9974 9975 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 9976 { 9977 bnxt_hwrm_tunnel_dst_port_free(bp, 9978 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 9979 bnxt_hwrm_tunnel_dst_port_free(bp, 9980 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 9981 } 9982 9983 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 9984 { 9985 int rc, i; 9986 u32 tpa_flags = 0; 9987 9988 if (set_tpa) 9989 tpa_flags = bp->flags & BNXT_FLAG_TPA; 9990 else if (BNXT_NO_FW_ACCESS(bp)) 9991 return 0; 9992 for (i = 0; i < bp->nr_vnics; i++) { 9993 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); 9994 if (rc) { 9995 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 9996 i, rc); 9997 return rc; 9998 } 9999 } 10000 return 0; 10001 } 10002 10003 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 10004 { 10005 int i; 10006 10007 for (i = 0; i < bp->nr_vnics; i++) 10008 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); 10009 } 10010 10011 static void bnxt_clear_vnic(struct bnxt *bp) 10012 { 10013 if (!bp->vnic_info) 10014 return; 10015 10016 bnxt_hwrm_clear_vnic_filter(bp); 10017 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 10018 /* clear all RSS settings before freeing the vnic ctx */ 10019 bnxt_hwrm_clear_vnic_rss(bp); 10020 bnxt_hwrm_vnic_ctx_free(bp); 10021 } 10022 /* before freeing the vnic, undo the vnic TPA settings */ 10023 if (bp->flags & BNXT_FLAG_TPA) 10024 bnxt_set_tpa(bp, false); 10025 bnxt_hwrm_vnic_free(bp); 10026 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10027 bnxt_hwrm_vnic_ctx_free(bp); 10028 } 10029 10030 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 10031 bool irq_re_init) 10032 { 10033 bnxt_clear_vnic(bp); 10034 bnxt_hwrm_ring_free(bp, close_path); 10035 bnxt_hwrm_ring_grp_free(bp); 10036 if (irq_re_init) { 10037 bnxt_hwrm_stat_ctx_free(bp); 10038 bnxt_hwrm_free_tunnel_ports(bp); 10039 } 10040 } 10041 10042 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 10043 { 10044 struct hwrm_func_cfg_input *req; 10045 u8 evb_mode; 10046 int rc; 10047 10048 if (br_mode == BRIDGE_MODE_VEB) 10049 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 10050 else if (br_mode == BRIDGE_MODE_VEPA) 10051 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 10052 else 10053 return -EINVAL; 10054 10055 rc = bnxt_hwrm_func_cfg_short_req_init(bp,
&req); 10056 if (rc) 10057 return rc; 10058 10059 req->fid = cpu_to_le16(0xffff); 10060 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 10061 req->evb_mode = evb_mode; 10062 return hwrm_req_send(bp, req); 10063 } 10064 10065 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 10066 { 10067 struct hwrm_func_cfg_input *req; 10068 int rc; 10069 10070 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 10071 return 0; 10072 10073 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10074 if (rc) 10075 return rc; 10076 10077 req->fid = cpu_to_le16(0xffff); 10078 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 10079 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 10080 if (size == 128) 10081 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 10082 10083 return hwrm_req_send(bp, req); 10084 } 10085 10086 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10087 { 10088 int rc; 10089 10090 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 10091 goto skip_rss_ctx; 10092 10093 /* allocate context for vnic */ 10094 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 10095 if (rc) { 10096 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10097 vnic->vnic_id, rc); 10098 goto vnic_setup_err; 10099 } 10100 bp->rsscos_nr_ctxs++; 10101 10102 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10103 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); 10104 if (rc) { 10105 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 10106 vnic->vnic_id, rc); 10107 goto vnic_setup_err; 10108 } 10109 bp->rsscos_nr_ctxs++; 10110 } 10111 10112 skip_rss_ctx: 10113 /* configure default vnic, ring grp */ 10114 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10115 if (rc) { 10116 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10117 vnic->vnic_id, rc); 10118 goto vnic_setup_err; 10119 } 10120 10121 /* Enable RSS hashing on vnic */ 10122 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); 10123 if (rc) { 10124 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 10125 vnic->vnic_id, rc); 10126 goto vnic_setup_err; 10127 } 10128 10129 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10130 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10131 if (rc) { 10132 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10133 vnic->vnic_id, rc); 10134 } 10135 } 10136 10137 vnic_setup_err: 10138 return rc; 10139 } 10140 10141 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10142 u8 valid) 10143 { 10144 struct hwrm_vnic_update_input *req; 10145 int rc; 10146 10147 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); 10148 if (rc) 10149 return rc; 10150 10151 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 10152 10153 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) 10154 req->mru = cpu_to_le16(vnic->mru); 10155 10156 req->enables = cpu_to_le32(valid); 10157 10158 return hwrm_req_send(bp, req); 10159 } 10160 10161 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10162 { 10163 int rc; 10164 10165 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 10166 if (rc) { 10167 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 10168 vnic->vnic_id, rc); 10169 return rc; 10170 } 10171 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10172 if (rc) 10173 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10174 vnic->vnic_id, rc); 10175 return rc; 10176 } 10177 10178 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10179 { 10180 int rc, i, nr_ctxs; 10181 10182 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 10183 for (i = 0; i < 
nr_ctxs; i++) { 10184 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); 10185 if (rc) { 10186 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 10187 vnic->vnic_id, i, rc); 10188 break; 10189 } 10190 bp->rsscos_nr_ctxs++; 10191 } 10192 if (i < nr_ctxs) 10193 return -ENOMEM; 10194 10195 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); 10196 if (rc) 10197 return rc; 10198 10199 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10200 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10201 if (rc) { 10202 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10203 vnic->vnic_id, rc); 10204 } 10205 } 10206 return rc; 10207 } 10208 10209 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10210 { 10211 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10212 return __bnxt_setup_vnic_p5(bp, vnic); 10213 else 10214 return __bnxt_setup_vnic(bp, vnic); 10215 } 10216 10217 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, 10218 struct bnxt_vnic_info *vnic, 10219 u16 start_rx_ring_idx, int rx_rings) 10220 { 10221 int rc; 10222 10223 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); 10224 if (rc) { 10225 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10226 vnic->vnic_id, rc); 10227 return rc; 10228 } 10229 return bnxt_setup_vnic(bp, vnic); 10230 } 10231 10232 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 10233 { 10234 struct bnxt_vnic_info *vnic; 10235 int i, rc = 0; 10236 10237 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 10238 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 10239 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); 10240 } 10241 10242 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10243 return 0; 10244 10245 for (i = 0; i < bp->rx_nr_rings; i++) { 10246 u16 vnic_id = i + 1; 10247 u16 ring_id = i; 10248 10249 if (vnic_id >= bp->nr_vnics) 10250 break; 10251 10252 vnic = &bp->vnic_info[vnic_id]; 10253 vnic->flags |= BNXT_VNIC_RFS_FLAG; 10254 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 10255 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 10256 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) 10257 break; 10258 } 10259 return rc; 10260 } 10261 10262 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, 10263 bool all) 10264 { 10265 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10266 struct bnxt_filter_base *usr_fltr, *tmp; 10267 struct bnxt_ntuple_filter *ntp_fltr; 10268 int i; 10269 10270 if (netif_running(bp->dev)) { 10271 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10272 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10273 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10274 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10275 } 10276 } 10277 if (!all) 10278 return; 10279 10280 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 10281 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && 10282 usr_fltr->fw_vnic_id == rss_ctx->index) { 10283 ntp_fltr = container_of(usr_fltr, 10284 struct bnxt_ntuple_filter, 10285 base); 10286 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); 10287 bnxt_del_ntp_filter(bp, ntp_fltr); 10288 bnxt_del_one_usr_fltr(bp, usr_fltr); 10289 } 10290 } 10291 10292 if (vnic->rss_table) 10293 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, 10294 vnic->rss_table, 10295 vnic->rss_table_dma_addr); 10296 bp->num_rss_ctx--; 10297 } 10298 10299 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) 10300 { 10301 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); 10302 struct ethtool_rxfh_context *ctx; 10303 unsigned long context; 10304 10305 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 
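/* For each user-created RSS context, re-allocate its vnic and restore the TPA and RSS settings; if any step fails, delete the context and report it to ethtool as lost. */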
10306 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10307 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10308 10309 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || 10310 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || 10311 __bnxt_setup_vnic_p5(bp, vnic)) { 10312 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", 10313 rss_ctx->index); 10314 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 10315 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); 10316 } 10317 } 10318 } 10319 10320 static void bnxt_clear_rss_ctxs(struct bnxt *bp) 10321 { 10322 struct ethtool_rxfh_context *ctx; 10323 unsigned long context; 10324 10325 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10326 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10327 10328 bnxt_del_one_rss_ctx(bp, rss_ctx, false); 10329 } 10330 } 10331 10332 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 10333 static bool bnxt_promisc_ok(struct bnxt *bp) 10334 { 10335 #ifdef CONFIG_BNXT_SRIOV 10336 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 10337 return false; 10338 #endif 10339 return true; 10340 } 10341 10342 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 10343 { 10344 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; 10345 unsigned int rc = 0; 10346 10347 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); 10348 if (rc) { 10349 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10350 rc); 10351 return rc; 10352 } 10353 10354 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10355 if (rc) { 10356 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10357 rc); 10358 return rc; 10359 } 10360 return rc; 10361 } 10362 10363 static int bnxt_cfg_rx_mode(struct bnxt *); 10364 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 10365 10366 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 10367 { 10368 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 10369 int rc = 0; 10370 unsigned int rx_nr_rings = bp->rx_nr_rings; 10371 10372 if (irq_re_init) { 10373 rc = bnxt_hwrm_stat_ctx_alloc(bp); 10374 if (rc) { 10375 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 10376 rc); 10377 goto err_out; 10378 } 10379 } 10380 10381 rc = bnxt_hwrm_ring_alloc(bp); 10382 if (rc) { 10383 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 10384 goto err_out; 10385 } 10386 10387 rc = bnxt_hwrm_ring_grp_alloc(bp); 10388 if (rc) { 10389 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 10390 goto err_out; 10391 } 10392 10393 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10394 rx_nr_rings--; 10395 10396 /* default vnic 0 */ 10397 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); 10398 if (rc) { 10399 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 10400 goto err_out; 10401 } 10402 10403 if (BNXT_VF(bp)) 10404 bnxt_hwrm_func_qcfg(bp); 10405 10406 rc = bnxt_setup_vnic(bp, vnic); 10407 if (rc) 10408 goto err_out; 10409 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 10410 bnxt_hwrm_update_rss_hash_cfg(bp); 10411 10412 if (bp->flags & BNXT_FLAG_RFS) { 10413 rc = bnxt_alloc_rfs_vnics(bp); 10414 if (rc) 10415 goto err_out; 10416 } 10417 10418 if (bp->flags & BNXT_FLAG_TPA) { 10419 rc = bnxt_set_tpa(bp, true); 10420 if (rc) 10421 goto err_out; 10422 } 10423 10424 if (BNXT_VF(bp)) 10425 bnxt_update_vf_mac(bp); 10426 10427 /* Filter for default vnic 0 */ 10428 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 10429 if (rc) { 10430 if (BNXT_VF(bp) && rc == -ENODEV) 10431 
netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 10432 else 10433 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 10434 goto err_out; 10435 } 10436 vnic->uc_filter_count = 1; 10437 10438 vnic->rx_mask = 0; 10439 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 10440 goto skip_rx_mask; 10441 10442 if (bp->dev->flags & IFF_BROADCAST) 10443 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10444 10445 if (bp->dev->flags & IFF_PROMISC) 10446 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10447 10448 if (bp->dev->flags & IFF_ALLMULTI) { 10449 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10450 vnic->mc_list_count = 0; 10451 } else if (bp->dev->flags & IFF_MULTICAST) { 10452 u32 mask = 0; 10453 10454 bnxt_mc_list_updated(bp, &mask); 10455 vnic->rx_mask |= mask; 10456 } 10457 10458 rc = bnxt_cfg_rx_mode(bp); 10459 if (rc) 10460 goto err_out; 10461 10462 skip_rx_mask: 10463 rc = bnxt_hwrm_set_coal(bp); 10464 if (rc) 10465 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 10466 rc); 10467 10468 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10469 rc = bnxt_setup_nitroa0_vnic(bp); 10470 if (rc) 10471 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 10472 rc); 10473 } 10474 10475 if (BNXT_VF(bp)) { 10476 bnxt_hwrm_func_qcfg(bp); 10477 netdev_update_features(bp->dev); 10478 } 10479 10480 return 0; 10481 10482 err_out: 10483 bnxt_hwrm_resource_free(bp, 0, true); 10484 10485 return rc; 10486 } 10487 10488 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 10489 { 10490 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 10491 return 0; 10492 } 10493 10494 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 10495 { 10496 bnxt_init_cp_rings(bp); 10497 bnxt_init_rx_rings(bp); 10498 bnxt_init_tx_rings(bp); 10499 bnxt_init_ring_grps(bp, irq_re_init); 10500 bnxt_init_vnics(bp); 10501 10502 return bnxt_init_chip(bp, irq_re_init); 10503 } 10504 10505 static int bnxt_set_real_num_queues(struct bnxt *bp) 10506 { 10507 int rc; 10508 struct net_device *dev = bp->dev; 10509 10510 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 10511 bp->tx_nr_rings_xdp); 10512 if (rc) 10513 return rc; 10514 10515 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 10516 if (rc) 10517 return rc; 10518 10519 #ifdef CONFIG_RFS_ACCEL 10520 if (bp->flags & BNXT_FLAG_RFS) 10521 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 10522 #endif 10523 10524 return rc; 10525 } 10526 10527 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10528 bool shared) 10529 { 10530 int _rx = *rx, _tx = *tx; 10531 10532 if (shared) { 10533 *rx = min_t(int, _rx, max); 10534 *tx = min_t(int, _tx, max); 10535 } else { 10536 if (max < 2) 10537 return -ENOMEM; 10538 10539 while (_rx + _tx > max) { 10540 if (_rx > _tx && _rx > 1) 10541 _rx--; 10542 else if (_tx > 1) 10543 _tx--; 10544 } 10545 *rx = _rx; 10546 *tx = _tx; 10547 } 10548 return 0; 10549 } 10550 10551 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 10552 { 10553 return (tx - tx_xdp) / tx_sets + tx_xdp; 10554 } 10555 10556 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 10557 { 10558 int tcs = bp->num_tc; 10559 10560 if (!tcs) 10561 tcs = 1; 10562 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 10563 } 10564 10565 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 10566 { 10567 int tcs = bp->num_tc; 10568 10569 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 10570 bp->tx_nr_rings_xdp; 10571 } 10572 10573 static int 
bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10574 bool sh) 10575 { 10576 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 10577 10578 if (tx_cp != *tx) { 10579 int tx_saved = tx_cp, rc; 10580 10581 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 10582 if (rc) 10583 return rc; 10584 if (tx_cp != tx_saved) 10585 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 10586 return 0; 10587 } 10588 return __bnxt_trim_rings(bp, rx, tx, max, sh); 10589 } 10590 10591 static void bnxt_setup_msix(struct bnxt *bp) 10592 { 10593 const int len = sizeof(bp->irq_tbl[0].name); 10594 struct net_device *dev = bp->dev; 10595 int tcs, i; 10596 10597 tcs = bp->num_tc; 10598 if (tcs) { 10599 int i, off, count; 10600 10601 for (i = 0; i < tcs; i++) { 10602 count = bp->tx_nr_rings_per_tc; 10603 off = BNXT_TC_TO_RING_BASE(bp, i); 10604 netdev_set_tc_queue(dev, i, count, off); 10605 } 10606 } 10607 10608 for (i = 0; i < bp->cp_nr_rings; i++) { 10609 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10610 char *attr; 10611 10612 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 10613 attr = "TxRx"; 10614 else if (i < bp->rx_nr_rings) 10615 attr = "rx"; 10616 else 10617 attr = "tx"; 10618 10619 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 10620 attr, i); 10621 bp->irq_tbl[map_idx].handler = bnxt_msix; 10622 } 10623 } 10624 10625 static int bnxt_init_int_mode(struct bnxt *bp); 10626 10627 static int bnxt_change_msix(struct bnxt *bp, int total) 10628 { 10629 struct msi_map map; 10630 int i; 10631 10632 /* add MSIX to the end if needed */ 10633 for (i = bp->total_irqs; i < total; i++) { 10634 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); 10635 if (map.index < 0) 10636 return bp->total_irqs; 10637 bp->irq_tbl[i].vector = map.virq; 10638 bp->total_irqs++; 10639 } 10640 10641 /* trim MSIX from the end if needed */ 10642 for (i = bp->total_irqs; i > total; i--) { 10643 map.index = i - 1; 10644 map.virq = bp->irq_tbl[i - 1].vector; 10645 pci_msix_free_irq(bp->pdev, map); 10646 bp->total_irqs--; 10647 } 10648 return bp->total_irqs; 10649 } 10650 10651 static int bnxt_setup_int_mode(struct bnxt *bp) 10652 { 10653 int rc; 10654 10655 if (!bp->irq_tbl) { 10656 rc = bnxt_init_int_mode(bp); 10657 if (rc || !bp->irq_tbl) 10658 return rc ?: -ENODEV; 10659 } 10660 10661 bnxt_setup_msix(bp); 10662 10663 rc = bnxt_set_real_num_queues(bp); 10664 return rc; 10665 } 10666 10667 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 10668 { 10669 return bp->hw_resc.max_rsscos_ctxs; 10670 } 10671 10672 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 10673 { 10674 return bp->hw_resc.max_vnics; 10675 } 10676 10677 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 10678 { 10679 return bp->hw_resc.max_stat_ctxs; 10680 } 10681 10682 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 10683 { 10684 return bp->hw_resc.max_cp_rings; 10685 } 10686 10687 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 10688 { 10689 unsigned int cp = bp->hw_resc.max_cp_rings; 10690 10691 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 10692 cp -= bnxt_get_ulp_msix_num(bp); 10693 10694 return cp; 10695 } 10696 10697 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 10698 { 10699 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10700 10701 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10702 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 10703 10704 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 10705 } 10706 10707 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int 
max_irqs) 10708 { 10709 bp->hw_resc.max_irqs = max_irqs; 10710 } 10711 10712 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 10713 { 10714 unsigned int cp; 10715 10716 cp = bnxt_get_max_func_cp_rings_for_en(bp); 10717 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10718 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 10719 else 10720 return cp - bp->cp_nr_rings; 10721 } 10722 10723 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 10724 { 10725 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 10726 } 10727 10728 static int bnxt_get_avail_msix(struct bnxt *bp, int num) 10729 { 10730 int max_irq = bnxt_get_max_func_irqs(bp); 10731 int total_req = bp->cp_nr_rings + num; 10732 10733 if (max_irq < total_req) { 10734 num = max_irq - bp->cp_nr_rings; 10735 if (num <= 0) 10736 return 0; 10737 } 10738 return num; 10739 } 10740 10741 static int bnxt_get_num_msix(struct bnxt *bp) 10742 { 10743 if (!BNXT_NEW_RM(bp)) 10744 return bnxt_get_max_func_irqs(bp); 10745 10746 return bnxt_nq_rings_in_use(bp); 10747 } 10748 10749 static int bnxt_init_int_mode(struct bnxt *bp) 10750 { 10751 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; 10752 10753 total_vecs = bnxt_get_num_msix(bp); 10754 max = bnxt_get_max_func_irqs(bp); 10755 if (total_vecs > max) 10756 total_vecs = max; 10757 10758 if (!total_vecs) 10759 return 0; 10760 10761 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 10762 min = 2; 10763 10764 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs, 10765 PCI_IRQ_MSIX); 10766 ulp_msix = bnxt_get_ulp_msix_num(bp); 10767 if (total_vecs < 0 || total_vecs < ulp_msix) { 10768 rc = -ENODEV; 10769 goto msix_setup_exit; 10770 } 10771 10772 tbl_size = total_vecs; 10773 if (pci_msix_can_alloc_dyn(bp->pdev)) 10774 tbl_size = max; 10775 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); 10776 if (bp->irq_tbl) { 10777 for (i = 0; i < total_vecs; i++) 10778 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); 10779 10780 bp->total_irqs = total_vecs; 10781 /* Trim rings based upon num of vectors allocated */ 10782 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 10783 total_vecs - ulp_msix, min == 1); 10784 if (rc) 10785 goto msix_setup_exit; 10786 10787 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 10788 bp->cp_nr_rings = (min == 1) ? 
10789 max_t(int, tx_cp, bp->rx_nr_rings) : 10790 tx_cp + bp->rx_nr_rings; 10791 10792 } else { 10793 rc = -ENOMEM; 10794 goto msix_setup_exit; 10795 } 10796 return 0; 10797 10798 msix_setup_exit: 10799 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); 10800 kfree(bp->irq_tbl); 10801 bp->irq_tbl = NULL; 10802 pci_free_irq_vectors(bp->pdev); 10803 return rc; 10804 } 10805 10806 static void bnxt_clear_int_mode(struct bnxt *bp) 10807 { 10808 pci_free_irq_vectors(bp->pdev); 10809 10810 kfree(bp->irq_tbl); 10811 bp->irq_tbl = NULL; 10812 } 10813 10814 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 10815 { 10816 bool irq_cleared = false; 10817 bool irq_change = false; 10818 int tcs = bp->num_tc; 10819 int irqs_required; 10820 int rc; 10821 10822 if (!bnxt_need_reserve_rings(bp)) 10823 return 0; 10824 10825 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 10826 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 10827 10828 if (ulp_msix > bp->ulp_num_msix_want) 10829 ulp_msix = bp->ulp_num_msix_want; 10830 irqs_required = ulp_msix + bp->cp_nr_rings; 10831 } else { 10832 irqs_required = bnxt_get_num_msix(bp); 10833 } 10834 10835 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { 10836 irq_change = true; 10837 if (!pci_msix_can_alloc_dyn(bp->pdev)) { 10838 bnxt_ulp_irq_stop(bp); 10839 bnxt_clear_int_mode(bp); 10840 irq_cleared = true; 10841 } 10842 } 10843 rc = __bnxt_reserve_rings(bp); 10844 if (irq_cleared) { 10845 if (!rc) 10846 rc = bnxt_init_int_mode(bp); 10847 bnxt_ulp_irq_restart(bp, rc); 10848 } else if (irq_change && !rc) { 10849 if (bnxt_change_msix(bp, irqs_required) != irqs_required) 10850 rc = -ENOSPC; 10851 } 10852 if (rc) { 10853 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 10854 return rc; 10855 } 10856 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 10857 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 10858 netdev_err(bp->dev, "tx ring reservation failure\n"); 10859 netdev_reset_tc(bp->dev); 10860 bp->num_tc = 0; 10861 if (bp->tx_nr_rings_xdp) 10862 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 10863 else 10864 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 10865 return -ENOMEM; 10866 } 10867 return 0; 10868 } 10869 10870 static void bnxt_free_irq(struct bnxt *bp) 10871 { 10872 struct bnxt_irq *irq; 10873 int i; 10874 10875 #ifdef CONFIG_RFS_ACCEL 10876 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 10877 bp->dev->rx_cpu_rmap = NULL; 10878 #endif 10879 if (!bp->irq_tbl || !bp->bnapi) 10880 return; 10881 10882 for (i = 0; i < bp->cp_nr_rings; i++) { 10883 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10884 10885 irq = &bp->irq_tbl[map_idx]; 10886 if (irq->requested) { 10887 if (irq->have_cpumask) { 10888 irq_set_affinity_hint(irq->vector, NULL); 10889 free_cpumask_var(irq->cpu_mask); 10890 irq->have_cpumask = 0; 10891 } 10892 free_irq(irq->vector, bp->bnapi[i]); 10893 } 10894 10895 irq->requested = 0; 10896 } 10897 } 10898 10899 static int bnxt_request_irq(struct bnxt *bp) 10900 { 10901 int i, j, rc = 0; 10902 unsigned long flags = 0; 10903 #ifdef CONFIG_RFS_ACCEL 10904 struct cpu_rmap *rmap; 10905 #endif 10906 10907 rc = bnxt_setup_int_mode(bp); 10908 if (rc) { 10909 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 10910 rc); 10911 return rc; 10912 } 10913 #ifdef CONFIG_RFS_ACCEL 10914 rmap = bp->dev->rx_cpu_rmap; 10915 #endif 10916 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 10917 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10918 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 10919 10920 #ifdef CONFIG_RFS_ACCEL 
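/* With accelerated RFS, add each RX ring's IRQ to the CPU reverse map so that received flows can be steered to the CPU servicing that ring. */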
10921 if (rmap && bp->bnapi[i]->rx_ring) { 10922 rc = irq_cpu_rmap_add(rmap, irq->vector); 10923 if (rc) 10924 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 10925 j); 10926 j++; 10927 } 10928 #endif 10929 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 10930 bp->bnapi[i]); 10931 if (rc) 10932 break; 10933 10934 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); 10935 irq->requested = 1; 10936 10937 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 10938 int numa_node = dev_to_node(&bp->pdev->dev); 10939 10940 irq->have_cpumask = 1; 10941 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 10942 irq->cpu_mask); 10943 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 10944 if (rc) { 10945 netdev_warn(bp->dev, 10946 "Set affinity failed, IRQ = %d\n", 10947 irq->vector); 10948 break; 10949 } 10950 } 10951 } 10952 return rc; 10953 } 10954 10955 static void bnxt_del_napi(struct bnxt *bp) 10956 { 10957 int i; 10958 10959 if (!bp->bnapi) 10960 return; 10961 10962 for (i = 0; i < bp->rx_nr_rings; i++) 10963 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 10964 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 10965 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 10966 10967 for (i = 0; i < bp->cp_nr_rings; i++) { 10968 struct bnxt_napi *bnapi = bp->bnapi[i]; 10969 10970 __netif_napi_del(&bnapi->napi); 10971 } 10972 /* We called __netif_napi_del(), we need 10973 * to respect an RCU grace period before freeing napi structures. 10974 */ 10975 synchronize_net(); 10976 } 10977 10978 static void bnxt_init_napi(struct bnxt *bp) 10979 { 10980 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 10981 unsigned int cp_nr_rings = bp->cp_nr_rings; 10982 struct bnxt_napi *bnapi; 10983 int i; 10984 10985 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10986 poll_fn = bnxt_poll_p5; 10987 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10988 cp_nr_rings--; 10989 for (i = 0; i < cp_nr_rings; i++) { 10990 bnapi = bp->bnapi[i]; 10991 netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn, 10992 bnapi->index); 10993 } 10994 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10995 bnapi = bp->bnapi[cp_nr_rings]; 10996 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); 10997 } 10998 } 10999 11000 static void bnxt_disable_napi(struct bnxt *bp) 11001 { 11002 int i; 11003 11004 if (!bp->bnapi || 11005 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 11006 return; 11007 11008 for (i = 0; i < bp->cp_nr_rings; i++) { 11009 struct bnxt_napi *bnapi = bp->bnapi[i]; 11010 struct bnxt_cp_ring_info *cpr; 11011 11012 cpr = &bnapi->cp_ring; 11013 if (bnapi->tx_fault) 11014 cpr->sw_stats->tx.tx_resets++; 11015 if (bnapi->in_reset) 11016 cpr->sw_stats->rx.rx_resets++; 11017 napi_disable(&bnapi->napi); 11018 if (bnapi->rx_ring) 11019 cancel_work_sync(&cpr->dim.work); 11020 } 11021 } 11022 11023 static void bnxt_enable_napi(struct bnxt *bp) 11024 { 11025 int i; 11026 11027 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11028 for (i = 0; i < bp->cp_nr_rings; i++) { 11029 struct bnxt_napi *bnapi = bp->bnapi[i]; 11030 struct bnxt_cp_ring_info *cpr; 11031 11032 bnapi->tx_fault = 0; 11033 11034 cpr = &bnapi->cp_ring; 11035 bnapi->in_reset = false; 11036 11037 if (bnapi->rx_ring) { 11038 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 11039 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 11040 } 11041 napi_enable(&bnapi->napi); 11042 } 11043 } 11044 11045 void bnxt_tx_disable(struct bnxt *bp) 11046 { 11047 int i; 11048 struct bnxt_tx_ring_info *txr; 11049 11050 if (bp->tx_ring) { 11051 
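/* Flag every TX ring as closing before the queues are stopped; the synchronize_net() below ensures in-flight NAPI polls see the new dev_state. */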
for (i = 0; i < bp->tx_nr_rings; i++) { 11052 txr = &bp->tx_ring[i]; 11053 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11054 } 11055 } 11056 /* Make sure napi polls see @dev_state change */ 11057 synchronize_net(); 11058 /* Drop carrier first to prevent TX timeout */ 11059 netif_carrier_off(bp->dev); 11060 /* Stop all TX queues */ 11061 netif_tx_disable(bp->dev); 11062 } 11063 11064 void bnxt_tx_enable(struct bnxt *bp) 11065 { 11066 int i; 11067 struct bnxt_tx_ring_info *txr; 11068 11069 for (i = 0; i < bp->tx_nr_rings; i++) { 11070 txr = &bp->tx_ring[i]; 11071 WRITE_ONCE(txr->dev_state, 0); 11072 } 11073 /* Make sure napi polls see @dev_state change */ 11074 synchronize_net(); 11075 netif_tx_wake_all_queues(bp->dev); 11076 if (BNXT_LINK_IS_UP(bp)) 11077 netif_carrier_on(bp->dev); 11078 } 11079 11080 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 11081 { 11082 u8 active_fec = link_info->active_fec_sig_mode & 11083 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 11084 11085 switch (active_fec) { 11086 default: 11087 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 11088 return "None"; 11089 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 11090 return "Clause 74 BaseR"; 11091 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 11092 return "Clause 91 RS(528,514)"; 11093 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 11094 return "Clause 91 RS544_1XN"; 11095 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 11096 return "Clause 91 RS(544,514)"; 11097 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 11098 return "Clause 91 RS272_1XN"; 11099 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 11100 return "Clause 91 RS(272,257)"; 11101 } 11102 } 11103 11104 void bnxt_report_link(struct bnxt *bp) 11105 { 11106 if (BNXT_LINK_IS_UP(bp)) { 11107 const char *signal = ""; 11108 const char *flow_ctrl; 11109 const char *duplex; 11110 u32 speed; 11111 u16 fec; 11112 11113 netif_carrier_on(bp->dev); 11114 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 11115 if (speed == SPEED_UNKNOWN) { 11116 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 11117 return; 11118 } 11119 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 11120 duplex = "full"; 11121 else 11122 duplex = "half"; 11123 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 11124 flow_ctrl = "ON - receive & transmit"; 11125 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 11126 flow_ctrl = "ON - transmit"; 11127 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 11128 flow_ctrl = "ON - receive"; 11129 else 11130 flow_ctrl = "none"; 11131 if (bp->link_info.phy_qcfg_resp.option_flags & 11132 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 11133 u8 sig_mode = bp->link_info.active_fec_sig_mode & 11134 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 11135 switch (sig_mode) { 11136 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 11137 signal = "(NRZ) "; 11138 break; 11139 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 11140 signal = "(PAM4 56Gbps) "; 11141 break; 11142 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 11143 signal = "(PAM4 112Gbps) "; 11144 break; 11145 default: 11146 break; 11147 } 11148 } 11149 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 11150 speed, signal, duplex, flow_ctrl); 11151 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 11152 netdev_info(bp->dev, "EEE is %s\n", 11153 bp->eee.eee_active ? 
"active" : 11154 "not active"); 11155 fec = bp->link_info.fec_cfg; 11156 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 11157 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 11158 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 11159 bnxt_report_fec(&bp->link_info)); 11160 } else { 11161 netif_carrier_off(bp->dev); 11162 netdev_err(bp->dev, "NIC Link is Down\n"); 11163 } 11164 } 11165 11166 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 11167 { 11168 if (!resp->supported_speeds_auto_mode && 11169 !resp->supported_speeds_force_mode && 11170 !resp->supported_pam4_speeds_auto_mode && 11171 !resp->supported_pam4_speeds_force_mode && 11172 !resp->supported_speeds2_auto_mode && 11173 !resp->supported_speeds2_force_mode) 11174 return true; 11175 return false; 11176 } 11177 11178 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 11179 { 11180 struct bnxt_link_info *link_info = &bp->link_info; 11181 struct hwrm_port_phy_qcaps_output *resp; 11182 struct hwrm_port_phy_qcaps_input *req; 11183 int rc = 0; 11184 11185 if (bp->hwrm_spec_code < 0x10201) 11186 return 0; 11187 11188 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 11189 if (rc) 11190 return rc; 11191 11192 resp = hwrm_req_hold(bp, req); 11193 rc = hwrm_req_send(bp, req); 11194 if (rc) 11195 goto hwrm_phy_qcaps_exit; 11196 11197 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 11198 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 11199 struct ethtool_keee *eee = &bp->eee; 11200 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 11201 11202 _bnxt_fw_to_linkmode(eee->supported, fw_speeds); 11203 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 11204 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 11205 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 11206 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 11207 } 11208 11209 if (bp->hwrm_spec_code >= 0x10a01) { 11210 if (bnxt_phy_qcaps_no_speed(resp)) { 11211 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 11212 netdev_warn(bp->dev, "Ethernet link disabled\n"); 11213 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 11214 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 11215 netdev_info(bp->dev, "Ethernet link enabled\n"); 11216 /* Phy re-enabled, reprobe the speeds */ 11217 link_info->support_auto_speeds = 0; 11218 link_info->support_pam4_auto_speeds = 0; 11219 link_info->support_auto_speeds2 = 0; 11220 } 11221 } 11222 if (resp->supported_speeds_auto_mode) 11223 link_info->support_auto_speeds = 11224 le16_to_cpu(resp->supported_speeds_auto_mode); 11225 if (resp->supported_pam4_speeds_auto_mode) 11226 link_info->support_pam4_auto_speeds = 11227 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 11228 if (resp->supported_speeds2_auto_mode) 11229 link_info->support_auto_speeds2 = 11230 le16_to_cpu(resp->supported_speeds2_auto_mode); 11231 11232 bp->port_count = resp->port_cnt; 11233 11234 hwrm_phy_qcaps_exit: 11235 hwrm_req_drop(bp, req); 11236 return rc; 11237 } 11238 11239 static bool bnxt_support_dropped(u16 advertising, u16 supported) 11240 { 11241 u16 diff = advertising ^ supported; 11242 11243 return ((supported | diff) != supported); 11244 } 11245 11246 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) 11247 { 11248 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 11249 11250 /* Check if any advertised speeds are no longer supported. The caller 11251 * holds the link_lock mutex, so we can modify link_info settings. 
11252 */ 11253 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11254 if (bnxt_support_dropped(link_info->advertising, 11255 link_info->support_auto_speeds2)) { 11256 link_info->advertising = link_info->support_auto_speeds2; 11257 return true; 11258 } 11259 return false; 11260 } 11261 if (bnxt_support_dropped(link_info->advertising, 11262 link_info->support_auto_speeds)) { 11263 link_info->advertising = link_info->support_auto_speeds; 11264 return true; 11265 } 11266 if (bnxt_support_dropped(link_info->advertising_pam4, 11267 link_info->support_pam4_auto_speeds)) { 11268 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 11269 return true; 11270 } 11271 return false; 11272 } 11273 11274 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 11275 { 11276 struct bnxt_link_info *link_info = &bp->link_info; 11277 struct hwrm_port_phy_qcfg_output *resp; 11278 struct hwrm_port_phy_qcfg_input *req; 11279 u8 link_state = link_info->link_state; 11280 bool support_changed; 11281 int rc; 11282 11283 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 11284 if (rc) 11285 return rc; 11286 11287 resp = hwrm_req_hold(bp, req); 11288 rc = hwrm_req_send(bp, req); 11289 if (rc) { 11290 hwrm_req_drop(bp, req); 11291 if (BNXT_VF(bp) && rc == -ENODEV) { 11292 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 11293 rc = 0; 11294 } 11295 return rc; 11296 } 11297 11298 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 11299 link_info->phy_link_status = resp->link; 11300 link_info->duplex = resp->duplex_cfg; 11301 if (bp->hwrm_spec_code >= 0x10800) 11302 link_info->duplex = resp->duplex_state; 11303 link_info->pause = resp->pause; 11304 link_info->auto_mode = resp->auto_mode; 11305 link_info->auto_pause_setting = resp->auto_pause; 11306 link_info->lp_pause = resp->link_partner_adv_pause; 11307 link_info->force_pause_setting = resp->force_pause; 11308 link_info->duplex_setting = resp->duplex_cfg; 11309 if (link_info->phy_link_status == BNXT_LINK_LINK) { 11310 link_info->link_speed = le16_to_cpu(resp->link_speed); 11311 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 11312 link_info->active_lanes = resp->active_lanes; 11313 } else { 11314 link_info->link_speed = 0; 11315 link_info->active_lanes = 0; 11316 } 11317 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 11318 link_info->force_pam4_link_speed = 11319 le16_to_cpu(resp->force_pam4_link_speed); 11320 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 11321 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 11322 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 11323 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 11324 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 11325 link_info->auto_pam4_link_speeds = 11326 le16_to_cpu(resp->auto_pam4_link_speed_mask); 11327 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 11328 link_info->lp_auto_link_speeds = 11329 le16_to_cpu(resp->link_partner_adv_speeds); 11330 link_info->lp_auto_pam4_link_speeds = 11331 resp->link_partner_pam4_adv_speeds; 11332 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 11333 link_info->phy_ver[0] = resp->phy_maj; 11334 link_info->phy_ver[1] = resp->phy_min; 11335 link_info->phy_ver[2] = resp->phy_bld; 11336 link_info->media_type = resp->media_type; 11337 link_info->phy_type = resp->phy_type; 11338 link_info->transceiver = resp->xcvr_pkg_type; 11339 link_info->phy_addr = resp->eee_config_phy_addr & 11340 
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 11341 link_info->module_status = resp->module_status; 11342 11343 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 11344 struct ethtool_keee *eee = &bp->eee; 11345 u16 fw_speeds; 11346 11347 eee->eee_active = 0; 11348 if (resp->eee_config_phy_addr & 11349 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 11350 eee->eee_active = 1; 11351 fw_speeds = le16_to_cpu( 11352 resp->link_partner_adv_eee_link_speed_mask); 11353 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); 11354 } 11355 11356 /* Pull initial EEE config */ 11357 if (!chng_link_state) { 11358 if (resp->eee_config_phy_addr & 11359 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 11360 eee->eee_enabled = 1; 11361 11362 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 11363 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); 11364 11365 if (resp->eee_config_phy_addr & 11366 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 11367 __le32 tmr; 11368 11369 eee->tx_lpi_enabled = 1; 11370 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 11371 eee->tx_lpi_timer = le32_to_cpu(tmr) & 11372 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 11373 } 11374 } 11375 } 11376 11377 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 11378 if (bp->hwrm_spec_code >= 0x10504) { 11379 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 11380 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 11381 } 11382 /* TODO: need to add more logic to report VF link */ 11383 if (chng_link_state) { 11384 if (link_info->phy_link_status == BNXT_LINK_LINK) 11385 link_info->link_state = BNXT_LINK_STATE_UP; 11386 else 11387 link_info->link_state = BNXT_LINK_STATE_DOWN; 11388 if (link_state != link_info->link_state) 11389 bnxt_report_link(bp); 11390 } else { 11391 /* always report link down if not required to update the link state */ 11392 link_info->link_state = BNXT_LINK_STATE_DOWN; 11393 } 11394 hwrm_req_drop(bp, req); 11395 11396 if (!BNXT_PHY_CFG_ABLE(bp)) 11397 return 0; 11398 11399 support_changed = bnxt_support_speed_dropped(link_info); 11400 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 11401 bnxt_hwrm_set_link_setting(bp, true, false); 11402 return 0; 11403 } 11404 11405 static void bnxt_get_port_module_status(struct bnxt *bp) 11406 { 11407 struct bnxt_link_info *link_info = &bp->link_info; 11408 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 11409 u8 module_status; 11410 11411 if (bnxt_update_link(bp, true)) 11412 return; 11413 11414 module_status = link_info->module_status; 11415 switch (module_status) { 11416 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 11417 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 11418 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 11419 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 11420 bp->pf.port_id); 11421 if (bp->hwrm_spec_code >= 0x10201) { 11422 netdev_warn(bp->dev, "Module part number %s\n", 11423 resp->phy_vendor_partnumber); 11424 } 11425 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 11426 netdev_warn(bp->dev, "TX is disabled\n"); 11427 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 11428 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 11429 } 11430 } 11431 11432 static void 11433 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 11434 { 11435 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 11436 if (bp->hwrm_spec_code >= 0x10201) 11437 req->auto_pause = 11438 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 11439 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11440 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 11441 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 11442 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 11443 req->enables |= 11444 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 11445 } else { 11446 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 11447 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 11448 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 11449 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 11450 req->enables |= 11451 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 11452 if (bp->hwrm_spec_code >= 0x10201) { 11453 req->auto_pause = req->force_pause; 11454 req->enables |= cpu_to_le32( 11455 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 11456 } 11457 } 11458 } 11459 11460 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 11461 { 11462 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 11463 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 11464 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11465 req->enables |= 11466 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 11467 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 11468 } else if (bp->link_info.advertising) { 11469 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 11470 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 11471 } 11472 if (bp->link_info.advertising_pam4) { 11473 req->enables |= 11474 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 11475 req->auto_link_pam4_speed_mask = 11476 cpu_to_le16(bp->link_info.advertising_pam4); 11477 } 11478 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 11479 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 11480 } else { 11481 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 11482 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11483 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 11484 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 11485 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 11486 (u32)bp->link_info.req_link_speed); 11487 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 11488 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 11489 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 11490 } else { 11491 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 11492 } 11493 } 11494 11495 /* tell chimp that the setting takes effect immediately */ 11496 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 11497 } 11498 11499 int bnxt_hwrm_set_pause(struct bnxt *bp) 11500 { 11501 struct hwrm_port_phy_cfg_input *req; 11502 int rc; 11503 11504 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11505 if (rc) 11506 return rc; 11507 11508 bnxt_hwrm_set_pause_common(bp, req); 11509 11510 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 11511 bp->link_info.force_link_chng) 11512 bnxt_hwrm_set_link_common(bp, req); 11513 11514 rc = hwrm_req_send(bp, req); 11515 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 11516 /* since changing of pause setting doesn't trigger any link 11517 * change event, the driver needs to update the current pause 11518 * result upon successfully return of the phy_cfg command 11519 */ 11520 bp->link_info.pause = 11521 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 11522 bp->link_info.auto_pause_setting = 0; 11523 if 
(!bp->link_info.force_link_chng) 11524 bnxt_report_link(bp); 11525 } 11526 bp->link_info.force_link_chng = false; 11527 return rc; 11528 } 11529 11530 static void bnxt_hwrm_set_eee(struct bnxt *bp, 11531 struct hwrm_port_phy_cfg_input *req) 11532 { 11533 struct ethtool_keee *eee = &bp->eee; 11534 11535 if (eee->eee_enabled) { 11536 u16 eee_speeds; 11537 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 11538 11539 if (eee->tx_lpi_enabled) 11540 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 11541 else 11542 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 11543 11544 req->flags |= cpu_to_le32(flags); 11545 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 11546 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 11547 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 11548 } else { 11549 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 11550 } 11551 } 11552 11553 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 11554 { 11555 struct hwrm_port_phy_cfg_input *req; 11556 int rc; 11557 11558 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11559 if (rc) 11560 return rc; 11561 11562 if (set_pause) 11563 bnxt_hwrm_set_pause_common(bp, req); 11564 11565 bnxt_hwrm_set_link_common(bp, req); 11566 11567 if (set_eee) 11568 bnxt_hwrm_set_eee(bp, req); 11569 return hwrm_req_send(bp, req); 11570 } 11571 11572 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 11573 { 11574 struct hwrm_port_phy_cfg_input *req; 11575 int rc; 11576 11577 if (!BNXT_SINGLE_PF(bp)) 11578 return 0; 11579 11580 if (pci_num_vf(bp->pdev) && 11581 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 11582 return 0; 11583 11584 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11585 if (rc) 11586 return rc; 11587 11588 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 11589 rc = hwrm_req_send(bp, req); 11590 if (!rc) { 11591 mutex_lock(&bp->link_lock); 11592 /* Device is not obliged link down in certain scenarios, even 11593 * when forced. Setting the state unknown is consistent with 11594 * driver startup and will force link state to be reported 11595 * during subsequent open based on PORT_PHY_QCFG. 
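* (Firmware may keep the port up in some configurations, e.g. when the port is shared with other functions.)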
11596 */ 11597 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 11598 mutex_unlock(&bp->link_lock); 11599 } 11600 return rc; 11601 } 11602 11603 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 11604 { 11605 #ifdef CONFIG_TEE_BNXT_FW 11606 int rc = tee_bnxt_fw_load(); 11607 11608 if (rc) 11609 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 11610 11611 return rc; 11612 #else 11613 netdev_err(bp->dev, "OP-TEE not supported\n"); 11614 return -ENODEV; 11615 #endif 11616 } 11617 11618 static int bnxt_try_recover_fw(struct bnxt *bp) 11619 { 11620 if (bp->fw_health && bp->fw_health->status_reliable) { 11621 int retry = 0, rc; 11622 u32 sts; 11623 11624 do { 11625 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11626 rc = bnxt_hwrm_poll(bp); 11627 if (!BNXT_FW_IS_BOOTING(sts) && 11628 !BNXT_FW_IS_RECOVERING(sts)) 11629 break; 11630 retry++; 11631 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 11632 11633 if (!BNXT_FW_IS_HEALTHY(sts)) { 11634 netdev_err(bp->dev, 11635 "Firmware not responding, status: 0x%x\n", 11636 sts); 11637 rc = -ENODEV; 11638 } 11639 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 11640 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 11641 return bnxt_fw_reset_via_optee(bp); 11642 } 11643 return rc; 11644 } 11645 11646 return -ENODEV; 11647 } 11648 11649 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 11650 { 11651 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11652 11653 if (!BNXT_NEW_RM(bp)) 11654 return; /* no resource reservations required */ 11655 11656 hw_resc->resv_cp_rings = 0; 11657 hw_resc->resv_stat_ctxs = 0; 11658 hw_resc->resv_irqs = 0; 11659 hw_resc->resv_tx_rings = 0; 11660 hw_resc->resv_rx_rings = 0; 11661 hw_resc->resv_hw_ring_grps = 0; 11662 hw_resc->resv_vnics = 0; 11663 hw_resc->resv_rsscos_ctxs = 0; 11664 if (!fw_reset) { 11665 bp->tx_nr_rings = 0; 11666 bp->rx_nr_rings = 0; 11667 } 11668 } 11669 11670 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 11671 { 11672 int rc; 11673 11674 if (!BNXT_NEW_RM(bp)) 11675 return 0; /* no resource reservations required */ 11676 11677 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 11678 if (rc) 11679 netdev_err(bp->dev, "resc_qcaps failed\n"); 11680 11681 bnxt_clear_reservations(bp, fw_reset); 11682 11683 return rc; 11684 } 11685 11686 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 11687 { 11688 struct hwrm_func_drv_if_change_output *resp; 11689 struct hwrm_func_drv_if_change_input *req; 11690 bool fw_reset = !bp->irq_tbl; 11691 bool resc_reinit = false; 11692 int rc, retry = 0; 11693 u32 flags = 0; 11694 11695 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 11696 return 0; 11697 11698 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 11699 if (rc) 11700 return rc; 11701 11702 if (up) 11703 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 11704 resp = hwrm_req_hold(bp, req); 11705 11706 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 11707 while (retry < BNXT_FW_IF_RETRY) { 11708 rc = hwrm_req_send(bp, req); 11709 if (rc != -EAGAIN) 11710 break; 11711 11712 msleep(50); 11713 retry++; 11714 } 11715 11716 if (rc == -EAGAIN) { 11717 hwrm_req_drop(bp, req); 11718 return rc; 11719 } else if (!rc) { 11720 flags = le32_to_cpu(resp->flags); 11721 } else if (up) { 11722 rc = bnxt_try_recover_fw(bp); 11723 fw_reset = true; 11724 } 11725 hwrm_req_drop(bp, req); 11726 if (rc) 11727 return rc; 11728 11729 if (!up) { 11730 bnxt_inv_fw_health_reg(bp); 11731 return 0; 11732 } 11733 11734 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 11735 
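/* FW indicates the resource reservations have changed; re-query them below */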
resc_reinit = true; 11736 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 11737 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 11738 fw_reset = true; 11739 else 11740 bnxt_remap_fw_health_regs(bp); 11741 11742 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 11743 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 11744 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11745 return -ENODEV; 11746 } 11747 if (resc_reinit || fw_reset) { 11748 if (fw_reset) { 11749 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11750 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11751 bnxt_ulp_irq_stop(bp); 11752 bnxt_free_ctx_mem(bp); 11753 bnxt_dcb_free(bp); 11754 rc = bnxt_fw_init_one(bp); 11755 if (rc) { 11756 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11757 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11758 return rc; 11759 } 11760 bnxt_clear_int_mode(bp); 11761 rc = bnxt_init_int_mode(bp); 11762 if (rc) { 11763 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11764 netdev_err(bp->dev, "init int mode failed\n"); 11765 return rc; 11766 } 11767 } 11768 rc = bnxt_cancel_reservations(bp, fw_reset); 11769 } 11770 return rc; 11771 } 11772 11773 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 11774 { 11775 struct hwrm_port_led_qcaps_output *resp; 11776 struct hwrm_port_led_qcaps_input *req; 11777 struct bnxt_pf_info *pf = &bp->pf; 11778 int rc; 11779 11780 bp->num_leds = 0; 11781 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 11782 return 0; 11783 11784 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 11785 if (rc) 11786 return rc; 11787 11788 req->port_id = cpu_to_le16(pf->port_id); 11789 resp = hwrm_req_hold(bp, req); 11790 rc = hwrm_req_send(bp, req); 11791 if (rc) { 11792 hwrm_req_drop(bp, req); 11793 return rc; 11794 } 11795 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 11796 int i; 11797 11798 bp->num_leds = resp->num_leds; 11799 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 11800 bp->num_leds); 11801 for (i = 0; i < bp->num_leds; i++) { 11802 struct bnxt_led_info *led = &bp->leds[i]; 11803 __le16 caps = led->led_state_caps; 11804 11805 if (!led->led_group_id || 11806 !BNXT_LED_ALT_BLINK_CAP(caps)) { 11807 bp->num_leds = 0; 11808 break; 11809 } 11810 } 11811 } 11812 hwrm_req_drop(bp, req); 11813 return 0; 11814 } 11815 11816 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 11817 { 11818 struct hwrm_wol_filter_alloc_output *resp; 11819 struct hwrm_wol_filter_alloc_input *req; 11820 int rc; 11821 11822 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 11823 if (rc) 11824 return rc; 11825 11826 req->port_id = cpu_to_le16(bp->pf.port_id); 11827 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 11828 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 11829 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 11830 11831 resp = hwrm_req_hold(bp, req); 11832 rc = hwrm_req_send(bp, req); 11833 if (!rc) 11834 bp->wol_filter_id = resp->wol_filter_id; 11835 hwrm_req_drop(bp, req); 11836 return rc; 11837 } 11838 11839 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 11840 { 11841 struct hwrm_wol_filter_free_input *req; 11842 int rc; 11843 11844 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 11845 if (rc) 11846 return rc; 11847 11848 req->port_id = cpu_to_le16(bp->pf.port_id); 11849 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 11850 req->wol_filter_id = bp->wol_filter_id; 11851 11852 return hwrm_req_send(bp, req); 11853 } 11854 11855 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 
11856 { 11857 struct hwrm_wol_filter_qcfg_output *resp; 11858 struct hwrm_wol_filter_qcfg_input *req; 11859 u16 next_handle = 0; 11860 int rc; 11861 11862 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 11863 if (rc) 11864 return rc; 11865 11866 req->port_id = cpu_to_le16(bp->pf.port_id); 11867 req->handle = cpu_to_le16(handle); 11868 resp = hwrm_req_hold(bp, req); 11869 rc = hwrm_req_send(bp, req); 11870 if (!rc) { 11871 next_handle = le16_to_cpu(resp->next_handle); 11872 if (next_handle != 0) { 11873 if (resp->wol_type == 11874 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 11875 bp->wol = 1; 11876 bp->wol_filter_id = resp->wol_filter_id; 11877 } 11878 } 11879 } 11880 hwrm_req_drop(bp, req); 11881 return next_handle; 11882 } 11883 11884 static void bnxt_get_wol_settings(struct bnxt *bp) 11885 { 11886 u16 handle = 0; 11887 11888 bp->wol = 0; 11889 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 11890 return; 11891 11892 do { 11893 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 11894 } while (handle && handle != 0xffff); 11895 } 11896 11897 static bool bnxt_eee_config_ok(struct bnxt *bp) 11898 { 11899 struct ethtool_keee *eee = &bp->eee; 11900 struct bnxt_link_info *link_info = &bp->link_info; 11901 11902 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 11903 return true; 11904 11905 if (eee->eee_enabled) { 11906 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 11907 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 11908 11909 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 11910 11911 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11912 eee->eee_enabled = 0; 11913 return false; 11914 } 11915 if (linkmode_andnot(tmp, eee->advertised, advertising)) { 11916 linkmode_and(eee->advertised, advertising, 11917 eee->supported); 11918 return false; 11919 } 11920 } 11921 return true; 11922 } 11923 11924 static int bnxt_update_phy_setting(struct bnxt *bp) 11925 { 11926 int rc; 11927 bool update_link = false; 11928 bool update_pause = false; 11929 bool update_eee = false; 11930 struct bnxt_link_info *link_info = &bp->link_info; 11931 11932 rc = bnxt_update_link(bp, true); 11933 if (rc) { 11934 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 11935 rc); 11936 return rc; 11937 } 11938 if (!BNXT_SINGLE_PF(bp)) 11939 return 0; 11940 11941 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11942 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 11943 link_info->req_flow_ctrl) 11944 update_pause = true; 11945 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11946 link_info->force_pause_setting != link_info->req_flow_ctrl) 11947 update_pause = true; 11948 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11949 if (BNXT_AUTO_MODE(link_info->auto_mode)) 11950 update_link = true; 11951 if (bnxt_force_speed_updated(link_info)) 11952 update_link = true; 11953 if (link_info->req_duplex != link_info->duplex_setting) 11954 update_link = true; 11955 } else { 11956 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 11957 update_link = true; 11958 if (bnxt_auto_speed_updated(link_info)) 11959 update_link = true; 11960 } 11961 11962 /* The last close may have shutdown the link, so need to call 11963 * PHY_CFG to bring it back up. 
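* (bnxt_hwrm_shutdown_link() forces the link down when the device is closed.)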
11964 */ 11965 if (!BNXT_LINK_IS_UP(bp)) 11966 update_link = true; 11967 11968 if (!bnxt_eee_config_ok(bp)) 11969 update_eee = true; 11970 11971 if (update_link) 11972 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 11973 else if (update_pause) 11974 rc = bnxt_hwrm_set_pause(bp); 11975 if (rc) { 11976 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 11977 rc); 11978 return rc; 11979 } 11980 11981 return rc; 11982 } 11983 11984 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 11985 11986 static int bnxt_reinit_after_abort(struct bnxt *bp) 11987 { 11988 int rc; 11989 11990 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11991 return -EBUSY; 11992 11993 if (bp->dev->reg_state == NETREG_UNREGISTERED) 11994 return -ENODEV; 11995 11996 rc = bnxt_fw_init_one(bp); 11997 if (!rc) { 11998 bnxt_clear_int_mode(bp); 11999 rc = bnxt_init_int_mode(bp); 12000 if (!rc) { 12001 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12002 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12003 } 12004 } 12005 return rc; 12006 } 12007 12008 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 12009 { 12010 struct bnxt_ntuple_filter *ntp_fltr; 12011 struct bnxt_l2_filter *l2_fltr; 12012 12013 if (list_empty(&fltr->list)) 12014 return; 12015 12016 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 12017 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 12018 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 12019 atomic_inc(&l2_fltr->refcnt); 12020 ntp_fltr->l2_fltr = l2_fltr; 12021 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { 12022 bnxt_del_ntp_filter(bp, ntp_fltr); 12023 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", 12024 fltr->sw_id); 12025 } 12026 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { 12027 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); 12028 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { 12029 bnxt_del_l2_filter(bp, l2_fltr); 12030 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", 12031 fltr->sw_id); 12032 } 12033 } 12034 } 12035 12036 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) 12037 { 12038 struct bnxt_filter_base *usr_fltr, *tmp; 12039 12040 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) 12041 bnxt_cfg_one_usr_fltr(bp, usr_fltr); 12042 } 12043 12044 static int bnxt_set_xps_mapping(struct bnxt *bp) 12045 { 12046 int numa_node = dev_to_node(&bp->pdev->dev); 12047 unsigned int q_idx, map_idx, cpu, i; 12048 const struct cpumask *cpu_mask_ptr; 12049 int nr_cpus = num_online_cpus(); 12050 cpumask_t *q_map; 12051 int rc = 0; 12052 12053 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); 12054 if (!q_map) 12055 return -ENOMEM; 12056 12057 /* Create CPU mask for all TX queues across MQPRIO traffic classes. 12058 * Each TC has the same number of TX queues. The nth TX queue for each 12059 * TC will have the same CPU mask. 
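* For example, with 8 TX rings per TC and 16 online CPUs, the 1st and 9th CPUs (in NUMA-local order) are both mapped to queue 0 of each TC.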
12060 */ 12061 for (i = 0; i < nr_cpus; i++) { 12062 map_idx = i % bp->tx_nr_rings_per_tc; 12063 cpu = cpumask_local_spread(i, numa_node); 12064 cpu_mask_ptr = get_cpu_mask(cpu); 12065 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); 12066 } 12067 12068 /* Register CPU mask for each TX queue except the ones marked for XDP */ 12069 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { 12070 map_idx = q_idx % bp->tx_nr_rings_per_tc; 12071 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); 12072 if (rc) { 12073 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", 12074 q_idx); 12075 break; 12076 } 12077 } 12078 12079 kfree(q_map); 12080 12081 return rc; 12082 } 12083 12084 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12085 { 12086 int rc = 0; 12087 12088 netif_carrier_off(bp->dev); 12089 if (irq_re_init) { 12090 /* Reserve rings now if none were reserved at driver probe. */ 12091 rc = bnxt_init_dflt_ring_mode(bp); 12092 if (rc) { 12093 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 12094 return rc; 12095 } 12096 } 12097 rc = bnxt_reserve_rings(bp, irq_re_init); 12098 if (rc) 12099 return rc; 12100 12101 rc = bnxt_alloc_mem(bp, irq_re_init); 12102 if (rc) { 12103 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12104 goto open_err_free_mem; 12105 } 12106 12107 if (irq_re_init) { 12108 bnxt_init_napi(bp); 12109 rc = bnxt_request_irq(bp); 12110 if (rc) { 12111 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 12112 goto open_err_irq; 12113 } 12114 } 12115 12116 rc = bnxt_init_nic(bp, irq_re_init); 12117 if (rc) { 12118 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12119 goto open_err_irq; 12120 } 12121 12122 bnxt_enable_napi(bp); 12123 bnxt_debug_dev_init(bp); 12124 12125 if (link_re_init) { 12126 mutex_lock(&bp->link_lock); 12127 rc = bnxt_update_phy_setting(bp); 12128 mutex_unlock(&bp->link_lock); 12129 if (rc) { 12130 netdev_warn(bp->dev, "failed to update phy settings\n"); 12131 if (BNXT_SINGLE_PF(bp)) { 12132 bp->link_info.phy_retry = true; 12133 bp->link_info.phy_retry_expires = 12134 jiffies + 5 * HZ; 12135 } 12136 } 12137 } 12138 12139 if (irq_re_init) { 12140 udp_tunnel_nic_reset_ntf(bp->dev); 12141 rc = bnxt_set_xps_mapping(bp); 12142 if (rc) 12143 netdev_warn(bp->dev, "failed to set xps mapping\n"); 12144 } 12145 12146 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 12147 if (!static_key_enabled(&bnxt_xdp_locking_key)) 12148 static_branch_enable(&bnxt_xdp_locking_key); 12149 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 12150 static_branch_disable(&bnxt_xdp_locking_key); 12151 } 12152 set_bit(BNXT_STATE_OPEN, &bp->state); 12153 bnxt_enable_int(bp); 12154 /* Enable TX queues */ 12155 bnxt_tx_enable(bp); 12156 mod_timer(&bp->timer, jiffies + bp->current_interval); 12157 /* Poll link status and check for SFP+ module status */ 12158 mutex_lock(&bp->link_lock); 12159 bnxt_get_port_module_status(bp); 12160 mutex_unlock(&bp->link_lock); 12161 12162 /* VF-reps may need to be re-opened after the PF is re-opened */ 12163 if (BNXT_PF(bp)) 12164 bnxt_vf_reps_open(bp); 12165 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 12166 WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); 12167 bnxt_ptp_init_rtc(bp, true); 12168 bnxt_ptp_cfg_tstamp_filters(bp); 12169 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12170 bnxt_hwrm_realloc_rss_ctx_vnic(bp); 12171 bnxt_cfg_usr_fltrs(bp); 12172 return 0; 12173 12174 open_err_irq: 12175 bnxt_del_napi(bp); 12176 12177 open_err_free_mem: 12178 
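/* Error unwind: free posted buffers, any requested IRQs and the ring memory. */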
bnxt_free_skbs(bp); 12179 bnxt_free_irq(bp); 12180 bnxt_free_mem(bp, true); 12181 return rc; 12182 } 12183 12184 /* rtnl_lock held */ 12185 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12186 { 12187 int rc = 0; 12188 12189 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 12190 rc = -EIO; 12191 if (!rc) 12192 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 12193 if (rc) { 12194 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 12195 dev_close(bp->dev); 12196 } 12197 return rc; 12198 } 12199 12200 /* rtnl_lock held, open the NIC half way by allocating all resources, but 12201 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 12202 * self tests. 12203 */ 12204 int bnxt_half_open_nic(struct bnxt *bp) 12205 { 12206 int rc = 0; 12207 12208 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12209 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 12210 rc = -ENODEV; 12211 goto half_open_err; 12212 } 12213 12214 rc = bnxt_alloc_mem(bp, true); 12215 if (rc) { 12216 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12217 goto half_open_err; 12218 } 12219 bnxt_init_napi(bp); 12220 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12221 rc = bnxt_init_nic(bp, true); 12222 if (rc) { 12223 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12224 bnxt_del_napi(bp); 12225 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12226 goto half_open_err; 12227 } 12228 return 0; 12229 12230 half_open_err: 12231 bnxt_free_skbs(bp); 12232 bnxt_free_mem(bp, true); 12233 dev_close(bp->dev); 12234 return rc; 12235 } 12236 12237 /* rtnl_lock held, this call can only be made after a previous successful 12238 * call to bnxt_half_open_nic(). 12239 */ 12240 void bnxt_half_close_nic(struct bnxt *bp) 12241 { 12242 bnxt_hwrm_resource_free(bp, false, true); 12243 bnxt_del_napi(bp); 12244 bnxt_free_skbs(bp); 12245 bnxt_free_mem(bp, true); 12246 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12247 } 12248 12249 void bnxt_reenable_sriov(struct bnxt *bp) 12250 { 12251 if (BNXT_PF(bp)) { 12252 struct bnxt_pf_info *pf = &bp->pf; 12253 int n = pf->active_vfs; 12254 12255 if (n) 12256 bnxt_cfg_hw_sriov(bp, &n, true); 12257 } 12258 } 12259 12260 static int bnxt_open(struct net_device *dev) 12261 { 12262 struct bnxt *bp = netdev_priv(dev); 12263 int rc; 12264 12265 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12266 rc = bnxt_reinit_after_abort(bp); 12267 if (rc) { 12268 if (rc == -EBUSY) 12269 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 12270 else 12271 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 12272 return -ENODEV; 12273 } 12274 } 12275 12276 rc = bnxt_hwrm_if_change(bp, true); 12277 if (rc) 12278 return rc; 12279 12280 rc = __bnxt_open_nic(bp, true, true); 12281 if (rc) { 12282 bnxt_hwrm_if_change(bp, false); 12283 } else { 12284 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 12285 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12286 bnxt_queue_sp_work(bp, 12287 BNXT_RESTART_ULP_SP_EVENT); 12288 } 12289 } 12290 12291 return rc; 12292 } 12293 12294 static bool bnxt_drv_busy(struct bnxt *bp) 12295 { 12296 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 12297 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 12298 } 12299 12300 static void bnxt_get_ring_stats(struct bnxt *bp, 12301 struct rtnl_link_stats64 *stats); 12302 12303 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 12304 bool link_re_init) 12305 { 12306 /* Close the VF-reps before 
closing PF */ 12307 if (BNXT_PF(bp)) 12308 bnxt_vf_reps_close(bp); 12309 12310 /* Change device state to avoid TX queue wake up's */ 12311 bnxt_tx_disable(bp); 12312 12313 clear_bit(BNXT_STATE_OPEN, &bp->state); 12314 smp_mb__after_atomic(); 12315 while (bnxt_drv_busy(bp)) 12316 msleep(20); 12317 12318 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12319 bnxt_clear_rss_ctxs(bp); 12320 /* Flush rings and disable interrupts */ 12321 bnxt_shutdown_nic(bp, irq_re_init); 12322 12323 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 12324 12325 bnxt_debug_dev_exit(bp); 12326 bnxt_disable_napi(bp); 12327 del_timer_sync(&bp->timer); 12328 bnxt_free_skbs(bp); 12329 12330 /* Save ring stats before shutdown */ 12331 if (bp->bnapi && irq_re_init) { 12332 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 12333 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); 12334 } 12335 if (irq_re_init) { 12336 bnxt_free_irq(bp); 12337 bnxt_del_napi(bp); 12338 } 12339 bnxt_free_mem(bp, irq_re_init); 12340 } 12341 12342 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12343 { 12344 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12345 /* If we get here, it means firmware reset is in progress 12346 * while we are trying to close. We can safely proceed with 12347 * the close because we are holding rtnl_lock(). Some firmware 12348 * messages may fail as we proceed to close. We set the 12349 * ABORT_ERR flag here so that the FW reset thread will later 12350 * abort when it gets the rtnl_lock() and sees the flag. 12351 */ 12352 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 12353 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12354 } 12355 12356 #ifdef CONFIG_BNXT_SRIOV 12357 if (bp->sriov_cfg) { 12358 int rc; 12359 12360 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 12361 !bp->sriov_cfg, 12362 BNXT_SRIOV_CFG_WAIT_TMO); 12363 if (!rc) 12364 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); 12365 else if (rc < 0) 12366 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); 12367 } 12368 #endif 12369 __bnxt_close_nic(bp, irq_re_init, link_re_init); 12370 } 12371 12372 static int bnxt_close(struct net_device *dev) 12373 { 12374 struct bnxt *bp = netdev_priv(dev); 12375 12376 bnxt_close_nic(bp, true, true); 12377 bnxt_hwrm_shutdown_link(bp); 12378 bnxt_hwrm_if_change(bp, false); 12379 return 0; 12380 } 12381 12382 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 12383 u16 *val) 12384 { 12385 struct hwrm_port_phy_mdio_read_output *resp; 12386 struct hwrm_port_phy_mdio_read_input *req; 12387 int rc; 12388 12389 if (bp->hwrm_spec_code < 0x10a00) 12390 return -EOPNOTSUPP; 12391 12392 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 12393 if (rc) 12394 return rc; 12395 12396 req->port_id = cpu_to_le16(bp->pf.port_id); 12397 req->phy_addr = phy_addr; 12398 req->reg_addr = cpu_to_le16(reg & 0x1f); 12399 if (mdio_phy_id_is_c45(phy_addr)) { 12400 req->cl45_mdio = 1; 12401 req->phy_addr = mdio_phy_id_prtad(phy_addr); 12402 req->dev_addr = mdio_phy_id_devad(phy_addr); 12403 req->reg_addr = cpu_to_le16(reg); 12404 } 12405 12406 resp = hwrm_req_hold(bp, req); 12407 rc = hwrm_req_send(bp, req); 12408 if (!rc) 12409 *val = le16_to_cpu(resp->reg_data); 12410 hwrm_req_drop(bp, req); 12411 return rc; 12412 } 12413 12414 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 12415 u16 val) 12416 { 12417 struct 
hwrm_port_phy_mdio_write_input *req; 12418 int rc; 12419 12420 if (bp->hwrm_spec_code < 0x10a00) 12421 return -EOPNOTSUPP; 12422 12423 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 12424 if (rc) 12425 return rc; 12426 12427 req->port_id = cpu_to_le16(bp->pf.port_id); 12428 req->phy_addr = phy_addr; 12429 req->reg_addr = cpu_to_le16(reg & 0x1f); 12430 if (mdio_phy_id_is_c45(phy_addr)) { 12431 req->cl45_mdio = 1; 12432 req->phy_addr = mdio_phy_id_prtad(phy_addr); 12433 req->dev_addr = mdio_phy_id_devad(phy_addr); 12434 req->reg_addr = cpu_to_le16(reg); 12435 } 12436 req->reg_data = cpu_to_le16(val); 12437 12438 return hwrm_req_send(bp, req); 12439 } 12440 12441 /* rtnl_lock held */ 12442 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12443 { 12444 struct mii_ioctl_data *mdio = if_mii(ifr); 12445 struct bnxt *bp = netdev_priv(dev); 12446 int rc; 12447 12448 switch (cmd) { 12449 case SIOCGMIIPHY: 12450 mdio->phy_id = bp->link_info.phy_addr; 12451 12452 fallthrough; 12453 case SIOCGMIIREG: { 12454 u16 mii_regval = 0; 12455 12456 if (!netif_running(dev)) 12457 return -EAGAIN; 12458 12459 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 12460 &mii_regval); 12461 mdio->val_out = mii_regval; 12462 return rc; 12463 } 12464 12465 case SIOCSMIIREG: 12466 if (!netif_running(dev)) 12467 return -EAGAIN; 12468 12469 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 12470 mdio->val_in); 12471 12472 case SIOCSHWTSTAMP: 12473 return bnxt_hwtstamp_set(dev, ifr); 12474 12475 case SIOCGHWTSTAMP: 12476 return bnxt_hwtstamp_get(dev, ifr); 12477 12478 default: 12479 /* do nothing */ 12480 break; 12481 } 12482 return -EOPNOTSUPP; 12483 } 12484 12485 static void bnxt_get_ring_stats(struct bnxt *bp, 12486 struct rtnl_link_stats64 *stats) 12487 { 12488 int i; 12489 12490 for (i = 0; i < bp->cp_nr_rings; i++) { 12491 struct bnxt_napi *bnapi = bp->bnapi[i]; 12492 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 12493 u64 *sw = cpr->stats.sw_stats; 12494 12495 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 12496 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 12497 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 12498 12499 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 12500 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 12501 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 12502 12503 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 12504 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 12505 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 12506 12507 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 12508 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 12509 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 12510 12511 stats->rx_missed_errors += 12512 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 12513 12514 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 12515 12516 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 12517 12518 stats->rx_dropped += 12519 cpr->sw_stats->rx.rx_netpoll_discards + 12520 cpr->sw_stats->rx.rx_oom_discards; 12521 } 12522 } 12523 12524 static void bnxt_add_prev_stats(struct bnxt *bp, 12525 struct rtnl_link_stats64 *stats) 12526 { 12527 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 12528 12529 stats->rx_packets += prev_stats->rx_packets; 12530 stats->tx_packets += prev_stats->tx_packets; 12531 stats->rx_bytes += 
prev_stats->rx_bytes; 12532 stats->tx_bytes += prev_stats->tx_bytes; 12533 stats->rx_missed_errors += prev_stats->rx_missed_errors; 12534 stats->multicast += prev_stats->multicast; 12535 stats->rx_dropped += prev_stats->rx_dropped; 12536 stats->tx_dropped += prev_stats->tx_dropped; 12537 } 12538 12539 static void 12540 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 12541 { 12542 struct bnxt *bp = netdev_priv(dev); 12543 12544 set_bit(BNXT_STATE_READ_STATS, &bp->state); 12545 /* Make sure bnxt_close_nic() sees that we are reading stats before 12546 * we check the BNXT_STATE_OPEN flag. 12547 */ 12548 smp_mb__after_atomic(); 12549 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12550 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 12551 *stats = bp->net_stats_prev; 12552 return; 12553 } 12554 12555 bnxt_get_ring_stats(bp, stats); 12556 bnxt_add_prev_stats(bp, stats); 12557 12558 if (bp->flags & BNXT_FLAG_PORT_STATS) { 12559 u64 *rx = bp->port_stats.sw_stats; 12560 u64 *tx = bp->port_stats.sw_stats + 12561 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 12562 12563 stats->rx_crc_errors = 12564 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 12565 stats->rx_frame_errors = 12566 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 12567 stats->rx_length_errors = 12568 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 12569 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 12570 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 12571 stats->rx_errors = 12572 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 12573 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 12574 stats->collisions = 12575 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 12576 stats->tx_fifo_errors = 12577 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 12578 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 12579 } 12580 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 12581 } 12582 12583 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 12584 struct bnxt_total_ring_err_stats *stats, 12585 struct bnxt_cp_ring_info *cpr) 12586 { 12587 struct bnxt_sw_stats *sw_stats = cpr->sw_stats; 12588 u64 *hw_stats = cpr->stats.sw_stats; 12589 12590 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 12591 stats->rx_total_resets += sw_stats->rx.rx_resets; 12592 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 12593 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 12594 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 12595 stats->rx_total_ring_discards += 12596 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 12597 stats->tx_total_resets += sw_stats->tx.tx_resets; 12598 stats->tx_total_ring_discards += 12599 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 12600 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 12601 } 12602 12603 void bnxt_get_ring_err_stats(struct bnxt *bp, 12604 struct bnxt_total_ring_err_stats *stats) 12605 { 12606 int i; 12607 12608 for (i = 0; i < bp->cp_nr_rings; i++) 12609 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 12610 } 12611 12612 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 12613 { 12614 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12615 struct net_device *dev = bp->dev; 12616 struct netdev_hw_addr *ha; 12617 u8 *haddr; 12618 int mc_count = 0; 12619 bool update = false; 12620 int off = 0; 12621 12622 netdev_for_each_mc_addr(ha, dev) { 12623 if (mc_count >= BNXT_MAX_MC_ADDRS) { 12624 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12625 vnic->mc_list_count = 0; 12626 
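/* MC list overflow: ALL_MCAST was turned on above, no per-address update needed */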
return false; 12627 } 12628 haddr = ha->addr; 12629 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 12630 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 12631 update = true; 12632 } 12633 off += ETH_ALEN; 12634 mc_count++; 12635 } 12636 if (mc_count) 12637 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 12638 12639 if (mc_count != vnic->mc_list_count) { 12640 vnic->mc_list_count = mc_count; 12641 update = true; 12642 } 12643 return update; 12644 } 12645 12646 static bool bnxt_uc_list_updated(struct bnxt *bp) 12647 { 12648 struct net_device *dev = bp->dev; 12649 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12650 struct netdev_hw_addr *ha; 12651 int off = 0; 12652 12653 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 12654 return true; 12655 12656 netdev_for_each_uc_addr(ha, dev) { 12657 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 12658 return true; 12659 12660 off += ETH_ALEN; 12661 } 12662 return false; 12663 } 12664 12665 static void bnxt_set_rx_mode(struct net_device *dev) 12666 { 12667 struct bnxt *bp = netdev_priv(dev); 12668 struct bnxt_vnic_info *vnic; 12669 bool mc_update = false; 12670 bool uc_update; 12671 u32 mask; 12672 12673 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 12674 return; 12675 12676 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12677 mask = vnic->rx_mask; 12678 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 12679 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 12680 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 12681 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 12682 12683 if (dev->flags & IFF_PROMISC) 12684 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12685 12686 uc_update = bnxt_uc_list_updated(bp); 12687 12688 if (dev->flags & IFF_BROADCAST) 12689 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 12690 if (dev->flags & IFF_ALLMULTI) { 12691 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12692 vnic->mc_list_count = 0; 12693 } else if (dev->flags & IFF_MULTICAST) { 12694 mc_update = bnxt_mc_list_updated(bp, &mask); 12695 } 12696 12697 if (mask != vnic->rx_mask || uc_update || mc_update) { 12698 vnic->rx_mask = mask; 12699 12700 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12701 } 12702 } 12703 12704 static int bnxt_cfg_rx_mode(struct bnxt *bp) 12705 { 12706 struct net_device *dev = bp->dev; 12707 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12708 struct netdev_hw_addr *ha; 12709 int i, off = 0, rc; 12710 bool uc_update; 12711 12712 netif_addr_lock_bh(dev); 12713 uc_update = bnxt_uc_list_updated(bp); 12714 netif_addr_unlock_bh(dev); 12715 12716 if (!uc_update) 12717 goto skip_uc; 12718 12719 for (i = 1; i < vnic->uc_filter_count; i++) { 12720 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 12721 12722 bnxt_hwrm_l2_filter_free(bp, fltr); 12723 bnxt_del_l2_filter(bp, fltr); 12724 } 12725 12726 vnic->uc_filter_count = 1; 12727 12728 netif_addr_lock_bh(dev); 12729 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 12730 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12731 } else { 12732 netdev_for_each_uc_addr(ha, dev) { 12733 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 12734 off += ETH_ALEN; 12735 vnic->uc_filter_count++; 12736 } 12737 } 12738 netif_addr_unlock_bh(dev); 12739 12740 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 12741 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 12742 if (rc) { 12743 if (BNXT_VF(bp) && rc == -ENODEV) { 12744 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12745 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is 
unavailable, will retry\n"); 12746 else 12747 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); 12748 rc = 0; 12749 } else { 12750 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 12751 } 12752 vnic->uc_filter_count = i; 12753 return rc; 12754 } 12755 } 12756 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12757 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 12758 12759 skip_uc: 12760 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 12761 !bnxt_promisc_ok(bp)) 12762 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12763 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 12764 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 12765 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 12766 rc); 12767 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 12768 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12769 vnic->mc_list_count = 0; 12770 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 12771 } 12772 if (rc) 12773 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 12774 rc); 12775 12776 return rc; 12777 } 12778 12779 static bool bnxt_can_reserve_rings(struct bnxt *bp) 12780 { 12781 #ifdef CONFIG_BNXT_SRIOV 12782 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 12783 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12784 12785 /* No minimum rings were provisioned by the PF. Don't 12786 * reserve rings by default when device is down. 12787 */ 12788 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 12789 return true; 12790 12791 if (!netif_running(bp->dev)) 12792 return false; 12793 } 12794 #endif 12795 return true; 12796 } 12797 12798 /* If the chip and firmware supports RFS */ 12799 static bool bnxt_rfs_supported(struct bnxt *bp) 12800 { 12801 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 12802 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 12803 return true; 12804 return false; 12805 } 12806 /* 212 firmware is broken for aRFS */ 12807 if (BNXT_FW_MAJ(bp) == 212) 12808 return false; 12809 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 12810 return true; 12811 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 12812 return true; 12813 return false; 12814 } 12815 12816 /* If runtime conditions support RFS */ 12817 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) 12818 { 12819 struct bnxt_hw_rings hwr = {0}; 12820 int max_vnics, max_rss_ctxs; 12821 12822 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12823 !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 12824 return bnxt_rfs_supported(bp); 12825 12826 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 12827 return false; 12828 12829 hwr.grp = bp->rx_nr_rings; 12830 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); 12831 if (new_rss_ctx) 12832 hwr.vnic++; 12833 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 12834 max_vnics = bnxt_get_max_func_vnics(bp); 12835 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 12836 12837 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { 12838 if (bp->rx_nr_rings > 1) 12839 netdev_warn(bp->dev, 12840 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 12841 min(max_rss_ctxs - 1, max_vnics - 1)); 12842 return false; 12843 } 12844 12845 if (!BNXT_NEW_RM(bp)) 12846 return true; 12847 12848 /* Do not reduce VNIC and RSS ctx reservations. There is a FW 12849 * issue that will mess up the default VNIC if we reduce the 12850 * reservations. 
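* Check against the current reservations first and only ask the FW for more when needed.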
12851 */ 12852 if (hwr.vnic <= bp->hw_resc.resv_vnics && 12853 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12854 return true; 12855 12856 bnxt_hwrm_reserve_rings(bp, &hwr); 12857 if (hwr.vnic <= bp->hw_resc.resv_vnics && 12858 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12859 return true; 12860 12861 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 12862 hwr.vnic = 1; 12863 hwr.rss_ctx = 0; 12864 bnxt_hwrm_reserve_rings(bp, &hwr); 12865 return false; 12866 } 12867 12868 static netdev_features_t bnxt_fix_features(struct net_device *dev, 12869 netdev_features_t features) 12870 { 12871 struct bnxt *bp = netdev_priv(dev); 12872 netdev_features_t vlan_features; 12873 12874 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) 12875 features &= ~NETIF_F_NTUPLE; 12876 12877 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 12878 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12879 12880 if (!(features & NETIF_F_GRO)) 12881 features &= ~NETIF_F_GRO_HW; 12882 12883 if (features & NETIF_F_GRO_HW) 12884 features &= ~NETIF_F_LRO; 12885 12886 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 12887 * turned on or off together. 12888 */ 12889 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 12890 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 12891 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12892 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12893 else if (vlan_features) 12894 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 12895 } 12896 #ifdef CONFIG_BNXT_SRIOV 12897 if (BNXT_VF(bp) && bp->vf.vlan) 12898 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12899 #endif 12900 return features; 12901 } 12902 12903 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 12904 bool link_re_init, u32 flags, bool update_tpa) 12905 { 12906 bnxt_close_nic(bp, irq_re_init, link_re_init); 12907 bp->flags = flags; 12908 if (update_tpa) 12909 bnxt_set_ring_params(bp); 12910 return bnxt_open_nic(bp, irq_re_init, link_re_init); 12911 } 12912 12913 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 12914 { 12915 bool update_tpa = false, update_ntuple = false; 12916 struct bnxt *bp = netdev_priv(dev); 12917 u32 flags = bp->flags; 12918 u32 changes; 12919 int rc = 0; 12920 bool re_init = false; 12921 12922 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 12923 if (features & NETIF_F_GRO_HW) 12924 flags |= BNXT_FLAG_GRO; 12925 else if (features & NETIF_F_LRO) 12926 flags |= BNXT_FLAG_LRO; 12927 12928 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 12929 flags &= ~BNXT_FLAG_TPA; 12930 12931 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12932 flags |= BNXT_FLAG_STRIP_VLAN; 12933 12934 if (features & NETIF_F_NTUPLE) 12935 flags |= BNXT_FLAG_RFS; 12936 else 12937 bnxt_clear_usr_fltrs(bp, true); 12938 12939 changes = flags ^ bp->flags; 12940 if (changes & BNXT_FLAG_TPA) { 12941 update_tpa = true; 12942 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 12943 (flags & BNXT_FLAG_TPA) == 0 || 12944 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12945 re_init = true; 12946 } 12947 12948 if (changes & ~BNXT_FLAG_TPA) 12949 re_init = true; 12950 12951 if (changes & BNXT_FLAG_RFS) 12952 update_ntuple = true; 12953 12954 if (flags != bp->flags) { 12955 u32 old_flags = bp->flags; 12956 12957 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12958 bp->flags = flags; 12959 if (update_tpa) 12960 bnxt_set_ring_params(bp); 12961 return rc; 12962 } 12963 12964 if (update_ntuple) 12965 return bnxt_reinit_features(bp, true, false, flags, update_tpa); 12966 12967 if (re_init) 12968 
return bnxt_reinit_features(bp, false, false, flags, update_tpa); 12969 12970 if (update_tpa) { 12971 bp->flags = flags; 12972 rc = bnxt_set_tpa(bp, 12973 (flags & BNXT_FLAG_TPA) ? 12974 true : false); 12975 if (rc) 12976 bp->flags = old_flags; 12977 } 12978 } 12979 return rc; 12980 } 12981 12982 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 12983 u8 **nextp) 12984 { 12985 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 12986 struct hop_jumbo_hdr *jhdr; 12987 int hdr_count = 0; 12988 u8 *nexthdr; 12989 int start; 12990 12991 /* Check that there are at most 2 IPv6 extension headers, no 12992 * fragment header, and each is <= 64 bytes. 12993 */ 12994 start = nw_off + sizeof(*ip6h); 12995 nexthdr = &ip6h->nexthdr; 12996 while (ipv6_ext_hdr(*nexthdr)) { 12997 struct ipv6_opt_hdr *hp; 12998 int hdrlen; 12999 13000 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 13001 *nexthdr == NEXTHDR_FRAGMENT) 13002 return false; 13003 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 13004 skb_headlen(skb), NULL); 13005 if (!hp) 13006 return false; 13007 if (*nexthdr == NEXTHDR_AUTH) 13008 hdrlen = ipv6_authlen(hp); 13009 else 13010 hdrlen = ipv6_optlen(hp); 13011 13012 if (hdrlen > 64) 13013 return false; 13014 13015 /* The ext header may be a hop-by-hop header inserted for 13016 * big TCP purposes. This will be removed before sending 13017 * from NIC, so do not count it. 13018 */ 13019 if (*nexthdr == NEXTHDR_HOP) { 13020 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 13021 goto increment_hdr; 13022 13023 jhdr = (struct hop_jumbo_hdr *)hp; 13024 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 13025 jhdr->nexthdr != IPPROTO_TCP) 13026 goto increment_hdr; 13027 13028 goto next_hdr; 13029 } 13030 increment_hdr: 13031 hdr_count++; 13032 next_hdr: 13033 nexthdr = &hp->nexthdr; 13034 start += hdrlen; 13035 } 13036 if (nextp) { 13037 /* Caller will check inner protocol */ 13038 if (skb->encapsulation) { 13039 *nextp = nexthdr; 13040 return true; 13041 } 13042 *nextp = NULL; 13043 } 13044 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 13045 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 13046 } 13047 13048 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. 
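* (plus one VXLAN-GPE port, per the port checks below)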
*/ 13049 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 13050 { 13051 struct udphdr *uh = udp_hdr(skb); 13052 __be16 udp_port = uh->dest; 13053 13054 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 13055 udp_port != bp->vxlan_gpe_port) 13056 return false; 13057 if (skb->inner_protocol == htons(ETH_P_TEB)) { 13058 struct ethhdr *eh = inner_eth_hdr(skb); 13059 13060 switch (eh->h_proto) { 13061 case htons(ETH_P_IP): 13062 return true; 13063 case htons(ETH_P_IPV6): 13064 return bnxt_exthdr_check(bp, skb, 13065 skb_inner_network_offset(skb), 13066 NULL); 13067 } 13068 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 13069 return true; 13070 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 13071 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13072 NULL); 13073 } 13074 return false; 13075 } 13076 13077 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 13078 { 13079 switch (l4_proto) { 13080 case IPPROTO_UDP: 13081 return bnxt_udp_tunl_check(bp, skb); 13082 case IPPROTO_IPIP: 13083 return true; 13084 case IPPROTO_GRE: { 13085 switch (skb->inner_protocol) { 13086 default: 13087 return false; 13088 case htons(ETH_P_IP): 13089 return true; 13090 case htons(ETH_P_IPV6): 13091 fallthrough; 13092 } 13093 } 13094 case IPPROTO_IPV6: 13095 /* Check ext headers of inner ipv6 */ 13096 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13097 NULL); 13098 } 13099 return false; 13100 } 13101 13102 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 13103 struct net_device *dev, 13104 netdev_features_t features) 13105 { 13106 struct bnxt *bp = netdev_priv(dev); 13107 u8 *l4_proto; 13108 13109 features = vlan_features_check(skb, features); 13110 switch (vlan_get_protocol(skb)) { 13111 case htons(ETH_P_IP): 13112 if (!skb->encapsulation) 13113 return features; 13114 l4_proto = &ip_hdr(skb)->protocol; 13115 if (bnxt_tunl_check(bp, skb, *l4_proto)) 13116 return features; 13117 break; 13118 case htons(ETH_P_IPV6): 13119 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 13120 &l4_proto)) 13121 break; 13122 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 13123 return features; 13124 break; 13125 } 13126 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 13127 } 13128 13129 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 13130 u32 *reg_buf) 13131 { 13132 struct hwrm_dbg_read_direct_output *resp; 13133 struct hwrm_dbg_read_direct_input *req; 13134 __le32 *dbg_reg_buf; 13135 dma_addr_t mapping; 13136 int rc, i; 13137 13138 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 13139 if (rc) 13140 return rc; 13141 13142 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 13143 &mapping); 13144 if (!dbg_reg_buf) { 13145 rc = -ENOMEM; 13146 goto dbg_rd_reg_exit; 13147 } 13148 13149 req->host_dest_addr = cpu_to_le64(mapping); 13150 13151 resp = hwrm_req_hold(bp, req); 13152 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 13153 req->read_len32 = cpu_to_le32(num_words); 13154 13155 rc = hwrm_req_send(bp, req); 13156 if (rc || resp->error_code) { 13157 rc = -EIO; 13158 goto dbg_rd_reg_exit; 13159 } 13160 for (i = 0; i < num_words; i++) 13161 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 13162 13163 dbg_rd_reg_exit: 13164 hwrm_req_drop(bp, req); 13165 return rc; 13166 } 13167 13168 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 13169 u32 ring_id, u32 *prod, u32 *cons) 13170 { 13171 struct hwrm_dbg_ring_info_get_output *resp; 13172 
struct hwrm_dbg_ring_info_get_input *req; 13173 int rc; 13174 13175 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 13176 if (rc) 13177 return rc; 13178 13179 req->ring_type = ring_type; 13180 req->fw_ring_id = cpu_to_le32(ring_id); 13181 resp = hwrm_req_hold(bp, req); 13182 rc = hwrm_req_send(bp, req); 13183 if (!rc) { 13184 *prod = le32_to_cpu(resp->producer_index); 13185 *cons = le32_to_cpu(resp->consumer_index); 13186 } 13187 hwrm_req_drop(bp, req); 13188 return rc; 13189 } 13190 13191 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 13192 { 13193 struct bnxt_tx_ring_info *txr; 13194 int i = bnapi->index, j; 13195 13196 bnxt_for_each_napi_tx(j, bnapi, txr) 13197 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 13198 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 13199 txr->tx_cons); 13200 } 13201 13202 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 13203 { 13204 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 13205 int i = bnapi->index; 13206 13207 if (!rxr) 13208 return; 13209 13210 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 13211 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 13212 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 13213 rxr->rx_sw_agg_prod); 13214 } 13215 13216 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 13217 { 13218 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13219 int i = bnapi->index; 13220 13221 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 13222 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 13223 } 13224 13225 static void bnxt_dbg_dump_states(struct bnxt *bp) 13226 { 13227 int i; 13228 struct bnxt_napi *bnapi; 13229 13230 for (i = 0; i < bp->cp_nr_rings; i++) { 13231 bnapi = bp->bnapi[i]; 13232 if (netif_msg_drv(bp)) { 13233 bnxt_dump_tx_sw_state(bnapi); 13234 bnxt_dump_rx_sw_state(bnapi); 13235 bnxt_dump_cp_sw_state(bnapi); 13236 } 13237 } 13238 } 13239 13240 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 13241 { 13242 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 13243 struct hwrm_ring_reset_input *req; 13244 struct bnxt_napi *bnapi = rxr->bnapi; 13245 struct bnxt_cp_ring_info *cpr; 13246 u16 cp_ring_id; 13247 int rc; 13248 13249 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 13250 if (rc) 13251 return rc; 13252 13253 cpr = &bnapi->cp_ring; 13254 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 13255 req->cmpl_ring = cpu_to_le16(cp_ring_id); 13256 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 13257 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 13258 return hwrm_req_send_silent(bp, req); 13259 } 13260 13261 static void bnxt_reset_task(struct bnxt *bp, bool silent) 13262 { 13263 if (!silent) 13264 bnxt_dbg_dump_states(bp); 13265 if (netif_running(bp->dev)) { 13266 bnxt_close_nic(bp, !silent, false); 13267 bnxt_open_nic(bp, !silent, false); 13268 } 13269 } 13270 13271 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 13272 { 13273 struct bnxt *bp = netdev_priv(dev); 13274 13275 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 13276 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 13277 } 13278 13279 static void bnxt_fw_health_check(struct bnxt *bp) 13280 { 13281 struct bnxt_fw_health *fw_health = bp->fw_health; 13282 struct pci_dev *pdev = bp->pdev; 13283 u32 val; 13284 13285 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13286 return; 13287 13288 /* Make sure 
it is enabled before checking the tmr_counter. */ 13289 smp_rmb(); 13290 if (fw_health->tmr_counter) { 13291 fw_health->tmr_counter--; 13292 return; 13293 } 13294 13295 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13296 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 13297 fw_health->arrests++; 13298 goto fw_reset; 13299 } 13300 13301 fw_health->last_fw_heartbeat = val; 13302 13303 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13304 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 13305 fw_health->discoveries++; 13306 goto fw_reset; 13307 } 13308 13309 fw_health->tmr_counter = fw_health->tmr_multiplier; 13310 return; 13311 13312 fw_reset: 13313 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 13314 } 13315 13316 static void bnxt_timer(struct timer_list *t) 13317 { 13318 struct bnxt *bp = from_timer(bp, t, timer); 13319 struct net_device *dev = bp->dev; 13320 13321 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 13322 return; 13323 13324 if (atomic_read(&bp->intr_sem) != 0) 13325 goto bnxt_restart_timer; 13326 13327 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 13328 bnxt_fw_health_check(bp); 13329 13330 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 13331 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 13332 13333 if (bnxt_tc_flower_enabled(bp)) 13334 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 13335 13336 #ifdef CONFIG_RFS_ACCEL 13337 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 13338 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13339 #endif /*CONFIG_RFS_ACCEL*/ 13340 13341 if (bp->link_info.phy_retry) { 13342 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 13343 bp->link_info.phy_retry = false; 13344 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 13345 } else { 13346 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 13347 } 13348 } 13349 13350 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13351 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13352 13353 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 13354 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 13355 13356 bnxt_restart_timer: 13357 mod_timer(&bp->timer, jiffies + bp->current_interval); 13358 } 13359 13360 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 13361 { 13362 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 13363 * set. If the device is being closed, bnxt_close() may be holding 13364 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 13365 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
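* The bit is set again in bnxt_rtnl_unlock_sp().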
13366 */ 13367 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13368 rtnl_lock(); 13369 } 13370 13371 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 13372 { 13373 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13374 rtnl_unlock(); 13375 } 13376 13377 /* Only called from bnxt_sp_task() */ 13378 static void bnxt_reset(struct bnxt *bp, bool silent) 13379 { 13380 bnxt_rtnl_lock_sp(bp); 13381 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 13382 bnxt_reset_task(bp, silent); 13383 bnxt_rtnl_unlock_sp(bp); 13384 } 13385 13386 /* Only called from bnxt_sp_task() */ 13387 static void bnxt_rx_ring_reset(struct bnxt *bp) 13388 { 13389 int i; 13390 13391 bnxt_rtnl_lock_sp(bp); 13392 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13393 bnxt_rtnl_unlock_sp(bp); 13394 return; 13395 } 13396 /* Disable and flush TPA before resetting the RX ring */ 13397 if (bp->flags & BNXT_FLAG_TPA) 13398 bnxt_set_tpa(bp, false); 13399 for (i = 0; i < bp->rx_nr_rings; i++) { 13400 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 13401 struct bnxt_cp_ring_info *cpr; 13402 int rc; 13403 13404 if (!rxr->bnapi->in_reset) 13405 continue; 13406 13407 rc = bnxt_hwrm_rx_ring_reset(bp, i); 13408 if (rc) { 13409 if (rc == -EINVAL || rc == -EOPNOTSUPP) 13410 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 13411 else 13412 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 13413 rc); 13414 bnxt_reset_task(bp, true); 13415 break; 13416 } 13417 bnxt_free_one_rx_ring_skbs(bp, i); 13418 rxr->rx_prod = 0; 13419 rxr->rx_agg_prod = 0; 13420 rxr->rx_sw_agg_prod = 0; 13421 rxr->rx_next_cons = 0; 13422 rxr->bnapi->in_reset = false; 13423 bnxt_alloc_one_rx_ring(bp, i); 13424 cpr = &rxr->bnapi->cp_ring; 13425 cpr->sw_stats->rx.rx_resets++; 13426 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13427 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 13428 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 13429 } 13430 if (bp->flags & BNXT_FLAG_TPA) 13431 bnxt_set_tpa(bp, true); 13432 bnxt_rtnl_unlock_sp(bp); 13433 } 13434 13435 static void bnxt_fw_fatal_close(struct bnxt *bp) 13436 { 13437 bnxt_tx_disable(bp); 13438 bnxt_disable_napi(bp); 13439 bnxt_disable_int_sync(bp); 13440 bnxt_free_irq(bp); 13441 bnxt_clear_int_mode(bp); 13442 pci_disable_device(bp->pdev); 13443 } 13444 13445 static void bnxt_fw_reset_close(struct bnxt *bp) 13446 { 13447 /* When firmware is in fatal state, quiesce device and disable 13448 * bus master to prevent any potential bad DMAs before freeing 13449 * kernel memory. 
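* bnxt_fw_fatal_close() below quiesces TX, NAPI and interrupts and disables the PCI device.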
13450 */ 13451 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 13452 u16 val = 0; 13453 13454 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 13455 if (val == 0xffff) 13456 bp->fw_reset_min_dsecs = 0; 13457 bnxt_fw_fatal_close(bp); 13458 } 13459 __bnxt_close_nic(bp, true, false); 13460 bnxt_vf_reps_free(bp); 13461 bnxt_clear_int_mode(bp); 13462 bnxt_hwrm_func_drv_unrgtr(bp); 13463 if (pci_is_enabled(bp->pdev)) 13464 pci_disable_device(bp->pdev); 13465 bnxt_free_ctx_mem(bp); 13466 } 13467 13468 static bool is_bnxt_fw_ok(struct bnxt *bp) 13469 { 13470 struct bnxt_fw_health *fw_health = bp->fw_health; 13471 bool no_heartbeat = false, has_reset = false; 13472 u32 val; 13473 13474 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13475 if (val == fw_health->last_fw_heartbeat) 13476 no_heartbeat = true; 13477 13478 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13479 if (val != fw_health->last_fw_reset_cnt) 13480 has_reset = true; 13481 13482 if (!no_heartbeat && has_reset) 13483 return true; 13484 13485 return false; 13486 } 13487 13488 /* rtnl_lock is acquired before calling this function */ 13489 static void bnxt_force_fw_reset(struct bnxt *bp) 13490 { 13491 struct bnxt_fw_health *fw_health = bp->fw_health; 13492 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 13493 u32 wait_dsecs; 13494 13495 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 13496 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13497 return; 13498 13499 if (ptp) { 13500 unsigned long flags; 13501 13502 spin_lock_irqsave(&ptp->ptp_lock, flags); 13503 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13504 spin_unlock_irqrestore(&ptp->ptp_lock, flags); 13505 } else { 13506 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13507 } 13508 bnxt_fw_reset_close(bp); 13509 wait_dsecs = fw_health->master_func_wait_dsecs; 13510 if (fw_health->primary) { 13511 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 13512 wait_dsecs = 0; 13513 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 13514 } else { 13515 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 13516 wait_dsecs = fw_health->normal_func_wait_dsecs; 13517 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13518 } 13519 13520 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 13521 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 13522 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 13523 } 13524 13525 void bnxt_fw_exception(struct bnxt *bp) 13526 { 13527 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 13528 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 13529 bnxt_ulp_stop(bp); 13530 bnxt_rtnl_lock_sp(bp); 13531 bnxt_force_fw_reset(bp); 13532 bnxt_rtnl_unlock_sp(bp); 13533 } 13534 13535 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 13536 * < 0 on error. 
13537 */ 13538 static int bnxt_get_registered_vfs(struct bnxt *bp) 13539 { 13540 #ifdef CONFIG_BNXT_SRIOV 13541 int rc; 13542 13543 if (!BNXT_PF(bp)) 13544 return 0; 13545 13546 rc = bnxt_hwrm_func_qcfg(bp); 13547 if (rc) { 13548 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 13549 return rc; 13550 } 13551 if (bp->pf.registered_vfs) 13552 return bp->pf.registered_vfs; 13553 if (bp->sriov_cfg) 13554 return 1; 13555 #endif 13556 return 0; 13557 } 13558 13559 void bnxt_fw_reset(struct bnxt *bp) 13560 { 13561 bnxt_ulp_stop(bp); 13562 bnxt_rtnl_lock_sp(bp); 13563 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 13564 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 13565 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 13566 int n = 0, tmo; 13567 13568 if (ptp) { 13569 unsigned long flags; 13570 13571 spin_lock_irqsave(&ptp->ptp_lock, flags); 13572 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13573 spin_unlock_irqrestore(&ptp->ptp_lock, flags); 13574 } else { 13575 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13576 } 13577 if (bp->pf.active_vfs && 13578 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 13579 n = bnxt_get_registered_vfs(bp); 13580 if (n < 0) { 13581 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 13582 n); 13583 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13584 dev_close(bp->dev); 13585 goto fw_reset_exit; 13586 } else if (n > 0) { 13587 u16 vf_tmo_dsecs = n * 10; 13588 13589 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 13590 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 13591 bp->fw_reset_state = 13592 BNXT_FW_RESET_STATE_POLL_VF; 13593 bnxt_queue_fw_reset_work(bp, HZ / 10); 13594 goto fw_reset_exit; 13595 } 13596 bnxt_fw_reset_close(bp); 13597 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13598 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 13599 tmo = HZ / 10; 13600 } else { 13601 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13602 tmo = bp->fw_reset_min_dsecs * HZ / 10; 13603 } 13604 bnxt_queue_fw_reset_work(bp, tmo); 13605 } 13606 fw_reset_exit: 13607 bnxt_rtnl_unlock_sp(bp); 13608 } 13609 13610 static void bnxt_chk_missed_irq(struct bnxt *bp) 13611 { 13612 int i; 13613 13614 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13615 return; 13616 13617 for (i = 0; i < bp->cp_nr_rings; i++) { 13618 struct bnxt_napi *bnapi = bp->bnapi[i]; 13619 struct bnxt_cp_ring_info *cpr; 13620 u32 fw_ring_id; 13621 int j; 13622 13623 if (!bnapi) 13624 continue; 13625 13626 cpr = &bnapi->cp_ring; 13627 for (j = 0; j < cpr->cp_ring_count; j++) { 13628 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 13629 u32 val[2]; 13630 13631 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 13632 continue; 13633 13634 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 13635 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 13636 continue; 13637 } 13638 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 13639 bnxt_dbg_hwrm_ring_info_get(bp, 13640 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 13641 fw_ring_id, &val[0], &val[1]); 13642 cpr->sw_stats->cmn.missed_irqs++; 13643 } 13644 } 13645 } 13646 13647 static void bnxt_cfg_ntp_filters(struct bnxt *); 13648 13649 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 13650 { 13651 struct bnxt_link_info *link_info = &bp->link_info; 13652 13653 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 13654 link_info->autoneg = BNXT_AUTONEG_SPEED; 13655 if (bp->hwrm_spec_code >= 0x10201) { 13656 if (link_info->auto_pause_setting & 13657 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 13658 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 13659 } else { 13660 
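/* Firmware older than HWRM spec 1.2.1 cannot report the auto pause setting, so assume flow control is autonegotiated. */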
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 13661 } 13662 bnxt_set_auto_speed(link_info); 13663 } else { 13664 bnxt_set_force_speed(link_info); 13665 link_info->req_duplex = link_info->duplex_setting; 13666 } 13667 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 13668 link_info->req_flow_ctrl = 13669 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 13670 else 13671 link_info->req_flow_ctrl = link_info->force_pause_setting; 13672 } 13673 13674 static void bnxt_fw_echo_reply(struct bnxt *bp) 13675 { 13676 struct bnxt_fw_health *fw_health = bp->fw_health; 13677 struct hwrm_func_echo_response_input *req; 13678 int rc; 13679 13680 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 13681 if (rc) 13682 return; 13683 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 13684 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 13685 hwrm_req_send(bp, req); 13686 } 13687 13688 static void bnxt_ulp_restart(struct bnxt *bp) 13689 { 13690 bnxt_ulp_stop(bp); 13691 bnxt_ulp_start(bp, 0); 13692 } 13693 13694 static void bnxt_sp_task(struct work_struct *work) 13695 { 13696 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 13697 13698 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13699 smp_mb__after_atomic(); 13700 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13701 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13702 return; 13703 } 13704 13705 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { 13706 bnxt_ulp_restart(bp); 13707 bnxt_reenable_sriov(bp); 13708 } 13709 13710 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 13711 bnxt_cfg_rx_mode(bp); 13712 13713 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 13714 bnxt_cfg_ntp_filters(bp); 13715 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 13716 bnxt_hwrm_exec_fwd_req(bp); 13717 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 13718 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 13719 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 13720 bnxt_hwrm_port_qstats(bp, 0); 13721 bnxt_hwrm_port_qstats_ext(bp, 0); 13722 bnxt_accumulate_all_stats(bp); 13723 } 13724 13725 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 13726 int rc; 13727 13728 mutex_lock(&bp->link_lock); 13729 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 13730 &bp->sp_event)) 13731 bnxt_hwrm_phy_qcaps(bp); 13732 13733 rc = bnxt_update_link(bp, true); 13734 if (rc) 13735 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 13736 rc); 13737 13738 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 13739 &bp->sp_event)) 13740 bnxt_init_ethtool_link_settings(bp); 13741 mutex_unlock(&bp->link_lock); 13742 } 13743 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 13744 int rc; 13745 13746 mutex_lock(&bp->link_lock); 13747 rc = bnxt_update_phy_setting(bp); 13748 mutex_unlock(&bp->link_lock); 13749 if (rc) { 13750 netdev_warn(bp->dev, "update phy settings retry failed\n"); 13751 } else { 13752 bp->link_info.phy_retry = false; 13753 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 13754 } 13755 } 13756 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 13757 mutex_lock(&bp->link_lock); 13758 bnxt_get_port_module_status(bp); 13759 mutex_unlock(&bp->link_lock); 13760 } 13761 13762 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 13763 bnxt_tc_flow_stats_work(bp); 13764 13765 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 13766 
bnxt_chk_missed_irq(bp); 13767 13768 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 13769 bnxt_fw_echo_reply(bp); 13770 13771 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 13772 bnxt_hwmon_notify_event(bp); 13773 13774 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 13775 * must be the last functions to be called before exiting. 13776 */ 13777 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 13778 bnxt_reset(bp, false); 13779 13780 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 13781 bnxt_reset(bp, true); 13782 13783 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 13784 bnxt_rx_ring_reset(bp); 13785 13786 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 13787 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 13788 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 13789 bnxt_devlink_health_fw_report(bp); 13790 else 13791 bnxt_fw_reset(bp); 13792 } 13793 13794 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 13795 if (!is_bnxt_fw_ok(bp)) 13796 bnxt_devlink_health_fw_report(bp); 13797 } 13798 13799 smp_mb__before_atomic(); 13800 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13801 } 13802 13803 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 13804 int *max_cp); 13805 13806 /* Under rtnl_lock */ 13807 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 13808 int tx_xdp) 13809 { 13810 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 13811 struct bnxt_hw_rings hwr = {0}; 13812 int rx_rings = rx; 13813 int rc; 13814 13815 if (tcs) 13816 tx_sets = tcs; 13817 13818 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 13819 13820 if (max_rx < rx_rings) 13821 return -ENOMEM; 13822 13823 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13824 rx_rings <<= 1; 13825 13826 hwr.rx = rx_rings; 13827 hwr.tx = tx * tx_sets + tx_xdp; 13828 if (max_tx < hwr.tx) 13829 return -ENOMEM; 13830 13831 hwr.vnic = bnxt_get_total_vnics(bp, rx); 13832 13833 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 13834 hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; 13835 if (max_cp < hwr.cp) 13836 return -ENOMEM; 13837 hwr.stat = hwr.cp; 13838 if (BNXT_NEW_RM(bp)) { 13839 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); 13840 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); 13841 hwr.grp = rx; 13842 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 13843 } 13844 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 13845 hwr.cp_p5 = hwr.tx + rx; 13846 rc = bnxt_hwrm_check_rings(bp, &hwr); 13847 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { 13848 if (!bnxt_ulp_registered(bp->edev)) { 13849 hwr.cp += bnxt_get_ulp_msix_num(bp); 13850 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); 13851 } 13852 if (hwr.cp > bp->total_irqs) { 13853 int total_msix = bnxt_change_msix(bp, hwr.cp); 13854 13855 if (total_msix < hwr.cp) { 13856 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", 13857 hwr.cp, total_msix); 13858 rc = -ENOSPC; 13859 } 13860 } 13861 } 13862 return rc; 13863 } 13864 13865 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 13866 { 13867 if (bp->bar2) { 13868 pci_iounmap(pdev, bp->bar2); 13869 bp->bar2 = NULL; 13870 } 13871 13872 if (bp->bar1) { 13873 pci_iounmap(pdev, bp->bar1); 13874 bp->bar1 = NULL; 13875 } 13876 13877 if (bp->bar0) { 13878 pci_iounmap(pdev, bp->bar0); 13879 bp->bar0 = NULL; 13880 } 13881 } 13882 13883 static void bnxt_cleanup_pci(struct bnxt *bp) 13884 { 13885 bnxt_unmap_bars(bp, bp->pdev); 13886 pci_release_regions(bp->pdev); 13887 if (pci_is_enabled(bp->pdev)) 13888 pci_disable_device(bp->pdev); 13889 } 13890 13891 static void bnxt_init_dflt_coal(struct bnxt *bp) 13892 { 13893 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 13894 struct bnxt_coal *coal; 13895 u16 flags = 0; 13896 13897 if (coal_cap->cmpl_params & 13898 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 13899 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 13900 13901 /* Tick values in micro seconds. 13902 * 1 coal_buf x bufs_per_record = 1 completion record. 13903 */ 13904 coal = &bp->rx_coal; 13905 coal->coal_ticks = 10; 13906 coal->coal_bufs = 30; 13907 coal->coal_ticks_irq = 1; 13908 coal->coal_bufs_irq = 2; 13909 coal->idle_thresh = 50; 13910 coal->bufs_per_record = 2; 13911 coal->budget = 64; /* NAPI budget */ 13912 coal->flags = flags; 13913 13914 coal = &bp->tx_coal; 13915 coal->coal_ticks = 28; 13916 coal->coal_bufs = 30; 13917 coal->coal_ticks_irq = 2; 13918 coal->coal_bufs_irq = 2; 13919 coal->bufs_per_record = 1; 13920 coal->flags = flags; 13921 13922 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 13923 } 13924 13925 /* FW that pre-reserves 1 VNIC per function */ 13926 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 13927 { 13928 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 13929 13930 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13931 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 13932 return true; 13933 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13934 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 13935 return true; 13936 return false; 13937 } 13938 13939 static int bnxt_fw_init_one_p1(struct bnxt *bp) 13940 { 13941 int rc; 13942 13943 bp->fw_cap = 0; 13944 rc = bnxt_hwrm_ver_get(bp); 13945 /* FW may be unresponsive after FLR. FLR must complete within 100 msec 13946 * so wait before continuing with recovery. 
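* If the version query still fails, attempt firmware recovery and then retry it.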
13947 */ 13948 if (rc) 13949 msleep(100); 13950 bnxt_try_map_fw_health_reg(bp); 13951 if (rc) { 13952 rc = bnxt_try_recover_fw(bp); 13953 if (rc) 13954 return rc; 13955 rc = bnxt_hwrm_ver_get(bp); 13956 if (rc) 13957 return rc; 13958 } 13959 13960 bnxt_nvm_cfg_ver_get(bp); 13961 13962 rc = bnxt_hwrm_func_reset(bp); 13963 if (rc) 13964 return -ENODEV; 13965 13966 bnxt_hwrm_fw_set_time(bp); 13967 return 0; 13968 } 13969 13970 static int bnxt_fw_init_one_p2(struct bnxt *bp) 13971 { 13972 int rc; 13973 13974 /* Get the MAX capabilities for this function */ 13975 rc = bnxt_hwrm_func_qcaps(bp); 13976 if (rc) { 13977 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 13978 rc); 13979 return -ENODEV; 13980 } 13981 13982 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 13983 if (rc) 13984 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 13985 rc); 13986 13987 if (bnxt_alloc_fw_health(bp)) { 13988 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 13989 } else { 13990 rc = bnxt_hwrm_error_recovery_qcfg(bp); 13991 if (rc) 13992 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 13993 rc); 13994 } 13995 13996 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 13997 if (rc) 13998 return -ENODEV; 13999 14000 rc = bnxt_alloc_crash_dump_mem(bp); 14001 if (rc) 14002 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", 14003 rc); 14004 if (!rc) { 14005 rc = bnxt_hwrm_crash_dump_mem_cfg(bp); 14006 if (rc) { 14007 bnxt_free_crash_dump_mem(bp); 14008 netdev_warn(bp->dev, 14009 "hwrm crash dump mem failure rc: %d\n", rc); 14010 } 14011 } 14012 14013 if (bnxt_fw_pre_resv_vnics(bp)) 14014 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 14015 14016 bnxt_hwrm_func_qcfg(bp); 14017 bnxt_hwrm_vnic_qcaps(bp); 14018 bnxt_hwrm_port_led_qcaps(bp); 14019 bnxt_ethtool_init(bp); 14020 if (bp->fw_cap & BNXT_FW_CAP_PTP) 14021 __bnxt_hwrm_ptp_qcfg(bp); 14022 bnxt_dcb_init(bp); 14023 bnxt_hwmon_init(bp); 14024 return 0; 14025 } 14026 14027 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 14028 { 14029 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 14030 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 14031 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 14032 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 14033 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 14034 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 14035 bp->rss_hash_delta = bp->rss_hash_cfg; 14036 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 14037 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 14038 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 14039 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 14040 } 14041 } 14042 14043 static void bnxt_set_dflt_rfs(struct bnxt *bp) 14044 { 14045 struct net_device *dev = bp->dev; 14046 14047 dev->hw_features &= ~NETIF_F_NTUPLE; 14048 dev->features &= ~NETIF_F_NTUPLE; 14049 bp->flags &= ~BNXT_FLAG_RFS; 14050 if (bnxt_rfs_supported(bp)) { 14051 dev->hw_features |= NETIF_F_NTUPLE; 14052 if (bnxt_rfs_capable(bp, false)) { 14053 bp->flags |= BNXT_FLAG_RFS; 14054 dev->features |= NETIF_F_NTUPLE; 14055 } 14056 } 14057 } 14058 14059 static void bnxt_fw_init_one_p3(struct bnxt *bp) 14060 { 14061 struct pci_dev *pdev = bp->pdev; 14062 14063 bnxt_set_dflt_rss_hash_type(bp); 14064 bnxt_set_dflt_rfs(bp); 14065 14066 bnxt_get_wol_settings(bp); 14067 if (bp->flags & BNXT_FLAG_WOL_CAP) 14068 device_set_wakeup_enable(&pdev->dev, bp->wol); 14069 else 14070 device_set_wakeup_capable(&pdev->dev, false); 14071 14072 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 14073 
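/* Cache the firmware's interrupt coalescing capabilities for later coalescing configuration. */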
bnxt_hwrm_coal_params_qcaps(bp); 14074 } 14075 14076 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 14077 14078 int bnxt_fw_init_one(struct bnxt *bp) 14079 { 14080 int rc; 14081 14082 rc = bnxt_fw_init_one_p1(bp); 14083 if (rc) { 14084 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 14085 return rc; 14086 } 14087 rc = bnxt_fw_init_one_p2(bp); 14088 if (rc) { 14089 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 14090 return rc; 14091 } 14092 rc = bnxt_probe_phy(bp, false); 14093 if (rc) 14094 return rc; 14095 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 14096 if (rc) 14097 return rc; 14098 14099 bnxt_fw_init_one_p3(bp); 14100 return 0; 14101 } 14102 14103 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 14104 { 14105 struct bnxt_fw_health *fw_health = bp->fw_health; 14106 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 14107 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 14108 u32 reg_type, reg_off, delay_msecs; 14109 14110 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 14111 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 14112 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 14113 switch (reg_type) { 14114 case BNXT_FW_HEALTH_REG_TYPE_CFG: 14115 pci_write_config_dword(bp->pdev, reg_off, val); 14116 break; 14117 case BNXT_FW_HEALTH_REG_TYPE_GRC: 14118 writel(reg_off & BNXT_GRC_BASE_MASK, 14119 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 14120 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 14121 fallthrough; 14122 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 14123 writel(val, bp->bar0 + reg_off); 14124 break; 14125 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 14126 writel(val, bp->bar1 + reg_off); 14127 break; 14128 } 14129 if (delay_msecs) { 14130 pci_read_config_dword(bp->pdev, 0, &val); 14131 msleep(delay_msecs); 14132 } 14133 } 14134 14135 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 14136 { 14137 struct hwrm_func_qcfg_output *resp; 14138 struct hwrm_func_qcfg_input *req; 14139 bool result = true; /* firmware will enforce if unknown */ 14140 14141 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 14142 return result; 14143 14144 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 14145 return result; 14146 14147 req->fid = cpu_to_le16(0xffff); 14148 resp = hwrm_req_hold(bp, req); 14149 if (!hwrm_req_send(bp, req)) 14150 result = !!(le16_to_cpu(resp->flags) & 14151 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 14152 hwrm_req_drop(bp, req); 14153 return result; 14154 } 14155 14156 static void bnxt_reset_all(struct bnxt *bp) 14157 { 14158 struct bnxt_fw_health *fw_health = bp->fw_health; 14159 int i, rc; 14160 14161 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14162 bnxt_fw_reset_via_optee(bp); 14163 bp->fw_reset_timestamp = jiffies; 14164 return; 14165 } 14166 14167 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 14168 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 14169 bnxt_fw_reset_writel(bp, i); 14170 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 14171 struct hwrm_fw_reset_input *req; 14172 14173 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 14174 if (!rc) { 14175 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 14176 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 14177 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 14178 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 14179 rc = hwrm_req_send(bp, req); 14180 } 14181 if (rc != -ENODEV) 14182 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 14183 } 14184 bp->fw_reset_timestamp = jiffies; 14185 } 14186 14187 static bool 
bnxt_fw_reset_timeout(struct bnxt *bp) 14188 { 14189 return time_after(jiffies, bp->fw_reset_timestamp + 14190 (bp->fw_reset_max_dsecs * HZ / 10)); 14191 } 14192 14193 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 14194 { 14195 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14196 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 14197 bnxt_dl_health_fw_status_update(bp, false); 14198 bp->fw_reset_state = 0; 14199 dev_close(bp->dev); 14200 } 14201 14202 static void bnxt_fw_reset_task(struct work_struct *work) 14203 { 14204 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 14205 int rc = 0; 14206 14207 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14208 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 14209 return; 14210 } 14211 14212 switch (bp->fw_reset_state) { 14213 case BNXT_FW_RESET_STATE_POLL_VF: { 14214 int n = bnxt_get_registered_vfs(bp); 14215 int tmo; 14216 14217 if (n < 0) { 14218 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 14219 n, jiffies_to_msecs(jiffies - 14220 bp->fw_reset_timestamp)); 14221 goto fw_reset_abort; 14222 } else if (n > 0) { 14223 if (bnxt_fw_reset_timeout(bp)) { 14224 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14225 bp->fw_reset_state = 0; 14226 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 14227 n); 14228 goto ulp_start; 14229 } 14230 bnxt_queue_fw_reset_work(bp, HZ / 10); 14231 return; 14232 } 14233 bp->fw_reset_timestamp = jiffies; 14234 rtnl_lock(); 14235 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 14236 bnxt_fw_reset_abort(bp, rc); 14237 rtnl_unlock(); 14238 goto ulp_start; 14239 } 14240 bnxt_fw_reset_close(bp); 14241 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14242 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14243 tmo = HZ / 10; 14244 } else { 14245 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14246 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14247 } 14248 rtnl_unlock(); 14249 bnxt_queue_fw_reset_work(bp, tmo); 14250 return; 14251 } 14252 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 14253 u32 val; 14254 14255 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14256 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 14257 !bnxt_fw_reset_timeout(bp)) { 14258 bnxt_queue_fw_reset_work(bp, HZ / 5); 14259 return; 14260 } 14261 14262 if (!bp->fw_health->primary) { 14263 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 14264 14265 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14266 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14267 return; 14268 } 14269 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14270 } 14271 fallthrough; 14272 case BNXT_FW_RESET_STATE_RESET_FW: 14273 bnxt_reset_all(bp); 14274 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14275 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 14276 return; 14277 case BNXT_FW_RESET_STATE_ENABLE_DEV: 14278 bnxt_inv_fw_health_reg(bp); 14279 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 14280 !bp->fw_reset_min_dsecs) { 14281 u16 val; 14282 14283 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14284 if (val == 0xffff) { 14285 if (bnxt_fw_reset_timeout(bp)) { 14286 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 14287 rc = -ETIMEDOUT; 14288 goto fw_reset_abort; 14289 } 14290 bnxt_queue_fw_reset_work(bp, HZ / 1000); 14291 return; 14292 } 14293 } 14294 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14295 
clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 14296 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 14297 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 14298 bnxt_dl_remote_reload(bp); 14299 if (pci_enable_device(bp->pdev)) { 14300 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 14301 rc = -ENODEV; 14302 goto fw_reset_abort; 14303 } 14304 pci_set_master(bp->pdev); 14305 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 14306 fallthrough; 14307 case BNXT_FW_RESET_STATE_POLL_FW: 14308 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 14309 rc = bnxt_hwrm_poll(bp); 14310 if (rc) { 14311 if (bnxt_fw_reset_timeout(bp)) { 14312 netdev_err(bp->dev, "Firmware reset aborted\n"); 14313 goto fw_reset_abort_status; 14314 } 14315 bnxt_queue_fw_reset_work(bp, HZ / 5); 14316 return; 14317 } 14318 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 14319 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 14320 fallthrough; 14321 case BNXT_FW_RESET_STATE_OPENING: 14322 while (!rtnl_trylock()) { 14323 bnxt_queue_fw_reset_work(bp, HZ / 10); 14324 return; 14325 } 14326 rc = bnxt_open(bp->dev); 14327 if (rc) { 14328 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 14329 bnxt_fw_reset_abort(bp, rc); 14330 rtnl_unlock(); 14331 goto ulp_start; 14332 } 14333 14334 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 14335 bp->fw_health->enabled) { 14336 bp->fw_health->last_fw_reset_cnt = 14337 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14338 } 14339 bp->fw_reset_state = 0; 14340 /* Make sure fw_reset_state is 0 before clearing the flag */ 14341 smp_mb__before_atomic(); 14342 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14343 bnxt_ptp_reapply_pps(bp); 14344 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 14345 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 14346 bnxt_dl_health_fw_recovery_done(bp); 14347 bnxt_dl_health_fw_status_update(bp, true); 14348 } 14349 rtnl_unlock(); 14350 bnxt_ulp_start(bp, 0); 14351 bnxt_reenable_sriov(bp); 14352 rtnl_lock(); 14353 bnxt_vf_reps_alloc(bp); 14354 bnxt_vf_reps_open(bp); 14355 rtnl_unlock(); 14356 break; 14357 } 14358 return; 14359 14360 fw_reset_abort_status: 14361 if (bp->fw_health->status_reliable || 14362 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 14363 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14364 14365 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 14366 } 14367 fw_reset_abort: 14368 rtnl_lock(); 14369 bnxt_fw_reset_abort(bp, rc); 14370 rtnl_unlock(); 14371 ulp_start: 14372 bnxt_ulp_start(bp, rc); 14373 } 14374 14375 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 14376 { 14377 int rc; 14378 struct bnxt *bp = netdev_priv(dev); 14379 14380 SET_NETDEV_DEV(dev, &pdev->dev); 14381 14382 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 14383 rc = pci_enable_device(pdev); 14384 if (rc) { 14385 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 14386 goto init_err; 14387 } 14388 14389 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 14390 dev_err(&pdev->dev, 14391 "Cannot find PCI device base address, aborting\n"); 14392 rc = -ENODEV; 14393 goto init_err_disable; 14394 } 14395 14396 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 14397 if (rc) { 14398 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 14399 goto init_err_disable; 14400 } 14401 14402 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 14403 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 14404 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 14405 rc = -EIO; 14406 goto init_err_release; 14407 } 14408 14409 pci_set_master(pdev); 14410 14411 bp->dev = dev; 14412 bp->pdev = pdev; 14413 14414 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 14415 * determines the BAR size. 14416 */ 14417 bp->bar0 = pci_ioremap_bar(pdev, 0); 14418 if (!bp->bar0) { 14419 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 14420 rc = -ENOMEM; 14421 goto init_err_release; 14422 } 14423 14424 bp->bar2 = pci_ioremap_bar(pdev, 4); 14425 if (!bp->bar2) { 14426 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 14427 rc = -ENOMEM; 14428 goto init_err_release; 14429 } 14430 14431 INIT_WORK(&bp->sp_task, bnxt_sp_task); 14432 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 14433 14434 spin_lock_init(&bp->ntp_fltr_lock); 14435 #if BITS_PER_LONG == 32 14436 spin_lock_init(&bp->db_lock); 14437 #endif 14438 14439 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 14440 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 14441 14442 timer_setup(&bp->timer, bnxt_timer, 0); 14443 bp->current_interval = BNXT_TIMER_INTERVAL; 14444 14445 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 14446 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 14447 14448 clear_bit(BNXT_STATE_OPEN, &bp->state); 14449 return 0; 14450 14451 init_err_release: 14452 bnxt_unmap_bars(bp, pdev); 14453 pci_release_regions(pdev); 14454 14455 init_err_disable: 14456 pci_disable_device(pdev); 14457 14458 init_err: 14459 return rc; 14460 } 14461 14462 /* rtnl_lock held */ 14463 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 14464 { 14465 struct sockaddr *addr = p; 14466 struct bnxt *bp = netdev_priv(dev); 14467 int rc = 0; 14468 14469 if (!is_valid_ether_addr(addr->sa_data)) 14470 return -EADDRNOTAVAIL; 14471 14472 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 14473 return 0; 14474 14475 rc = bnxt_approve_mac(bp, addr->sa_data, true); 14476 if (rc) 14477 return rc; 14478 14479 eth_hw_addr_set(dev, addr->sa_data); 14480 bnxt_clear_usr_fltrs(bp, true); 14481 if (netif_running(dev)) { 14482 bnxt_close_nic(bp, false, false); 14483 rc = bnxt_open_nic(bp, false, false); 14484 } 14485 14486 return rc; 14487 } 14488 14489 /* rtnl_lock held */ 14490 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 14491 { 14492 struct bnxt *bp = netdev_priv(dev); 14493 14494 if (netif_running(dev)) 14495 bnxt_close_nic(bp, true, false); 14496 14497 WRITE_ONCE(dev->mtu, new_mtu); 14498 bnxt_set_ring_params(bp); 14499 14500 if (netif_running(dev)) 14501 return bnxt_open_nic(bp, true, false); 14502 14503 return 0; 14504 } 14505 14506 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 14507 { 14508 struct bnxt *bp = netdev_priv(dev); 14509 bool sh = false; 14510 int rc, tx_cp; 
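/* Validate the requested TC count and ring availability before reshaping the TX rings. */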
14511 14512 if (tc > bp->max_tc) { 14513 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 14514 tc, bp->max_tc); 14515 return -EINVAL; 14516 } 14517 14518 if (bp->num_tc == tc) 14519 return 0; 14520 14521 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 14522 sh = true; 14523 14524 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 14525 sh, tc, bp->tx_nr_rings_xdp); 14526 if (rc) 14527 return rc; 14528 14529 /* Needs to close the device and do hw resource re-allocations */ 14530 if (netif_running(bp->dev)) 14531 bnxt_close_nic(bp, true, false); 14532 14533 if (tc) { 14534 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 14535 netdev_set_num_tc(dev, tc); 14536 bp->num_tc = tc; 14537 } else { 14538 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 14539 netdev_reset_tc(dev); 14540 bp->num_tc = 0; 14541 } 14542 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 14543 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 14544 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 14545 tx_cp + bp->rx_nr_rings; 14546 14547 if (netif_running(bp->dev)) 14548 return bnxt_open_nic(bp, true, false); 14549 14550 return 0; 14551 } 14552 14553 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 14554 void *cb_priv) 14555 { 14556 struct bnxt *bp = cb_priv; 14557 14558 if (!bnxt_tc_flower_enabled(bp) || 14559 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 14560 return -EOPNOTSUPP; 14561 14562 switch (type) { 14563 case TC_SETUP_CLSFLOWER: 14564 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 14565 default: 14566 return -EOPNOTSUPP; 14567 } 14568 } 14569 14570 LIST_HEAD(bnxt_block_cb_list); 14571 14572 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 14573 void *type_data) 14574 { 14575 struct bnxt *bp = netdev_priv(dev); 14576 14577 switch (type) { 14578 case TC_SETUP_BLOCK: 14579 return flow_block_cb_setup_simple(type_data, 14580 &bnxt_block_cb_list, 14581 bnxt_setup_tc_block_cb, 14582 bp, bp, true); 14583 case TC_SETUP_QDISC_MQPRIO: { 14584 struct tc_mqprio_qopt *mqprio = type_data; 14585 14586 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 14587 14588 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 14589 } 14590 default: 14591 return -EOPNOTSUPP; 14592 } 14593 } 14594 14595 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 14596 const struct sk_buff *skb) 14597 { 14598 struct bnxt_vnic_info *vnic; 14599 14600 if (skb) 14601 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 14602 14603 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 14604 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 14605 } 14606 14607 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 14608 u32 idx) 14609 { 14610 struct hlist_head *head; 14611 int bit_id; 14612 14613 spin_lock_bh(&bp->ntp_fltr_lock); 14614 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); 14615 if (bit_id < 0) { 14616 spin_unlock_bh(&bp->ntp_fltr_lock); 14617 return -ENOMEM; 14618 } 14619 14620 fltr->base.sw_id = (u16)bit_id; 14621 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 14622 fltr->base.flags |= BNXT_ACT_RING_DST; 14623 head = &bp->ntp_fltr_hash_tbl[idx]; 14624 hlist_add_head_rcu(&fltr->base.hash, head); 14625 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 14626 bnxt_insert_usr_fltr(bp, &fltr->base); 14627 bp->ntp_fltr_count++; 14628 spin_unlock_bh(&bp->ntp_fltr_lock); 14629 return 0; 14630 } 14631 14632 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 14633 struct bnxt_ntuple_filter *f2) 14634 { 
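/* Two ntuple filters match only if the protocol, addresses, ports, masks, control flags and underlying L2 filter are all identical. */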
14635 struct bnxt_flow_masks *masks1 = &f1->fmasks; 14636 struct bnxt_flow_masks *masks2 = &f2->fmasks; 14637 struct flow_keys *keys1 = &f1->fkeys; 14638 struct flow_keys *keys2 = &f2->fkeys; 14639 14640 if (keys1->basic.n_proto != keys2->basic.n_proto || 14641 keys1->basic.ip_proto != keys2->basic.ip_proto) 14642 return false; 14643 14644 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 14645 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 14646 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || 14647 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || 14648 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) 14649 return false; 14650 } else { 14651 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, 14652 &keys2->addrs.v6addrs.src) || 14653 !ipv6_addr_equal(&masks1->addrs.v6addrs.src, 14654 &masks2->addrs.v6addrs.src) || 14655 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, 14656 &keys2->addrs.v6addrs.dst) || 14657 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, 14658 &masks2->addrs.v6addrs.dst)) 14659 return false; 14660 } 14661 14662 return keys1->ports.src == keys2->ports.src && 14663 masks1->ports.src == masks2->ports.src && 14664 keys1->ports.dst == keys2->ports.dst && 14665 masks1->ports.dst == masks2->ports.dst && 14666 keys1->control.flags == keys2->control.flags && 14667 f1->l2_fltr == f2->l2_fltr; 14668 } 14669 14670 struct bnxt_ntuple_filter * 14671 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 14672 struct bnxt_ntuple_filter *fltr, u32 idx) 14673 { 14674 struct bnxt_ntuple_filter *f; 14675 struct hlist_head *head; 14676 14677 head = &bp->ntp_fltr_hash_tbl[idx]; 14678 hlist_for_each_entry_rcu(f, head, base.hash) { 14679 if (bnxt_fltr_match(f, fltr)) 14680 return f; 14681 } 14682 return NULL; 14683 } 14684 14685 #ifdef CONFIG_RFS_ACCEL 14686 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 14687 u16 rxq_index, u32 flow_id) 14688 { 14689 struct bnxt *bp = netdev_priv(dev); 14690 struct bnxt_ntuple_filter *fltr, *new_fltr; 14691 struct flow_keys *fkeys; 14692 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 14693 struct bnxt_l2_filter *l2_fltr; 14694 int rc = 0, idx; 14695 u32 flags; 14696 14697 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 14698 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 14699 atomic_inc(&l2_fltr->refcnt); 14700 } else { 14701 struct bnxt_l2_key key; 14702 14703 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 14704 key.vlan = 0; 14705 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 14706 if (!l2_fltr) 14707 return -EINVAL; 14708 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 14709 bnxt_del_l2_filter(bp, l2_fltr); 14710 return -EINVAL; 14711 } 14712 } 14713 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 14714 if (!new_fltr) { 14715 bnxt_del_l2_filter(bp, l2_fltr); 14716 return -ENOMEM; 14717 } 14718 14719 fkeys = &new_fltr->fkeys; 14720 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 14721 rc = -EPROTONOSUPPORT; 14722 goto err_free; 14723 } 14724 14725 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 14726 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 14727 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 14728 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 14729 rc = -EPROTONOSUPPORT; 14730 goto err_free; 14731 } 14732 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; 14733 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 14734 if (bp->hwrm_spec_code < 0x10601) { 14735 rc = -EPROTONOSUPPORT; 14736 goto err_free; 14737 } 14738 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; 14739 } 14740 flags = 
fkeys->control.flags; 14741 if (((flags & FLOW_DIS_ENCAPSULATION) && 14742 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 14743 rc = -EPROTONOSUPPORT; 14744 goto err_free; 14745 } 14746 new_fltr->l2_fltr = l2_fltr; 14747 14748 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 14749 rcu_read_lock(); 14750 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 14751 if (fltr) { 14752 rc = fltr->base.sw_id; 14753 rcu_read_unlock(); 14754 goto err_free; 14755 } 14756 rcu_read_unlock(); 14757 14758 new_fltr->flow_id = flow_id; 14759 new_fltr->base.rxq = rxq_index; 14760 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 14761 if (!rc) { 14762 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 14763 return new_fltr->base.sw_id; 14764 } 14765 14766 err_free: 14767 bnxt_del_l2_filter(bp, l2_fltr); 14768 kfree(new_fltr); 14769 return rc; 14770 } 14771 #endif 14772 14773 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 14774 { 14775 spin_lock_bh(&bp->ntp_fltr_lock); 14776 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 14777 spin_unlock_bh(&bp->ntp_fltr_lock); 14778 return; 14779 } 14780 hlist_del_rcu(&fltr->base.hash); 14781 bnxt_del_one_usr_fltr(bp, &fltr->base); 14782 bp->ntp_fltr_count--; 14783 spin_unlock_bh(&bp->ntp_fltr_lock); 14784 bnxt_del_l2_filter(bp, fltr->l2_fltr); 14785 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 14786 kfree_rcu(fltr, base.rcu); 14787 } 14788 14789 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 14790 { 14791 #ifdef CONFIG_RFS_ACCEL 14792 int i; 14793 14794 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 14795 struct hlist_head *head; 14796 struct hlist_node *tmp; 14797 struct bnxt_ntuple_filter *fltr; 14798 int rc; 14799 14800 head = &bp->ntp_fltr_hash_tbl[i]; 14801 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 14802 bool del = false; 14803 14804 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 14805 if (fltr->base.flags & BNXT_ACT_NO_AGING) 14806 continue; 14807 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 14808 fltr->flow_id, 14809 fltr->base.sw_id)) { 14810 bnxt_hwrm_cfa_ntuple_filter_free(bp, 14811 fltr); 14812 del = true; 14813 } 14814 } else { 14815 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 14816 fltr); 14817 if (rc) 14818 del = true; 14819 else 14820 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 14821 } 14822 14823 if (del) 14824 bnxt_del_ntp_filter(bp, fltr); 14825 } 14826 } 14827 #endif 14828 } 14829 14830 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 14831 unsigned int entry, struct udp_tunnel_info *ti) 14832 { 14833 struct bnxt *bp = netdev_priv(netdev); 14834 unsigned int cmd; 14835 14836 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 14837 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 14838 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14839 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 14840 else 14841 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 14842 14843 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 14844 } 14845 14846 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 14847 unsigned int entry, struct udp_tunnel_info *ti) 14848 { 14849 struct bnxt *bp = netdev_priv(netdev); 14850 unsigned int cmd; 14851 14852 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 14853 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 14854 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14855 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 14856 else 14857 cmd = 
TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 14858 14859 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 14860 } 14861 14862 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 14863 .set_port = bnxt_udp_tunnel_set_port, 14864 .unset_port = bnxt_udp_tunnel_unset_port, 14865 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14866 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14867 .tables = { 14868 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14869 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14870 }, 14871 }, bnxt_udp_tunnels_p7 = { 14872 .set_port = bnxt_udp_tunnel_set_port, 14873 .unset_port = bnxt_udp_tunnel_unset_port, 14874 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14875 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14876 .tables = { 14877 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14878 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14879 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 14880 }, 14881 }; 14882 14883 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 14884 struct net_device *dev, u32 filter_mask, 14885 int nlflags) 14886 { 14887 struct bnxt *bp = netdev_priv(dev); 14888 14889 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 14890 nlflags, filter_mask, NULL); 14891 } 14892 14893 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 14894 u16 flags, struct netlink_ext_ack *extack) 14895 { 14896 struct bnxt *bp = netdev_priv(dev); 14897 struct nlattr *attr, *br_spec; 14898 int rem, rc = 0; 14899 14900 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 14901 return -EOPNOTSUPP; 14902 14903 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 14904 if (!br_spec) 14905 return -EINVAL; 14906 14907 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 14908 u16 mode; 14909 14910 mode = nla_get_u16(attr); 14911 if (mode == bp->br_mode) 14912 break; 14913 14914 rc = bnxt_hwrm_set_br_mode(bp, mode); 14915 if (!rc) 14916 bp->br_mode = mode; 14917 break; 14918 } 14919 return rc; 14920 } 14921 14922 int bnxt_get_port_parent_id(struct net_device *dev, 14923 struct netdev_phys_item_id *ppid) 14924 { 14925 struct bnxt *bp = netdev_priv(dev); 14926 14927 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 14928 return -EOPNOTSUPP; 14929 14930 /* The PF and its VF-reps only support the switchdev framework */ 14931 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 14932 return -EOPNOTSUPP; 14933 14934 ppid->id_len = sizeof(bp->dsn); 14935 memcpy(ppid->id, bp->dsn, ppid->id_len); 14936 14937 return 0; 14938 } 14939 14940 static const struct net_device_ops bnxt_netdev_ops = { 14941 .ndo_open = bnxt_open, 14942 .ndo_start_xmit = bnxt_start_xmit, 14943 .ndo_stop = bnxt_close, 14944 .ndo_get_stats64 = bnxt_get_stats64, 14945 .ndo_set_rx_mode = bnxt_set_rx_mode, 14946 .ndo_eth_ioctl = bnxt_ioctl, 14947 .ndo_validate_addr = eth_validate_addr, 14948 .ndo_set_mac_address = bnxt_change_mac_addr, 14949 .ndo_change_mtu = bnxt_change_mtu, 14950 .ndo_fix_features = bnxt_fix_features, 14951 .ndo_set_features = bnxt_set_features, 14952 .ndo_features_check = bnxt_features_check, 14953 .ndo_tx_timeout = bnxt_tx_timeout, 14954 #ifdef CONFIG_BNXT_SRIOV 14955 .ndo_get_vf_config = bnxt_get_vf_config, 14956 .ndo_set_vf_mac = bnxt_set_vf_mac, 14957 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 14958 .ndo_set_vf_rate = bnxt_set_vf_bw, 14959 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 14960 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 14961 .ndo_set_vf_trust = 
bnxt_set_vf_trust, 14962 #endif 14963 .ndo_setup_tc = bnxt_setup_tc, 14964 #ifdef CONFIG_RFS_ACCEL 14965 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 14966 #endif 14967 .ndo_bpf = bnxt_xdp, 14968 .ndo_xdp_xmit = bnxt_xdp_xmit, 14969 .ndo_bridge_getlink = bnxt_bridge_getlink, 14970 .ndo_bridge_setlink = bnxt_bridge_setlink, 14971 }; 14972 14973 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, 14974 struct netdev_queue_stats_rx *stats) 14975 { 14976 struct bnxt *bp = netdev_priv(dev); 14977 struct bnxt_cp_ring_info *cpr; 14978 u64 *sw; 14979 14980 cpr = &bp->bnapi[i]->cp_ring; 14981 sw = cpr->stats.sw_stats; 14982 14983 stats->packets = 0; 14984 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 14985 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 14986 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 14987 14988 stats->bytes = 0; 14989 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 14990 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 14991 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 14992 14993 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; 14994 } 14995 14996 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, 14997 struct netdev_queue_stats_tx *stats) 14998 { 14999 struct bnxt *bp = netdev_priv(dev); 15000 struct bnxt_napi *bnapi; 15001 u64 *sw; 15002 15003 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; 15004 sw = bnapi->cp_ring.stats.sw_stats; 15005 15006 stats->packets = 0; 15007 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 15008 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 15009 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 15010 15011 stats->bytes = 0; 15012 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 15013 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 15014 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 15015 } 15016 15017 static void bnxt_get_base_stats(struct net_device *dev, 15018 struct netdev_queue_stats_rx *rx, 15019 struct netdev_queue_stats_tx *tx) 15020 { 15021 struct bnxt *bp = netdev_priv(dev); 15022 15023 rx->packets = bp->net_stats_prev.rx_packets; 15024 rx->bytes = bp->net_stats_prev.rx_bytes; 15025 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; 15026 15027 tx->packets = bp->net_stats_prev.tx_packets; 15028 tx->bytes = bp->net_stats_prev.tx_bytes; 15029 } 15030 15031 static const struct netdev_stat_ops bnxt_stat_ops = { 15032 .get_queue_stats_rx = bnxt_get_queue_stats_rx, 15033 .get_queue_stats_tx = bnxt_get_queue_stats_tx, 15034 .get_base_stats = bnxt_get_base_stats, 15035 }; 15036 15037 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 15038 { 15039 u16 mem_size; 15040 15041 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 15042 mem_size = rxr->rx_agg_bmap_size / 8; 15043 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 15044 if (!rxr->rx_agg_bmap) 15045 return -ENOMEM; 15046 15047 return 0; 15048 } 15049 15050 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 15051 { 15052 struct bnxt_rx_ring_info *rxr, *clone; 15053 struct bnxt *bp = netdev_priv(dev); 15054 struct bnxt_ring_struct *ring; 15055 int rc; 15056 15057 rxr = &bp->rx_ring[idx]; 15058 clone = qmem; 15059 memcpy(clone, rxr, sizeof(*rxr)); 15060 bnxt_init_rx_ring_struct(bp, clone); 15061 bnxt_reset_rx_ring_struct(bp, clone); 15062 15063 clone->rx_prod = 0; 15064 clone->rx_agg_prod = 0; 15065 clone->rx_sw_agg_prod = 0; 15066 clone->rx_next_cons 
= 0; 15067 15068 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); 15069 if (rc) 15070 return rc; 15071 15072 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); 15073 if (rc < 0) 15074 goto err_page_pool_destroy; 15075 15076 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq, 15077 MEM_TYPE_PAGE_POOL, 15078 clone->page_pool); 15079 if (rc) 15080 goto err_rxq_info_unreg; 15081 15082 ring = &clone->rx_ring_struct; 15083 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15084 if (rc) 15085 goto err_free_rx_ring; 15086 15087 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 15088 ring = &clone->rx_agg_ring_struct; 15089 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15090 if (rc) 15091 goto err_free_rx_agg_ring; 15092 15093 rc = bnxt_alloc_rx_agg_bmap(bp, clone); 15094 if (rc) 15095 goto err_free_rx_agg_ring; 15096 } 15097 15098 bnxt_init_one_rx_ring_rxbd(bp, clone); 15099 bnxt_init_one_rx_agg_ring_rxbd(bp, clone); 15100 15101 bnxt_alloc_one_rx_ring_skb(bp, clone, idx); 15102 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15103 bnxt_alloc_one_rx_ring_page(bp, clone, idx); 15104 15105 return 0; 15106 15107 err_free_rx_agg_ring: 15108 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); 15109 err_free_rx_ring: 15110 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); 15111 err_rxq_info_unreg: 15112 xdp_rxq_info_unreg(&clone->xdp_rxq); 15113 err_page_pool_destroy: 15114 clone->page_pool->p.napi = NULL; 15115 page_pool_destroy(clone->page_pool); 15116 clone->page_pool = NULL; 15117 return rc; 15118 } 15119 15120 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) 15121 { 15122 struct bnxt_rx_ring_info *rxr = qmem; 15123 struct bnxt *bp = netdev_priv(dev); 15124 struct bnxt_ring_struct *ring; 15125 15126 bnxt_free_one_rx_ring(bp, rxr); 15127 bnxt_free_one_rx_agg_ring(bp, rxr); 15128 15129 xdp_rxq_info_unreg(&rxr->xdp_rxq); 15130 15131 page_pool_destroy(rxr->page_pool); 15132 rxr->page_pool = NULL; 15133 15134 ring = &rxr->rx_ring_struct; 15135 bnxt_free_ring(bp, &ring->ring_mem); 15136 15137 ring = &rxr->rx_agg_ring_struct; 15138 bnxt_free_ring(bp, &ring->ring_mem); 15139 15140 kfree(rxr->rx_agg_bmap); 15141 rxr->rx_agg_bmap = NULL; 15142 } 15143 15144 static void bnxt_copy_rx_ring(struct bnxt *bp, 15145 struct bnxt_rx_ring_info *dst, 15146 struct bnxt_rx_ring_info *src) 15147 { 15148 struct bnxt_ring_mem_info *dst_rmem, *src_rmem; 15149 struct bnxt_ring_struct *dst_ring, *src_ring; 15150 int i; 15151 15152 dst_ring = &dst->rx_ring_struct; 15153 dst_rmem = &dst_ring->ring_mem; 15154 src_ring = &src->rx_ring_struct; 15155 src_rmem = &src_ring->ring_mem; 15156 15157 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15158 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15159 WARN_ON(dst_rmem->flags != src_rmem->flags); 15160 WARN_ON(dst_rmem->depth != src_rmem->depth); 15161 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15162 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15163 15164 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15165 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15166 *dst_rmem->vmem = *src_rmem->vmem; 15167 for (i = 0; i < dst_rmem->nr_pages; i++) { 15168 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15169 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15170 } 15171 15172 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 15173 return; 15174 15175 dst_ring = &dst->rx_agg_ring_struct; 15176 dst_rmem = &dst_ring->ring_mem; 15177 src_ring = &src->rx_agg_ring_struct; 15178 src_rmem = &src_ring->ring_mem; 15179 15180 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15181 
WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15182 WARN_ON(dst_rmem->flags != src_rmem->flags); 15183 WARN_ON(dst_rmem->depth != src_rmem->depth); 15184 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15185 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15186 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); 15187 15188 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15189 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15190 *dst_rmem->vmem = *src_rmem->vmem; 15191 for (i = 0; i < dst_rmem->nr_pages; i++) { 15192 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15193 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15194 } 15195 15196 dst->rx_agg_bmap = src->rx_agg_bmap; 15197 } 15198 15199 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) 15200 { 15201 struct bnxt *bp = netdev_priv(dev); 15202 struct bnxt_rx_ring_info *rxr, *clone; 15203 struct bnxt_cp_ring_info *cpr; 15204 struct bnxt_vnic_info *vnic; 15205 int i, rc; 15206 15207 rxr = &bp->rx_ring[idx]; 15208 clone = qmem; 15209 15210 rxr->rx_prod = clone->rx_prod; 15211 rxr->rx_agg_prod = clone->rx_agg_prod; 15212 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; 15213 rxr->rx_next_cons = clone->rx_next_cons; 15214 rxr->page_pool = clone->page_pool; 15215 rxr->xdp_rxq = clone->xdp_rxq; 15216 15217 bnxt_copy_rx_ring(bp, rxr, clone); 15218 15219 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 15220 if (rc) 15221 return rc; 15222 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); 15223 if (rc) 15224 goto err_free_hwrm_rx_ring; 15225 15226 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 15227 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15228 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 15229 15230 cpr = &rxr->bnapi->cp_ring; 15231 cpr->sw_stats->rx.rx_resets++; 15232 15233 for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { 15234 vnic = &bp->vnic_info[i]; 15235 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 15236 bnxt_hwrm_vnic_update(bp, vnic, 15237 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15238 } 15239 15240 return 0; 15241 15242 err_free_hwrm_rx_ring: 15243 bnxt_hwrm_rx_ring_free(bp, rxr, false); 15244 return rc; 15245 } 15246 15247 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) 15248 { 15249 struct bnxt *bp = netdev_priv(dev); 15250 struct bnxt_rx_ring_info *rxr; 15251 struct bnxt_vnic_info *vnic; 15252 int i; 15253 15254 for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { 15255 vnic = &bp->vnic_info[i]; 15256 vnic->mru = 0; 15257 bnxt_hwrm_vnic_update(bp, vnic, 15258 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15259 } 15260 15261 rxr = &bp->rx_ring[idx]; 15262 bnxt_hwrm_rx_ring_free(bp, rxr, false); 15263 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); 15264 rxr->rx_next_cons = 0; 15265 page_pool_disable_direct_recycling(rxr->page_pool); 15266 15267 memcpy(qmem, rxr, sizeof(*rxr)); 15268 bnxt_init_rx_ring_struct(bp, qmem); 15269 15270 return 0; 15271 } 15272 15273 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { 15274 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), 15275 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, 15276 .ndo_queue_mem_free = bnxt_queue_mem_free, 15277 .ndo_queue_start = bnxt_queue_start, 15278 .ndo_queue_stop = bnxt_queue_stop, 15279 }; 15280 15281 static void bnxt_remove_one(struct pci_dev *pdev) 15282 { 15283 struct net_device *dev = pci_get_drvdata(pdev); 15284 struct bnxt *bp = netdev_priv(dev); 15285 15286 if (BNXT_PF(bp)) 15287 bnxt_sriov_disable(bp); 15288 15289 bnxt_rdma_aux_device_del(bp); 15290 15291 bnxt_ptp_clear(bp); 15292 unregister_netdev(dev); 15293 15294 
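/* The netdev has been unregistered; release the RDMA auxiliary device and the remaining driver resources. */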
bnxt_rdma_aux_device_uninit(bp); 15295 15296 bnxt_free_l2_filters(bp, true); 15297 bnxt_free_ntp_fltrs(bp, true); 15298 WARN_ON(bp->num_rss_ctx); 15299 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 15300 /* Flush any pending tasks */ 15301 cancel_work_sync(&bp->sp_task); 15302 cancel_delayed_work_sync(&bp->fw_reset_task); 15303 bp->sp_event = 0; 15304 15305 bnxt_dl_fw_reporters_destroy(bp); 15306 bnxt_dl_unregister(bp); 15307 bnxt_shutdown_tc(bp); 15308 15309 bnxt_clear_int_mode(bp); 15310 bnxt_hwrm_func_drv_unrgtr(bp); 15311 bnxt_free_hwrm_resources(bp); 15312 bnxt_hwmon_uninit(bp); 15313 bnxt_ethtool_free(bp); 15314 bnxt_dcb_free(bp); 15315 kfree(bp->ptp_cfg); 15316 bp->ptp_cfg = NULL; 15317 kfree(bp->fw_health); 15318 bp->fw_health = NULL; 15319 bnxt_cleanup_pci(bp); 15320 bnxt_free_ctx_mem(bp); 15321 bnxt_free_crash_dump_mem(bp); 15322 kfree(bp->rss_indir_tbl); 15323 bp->rss_indir_tbl = NULL; 15324 bnxt_free_port_stats(bp); 15325 free_netdev(dev); 15326 } 15327 15328 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 15329 { 15330 int rc = 0; 15331 struct bnxt_link_info *link_info = &bp->link_info; 15332 15333 bp->phy_flags = 0; 15334 rc = bnxt_hwrm_phy_qcaps(bp); 15335 if (rc) { 15336 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 15337 rc); 15338 return rc; 15339 } 15340 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 15341 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 15342 else 15343 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 15344 if (!fw_dflt) 15345 return 0; 15346 15347 mutex_lock(&bp->link_lock); 15348 rc = bnxt_update_link(bp, false); 15349 if (rc) { 15350 mutex_unlock(&bp->link_lock); 15351 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 15352 rc); 15353 return rc; 15354 } 15355 15356 /* Older firmware does not have supported_auto_speeds, so assume 15357 * that all supported speeds can be autonegotiated. 
15358 */ 15359 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 15360 link_info->support_auto_speeds = link_info->support_speeds; 15361 15362 bnxt_init_ethtool_link_settings(bp); 15363 mutex_unlock(&bp->link_lock); 15364 return 0; 15365 } 15366 15367 static int bnxt_get_max_irq(struct pci_dev *pdev) 15368 { 15369 u16 ctrl; 15370 15371 if (!pdev->msix_cap) 15372 return 1; 15373 15374 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 15375 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 15376 } 15377 15378 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 15379 int *max_cp) 15380 { 15381 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 15382 int max_ring_grps = 0, max_irq; 15383 15384 *max_tx = hw_resc->max_tx_rings; 15385 *max_rx = hw_resc->max_rx_rings; 15386 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 15387 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 15388 bnxt_get_ulp_msix_num_in_use(bp), 15389 hw_resc->max_stat_ctxs - 15390 bnxt_get_ulp_stat_ctxs_in_use(bp)); 15391 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 15392 *max_cp = min_t(int, *max_cp, max_irq); 15393 max_ring_grps = hw_resc->max_hw_ring_grps; 15394 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 15395 *max_cp -= 1; 15396 *max_rx -= 2; 15397 } 15398 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15399 *max_rx >>= 1; 15400 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 15401 int rc; 15402 15403 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 15404 if (rc) { 15405 *max_rx = 0; 15406 *max_tx = 0; 15407 } 15408 /* On P5 chips, max_cp output param should be available NQs */ 15409 *max_cp = max_irq; 15410 } 15411 *max_rx = min_t(int, *max_rx, max_ring_grps); 15412 } 15413 15414 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 15415 { 15416 int rx, tx, cp; 15417 15418 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 15419 *max_rx = rx; 15420 *max_tx = tx; 15421 if (!rx || !tx || !cp) 15422 return -ENOMEM; 15423 15424 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 15425 } 15426 15427 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 15428 bool shared) 15429 { 15430 int rc; 15431 15432 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 15433 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 15434 /* Not enough rings, try disabling agg rings. 
*/ 15435 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 15436 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 15437 if (rc) { 15438 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 15439 bp->flags |= BNXT_FLAG_AGG_RINGS; 15440 return rc; 15441 } 15442 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 15443 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 15444 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 15445 bnxt_set_ring_params(bp); 15446 } 15447 15448 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 15449 int max_cp, max_stat, max_irq; 15450 15451 /* Reserve minimum resources for RoCE */ 15452 max_cp = bnxt_get_max_func_cp_rings(bp); 15453 max_stat = bnxt_get_max_func_stat_ctxs(bp); 15454 max_irq = bnxt_get_max_func_irqs(bp); 15455 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 15456 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 15457 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 15458 return 0; 15459 15460 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 15461 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 15462 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 15463 max_cp = min_t(int, max_cp, max_irq); 15464 max_cp = min_t(int, max_cp, max_stat); 15465 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 15466 if (rc) 15467 rc = 0; 15468 } 15469 return rc; 15470 } 15471 15472 /* In initial default shared ring setting, each shared ring must have a 15473 * RX/TX ring pair. 15474 */ 15475 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 15476 { 15477 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 15478 bp->rx_nr_rings = bp->cp_nr_rings; 15479 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 15480 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15481 } 15482 15483 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 15484 { 15485 int dflt_rings, max_rx_rings, max_tx_rings, rc; 15486 int avail_msix; 15487 15488 if (!bnxt_can_reserve_rings(bp)) 15489 return 0; 15490 15491 if (sh) 15492 bp->flags |= BNXT_FLAG_SHARED_RINGS; 15493 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 15494 /* Reduce default rings on multi-port cards so that total default 15495 * rings do not exceed CPU count. 15496 */ 15497 if (bp->port_count > 1) { 15498 int max_rings = 15499 max_t(int, num_online_cpus() / bp->port_count, 1); 15500 15501 dflt_rings = min_t(int, dflt_rings, max_rings); 15502 } 15503 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 15504 if (rc) 15505 return rc; 15506 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 15507 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 15508 if (sh) 15509 bnxt_trim_dflt_sh_rings(bp); 15510 else 15511 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 15512 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15513 15514 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; 15515 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { 15516 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); 15517 15518 bnxt_set_ulp_msix_num(bp, ulp_num_msix); 15519 bnxt_set_dflt_ulp_stat_ctxs(bp); 15520 } 15521 15522 rc = __bnxt_reserve_rings(bp); 15523 if (rc && rc != -ENODEV) 15524 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 15525 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15526 if (sh) 15527 bnxt_trim_dflt_sh_rings(bp); 15528 15529 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 15530 if (bnxt_need_reserve_rings(bp)) { 15531 rc = __bnxt_reserve_rings(bp); 15532 if (rc && rc != -ENODEV) 15533 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 15534 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15535 } 15536 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 15537 bp->rx_nr_rings++; 15538 bp->cp_nr_rings++; 15539 } 15540 if (rc) { 15541 bp->tx_nr_rings = 0; 15542 bp->rx_nr_rings = 0; 15543 } 15544 return rc; 15545 } 15546 15547 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 15548 { 15549 int rc; 15550 15551 if (bp->tx_nr_rings) 15552 return 0; 15553 15554 bnxt_ulp_irq_stop(bp); 15555 bnxt_clear_int_mode(bp); 15556 rc = bnxt_set_dflt_rings(bp, true); 15557 if (rc) { 15558 if (BNXT_VF(bp) && rc == -ENODEV) 15559 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 15560 else 15561 netdev_err(bp->dev, "Not enough rings available.\n"); 15562 goto init_dflt_ring_err; 15563 } 15564 rc = bnxt_init_int_mode(bp); 15565 if (rc) 15566 goto init_dflt_ring_err; 15567 15568 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15569 15570 bnxt_set_dflt_rfs(bp); 15571 15572 init_dflt_ring_err: 15573 bnxt_ulp_irq_restart(bp, rc); 15574 return rc; 15575 } 15576 15577 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 15578 { 15579 int rc; 15580 15581 ASSERT_RTNL(); 15582 bnxt_hwrm_func_qcaps(bp); 15583 15584 if (netif_running(bp->dev)) 15585 __bnxt_close_nic(bp, true, false); 15586 15587 bnxt_ulp_irq_stop(bp); 15588 bnxt_clear_int_mode(bp); 15589 rc = bnxt_init_int_mode(bp); 15590 bnxt_ulp_irq_restart(bp, rc); 15591 15592 if (netif_running(bp->dev)) { 15593 if (rc) 15594 dev_close(bp->dev); 15595 else 15596 rc = bnxt_open_nic(bp, true, false); 15597 } 15598 15599 return rc; 15600 } 15601 15602 static int bnxt_init_mac_addr(struct bnxt *bp) 15603 { 15604 int rc = 0; 15605 15606 if (BNXT_PF(bp)) { 15607 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 15608 } else { 15609 #ifdef CONFIG_BNXT_SRIOV 15610 struct bnxt_vf_info *vf = &bp->vf; 15611 bool strict_approval = true; 15612 15613 if (is_valid_ether_addr(vf->mac_addr)) { 15614 /* overwrite netdev dev_addr with admin VF MAC */ 15615 eth_hw_addr_set(bp->dev, vf->mac_addr); 15616 /* Older PF driver or firmware may not approve this 15617 * correctly. 
15618 */ 15619 strict_approval = false; 15620 } else { 15621 eth_hw_addr_random(bp->dev); 15622 } 15623 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 15624 #endif 15625 } 15626 return rc; 15627 } 15628 15629 static void bnxt_vpd_read_info(struct bnxt *bp) 15630 { 15631 struct pci_dev *pdev = bp->pdev; 15632 unsigned int vpd_size, kw_len; 15633 int pos, size; 15634 u8 *vpd_data; 15635 15636 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 15637 if (IS_ERR(vpd_data)) { 15638 pci_warn(pdev, "Unable to read VPD\n"); 15639 return; 15640 } 15641 15642 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 15643 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 15644 if (pos < 0) 15645 goto read_sn; 15646 15647 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 15648 memcpy(bp->board_partno, &vpd_data[pos], size); 15649 15650 read_sn: 15651 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 15652 PCI_VPD_RO_KEYWORD_SERIALNO, 15653 &kw_len); 15654 if (pos < 0) 15655 goto exit; 15656 15657 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 15658 memcpy(bp->board_serialno, &vpd_data[pos], size); 15659 exit: 15660 kfree(vpd_data); 15661 } 15662 15663 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 15664 { 15665 struct pci_dev *pdev = bp->pdev; 15666 u64 qword; 15667 15668 qword = pci_get_dsn(pdev); 15669 if (!qword) { 15670 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 15671 return -EOPNOTSUPP; 15672 } 15673 15674 put_unaligned_le64(qword, dsn); 15675 15676 bp->flags |= BNXT_FLAG_DSN_VALID; 15677 return 0; 15678 } 15679 15680 static int bnxt_map_db_bar(struct bnxt *bp) 15681 { 15682 if (!bp->db_size) 15683 return -ENODEV; 15684 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 15685 if (!bp->bar1) 15686 return -ENOMEM; 15687 return 0; 15688 } 15689 15690 void bnxt_print_device_info(struct bnxt *bp) 15691 { 15692 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 15693 board_info[bp->board_idx].name, 15694 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 15695 15696 pcie_print_link_status(bp->pdev); 15697 } 15698 15699 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 15700 { 15701 struct bnxt_hw_resc *hw_resc; 15702 struct net_device *dev; 15703 struct bnxt *bp; 15704 int rc, max_irqs; 15705 15706 if (pci_is_bridge(pdev)) 15707 return -ENODEV; 15708 15709 if (!pdev->msix_cap) { 15710 dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); 15711 return -ENODEV; 15712 } 15713 15714 /* Clear any pending DMA transactions from crash kernel 15715 * while loading driver in capture kernel. 
15716 */ 15717 if (is_kdump_kernel()) { 15718 pci_clear_master(pdev); 15719 pcie_flr(pdev); 15720 } 15721 15722 max_irqs = bnxt_get_max_irq(pdev); 15723 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 15724 max_irqs); 15725 if (!dev) 15726 return -ENOMEM; 15727 15728 bp = netdev_priv(dev); 15729 bp->board_idx = ent->driver_data; 15730 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 15731 bnxt_set_max_func_irqs(bp, max_irqs); 15732 15733 if (bnxt_vf_pciid(bp->board_idx)) 15734 bp->flags |= BNXT_FLAG_VF; 15735 15736 /* No devlink port registration in case of a VF */ 15737 if (BNXT_PF(bp)) 15738 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 15739 15740 rc = bnxt_init_board(pdev, dev); 15741 if (rc < 0) 15742 goto init_err_free; 15743 15744 dev->netdev_ops = &bnxt_netdev_ops; 15745 dev->stat_ops = &bnxt_stat_ops; 15746 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 15747 dev->ethtool_ops = &bnxt_ethtool_ops; 15748 pci_set_drvdata(pdev, dev); 15749 15750 rc = bnxt_alloc_hwrm_resources(bp); 15751 if (rc) 15752 goto init_err_pci_clean; 15753 15754 mutex_init(&bp->hwrm_cmd_lock); 15755 mutex_init(&bp->link_lock); 15756 15757 rc = bnxt_fw_init_one_p1(bp); 15758 if (rc) 15759 goto init_err_pci_clean; 15760 15761 if (BNXT_PF(bp)) 15762 bnxt_vpd_read_info(bp); 15763 15764 if (BNXT_CHIP_P5_PLUS(bp)) { 15765 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 15766 if (BNXT_CHIP_P7(bp)) 15767 bp->flags |= BNXT_FLAG_CHIP_P7; 15768 } 15769 15770 rc = bnxt_alloc_rss_indir_tbl(bp); 15771 if (rc) 15772 goto init_err_pci_clean; 15773 15774 rc = bnxt_fw_init_one_p2(bp); 15775 if (rc) 15776 goto init_err_pci_clean; 15777 15778 rc = bnxt_map_db_bar(bp); 15779 if (rc) { 15780 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 15781 rc); 15782 goto init_err_pci_clean; 15783 } 15784 15785 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 15786 NETIF_F_TSO | NETIF_F_TSO6 | 15787 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 15788 NETIF_F_GSO_IPXIP4 | 15789 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 15790 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 15791 NETIF_F_RXCSUM | NETIF_F_GRO; 15792 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 15793 dev->hw_features |= NETIF_F_GSO_UDP_L4; 15794 15795 if (BNXT_SUPPORTS_TPA(bp)) 15796 dev->hw_features |= NETIF_F_LRO; 15797 15798 dev->hw_enc_features = 15799 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 15800 NETIF_F_TSO | NETIF_F_TSO6 | 15801 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 15802 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 15803 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 15804 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 15805 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 15806 if (bp->flags & BNXT_FLAG_CHIP_P7) 15807 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 15808 else 15809 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 15810 15811 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 15812 NETIF_F_GSO_GRE_CSUM; 15813 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 15814 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 15815 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 15816 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 15817 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 15818 if (BNXT_SUPPORTS_TPA(bp)) 15819 dev->hw_features |= NETIF_F_GRO_HW; 15820 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 15821 if (dev->features & NETIF_F_GRO_HW) 15822 dev->features &= ~NETIF_F_LRO; 15823 dev->priv_flags |= IFF_UNICAST_FLT; 15824 15825 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 15826 if (bp->tso_max_segs) 15827 
netif_set_tso_max_segs(dev, bp->tso_max_segs); 15828 15829 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 15830 NETDEV_XDP_ACT_RX_SG; 15831 15832 #ifdef CONFIG_BNXT_SRIOV 15833 init_waitqueue_head(&bp->sriov_cfg_wait); 15834 #endif 15835 if (BNXT_SUPPORTS_TPA(bp)) { 15836 bp->gro_func = bnxt_gro_func_5730x; 15837 if (BNXT_CHIP_P4(bp)) 15838 bp->gro_func = bnxt_gro_func_5731x; 15839 else if (BNXT_CHIP_P5_PLUS(bp)) 15840 bp->gro_func = bnxt_gro_func_5750x; 15841 } 15842 if (!BNXT_CHIP_P4_PLUS(bp)) 15843 bp->flags |= BNXT_FLAG_DOUBLE_DB; 15844 15845 rc = bnxt_init_mac_addr(bp); 15846 if (rc) { 15847 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 15848 rc = -EADDRNOTAVAIL; 15849 goto init_err_pci_clean; 15850 } 15851 15852 if (BNXT_PF(bp)) { 15853 /* Read the adapter's DSN to use as the eswitch switch_id */ 15854 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 15855 } 15856 15857 /* MTU range: 60 - FW defined max */ 15858 dev->min_mtu = ETH_ZLEN; 15859 dev->max_mtu = bp->max_mtu; 15860 15861 rc = bnxt_probe_phy(bp, true); 15862 if (rc) 15863 goto init_err_pci_clean; 15864 15865 hw_resc = &bp->hw_resc; 15866 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + 15867 BNXT_L2_FLTR_MAX_FLTR; 15868 /* Older firmware may not report these filters properly */ 15869 if (bp->max_fltr < BNXT_MAX_FLTR) 15870 bp->max_fltr = BNXT_MAX_FLTR; 15871 bnxt_init_l2_fltr_tbl(bp); 15872 bnxt_set_rx_skb_mode(bp, false); 15873 bnxt_set_tpa_flags(bp); 15874 bnxt_set_ring_params(bp); 15875 bnxt_rdma_aux_device_init(bp); 15876 rc = bnxt_set_dflt_rings(bp, true); 15877 if (rc) { 15878 if (BNXT_VF(bp) && rc == -ENODEV) { 15879 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 15880 } else { 15881 netdev_err(bp->dev, "Not enough rings available.\n"); 15882 rc = -ENOMEM; 15883 } 15884 goto init_err_pci_clean; 15885 } 15886 15887 bnxt_fw_init_one_p3(bp); 15888 15889 bnxt_init_dflt_coal(bp); 15890 15891 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 15892 bp->flags |= BNXT_FLAG_STRIP_VLAN; 15893 15894 rc = bnxt_init_int_mode(bp); 15895 if (rc) 15896 goto init_err_pci_clean; 15897 15898 /* No TC has been set yet and rings may have been trimmed due to 15899 * limited MSIX, so we re-initialize the TX rings per TC. 
15900 */ 15901 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15902 15903 if (BNXT_PF(bp)) { 15904 if (!bnxt_pf_wq) { 15905 bnxt_pf_wq = 15906 create_singlethread_workqueue("bnxt_pf_wq"); 15907 if (!bnxt_pf_wq) { 15908 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 15909 rc = -ENOMEM; 15910 goto init_err_pci_clean; 15911 } 15912 } 15913 rc = bnxt_init_tc(bp); 15914 if (rc) 15915 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 15916 rc); 15917 } 15918 15919 bnxt_inv_fw_health_reg(bp); 15920 rc = bnxt_dl_register(bp); 15921 if (rc) 15922 goto init_err_dl; 15923 15924 INIT_LIST_HEAD(&bp->usr_fltr_list); 15925 15926 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 15927 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; 15928 if (BNXT_SUPPORTS_QUEUE_API(bp)) 15929 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; 15930 15931 rc = register_netdev(dev); 15932 if (rc) 15933 goto init_err_cleanup; 15934 15935 bnxt_dl_fw_reporters_create(bp); 15936 15937 bnxt_rdma_aux_device_add(bp); 15938 15939 bnxt_print_device_info(bp); 15940 15941 pci_save_state(pdev); 15942 15943 return 0; 15944 init_err_cleanup: 15945 bnxt_rdma_aux_device_uninit(bp); 15946 bnxt_dl_unregister(bp); 15947 init_err_dl: 15948 bnxt_shutdown_tc(bp); 15949 bnxt_clear_int_mode(bp); 15950 15951 init_err_pci_clean: 15952 bnxt_hwrm_func_drv_unrgtr(bp); 15953 bnxt_free_hwrm_resources(bp); 15954 bnxt_hwmon_uninit(bp); 15955 bnxt_ethtool_free(bp); 15956 bnxt_ptp_clear(bp); 15957 kfree(bp->ptp_cfg); 15958 bp->ptp_cfg = NULL; 15959 kfree(bp->fw_health); 15960 bp->fw_health = NULL; 15961 bnxt_cleanup_pci(bp); 15962 bnxt_free_ctx_mem(bp); 15963 bnxt_free_crash_dump_mem(bp); 15964 kfree(bp->rss_indir_tbl); 15965 bp->rss_indir_tbl = NULL; 15966 15967 init_err_free: 15968 free_netdev(dev); 15969 return rc; 15970 } 15971 15972 static void bnxt_shutdown(struct pci_dev *pdev) 15973 { 15974 struct net_device *dev = pci_get_drvdata(pdev); 15975 struct bnxt *bp; 15976 15977 if (!dev) 15978 return; 15979 15980 rtnl_lock(); 15981 bp = netdev_priv(dev); 15982 if (!bp) 15983 goto shutdown_exit; 15984 15985 if (netif_running(dev)) 15986 dev_close(dev); 15987 15988 bnxt_clear_int_mode(bp); 15989 pci_disable_device(pdev); 15990 15991 if (system_state == SYSTEM_POWER_OFF) { 15992 pci_wake_from_d3(pdev, bp->wol); 15993 pci_set_power_state(pdev, PCI_D3hot); 15994 } 15995 15996 shutdown_exit: 15997 rtnl_unlock(); 15998 } 15999 16000 #ifdef CONFIG_PM_SLEEP 16001 static int bnxt_suspend(struct device *device) 16002 { 16003 struct net_device *dev = dev_get_drvdata(device); 16004 struct bnxt *bp = netdev_priv(dev); 16005 int rc = 0; 16006 16007 bnxt_ulp_stop(bp); 16008 16009 rtnl_lock(); 16010 if (netif_running(dev)) { 16011 netif_device_detach(dev); 16012 rc = bnxt_close(dev); 16013 } 16014 bnxt_hwrm_func_drv_unrgtr(bp); 16015 pci_disable_device(bp->pdev); 16016 bnxt_free_ctx_mem(bp); 16017 rtnl_unlock(); 16018 return rc; 16019 } 16020 16021 static int bnxt_resume(struct device *device) 16022 { 16023 struct net_device *dev = dev_get_drvdata(device); 16024 struct bnxt *bp = netdev_priv(dev); 16025 int rc = 0; 16026 16027 rtnl_lock(); 16028 rc = pci_enable_device(bp->pdev); 16029 if (rc) { 16030 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 16031 rc); 16032 goto resume_exit; 16033 } 16034 pci_set_master(bp->pdev); 16035 if (bnxt_hwrm_ver_get(bp)) { 16036 rc = -ENODEV; 16037 goto resume_exit; 16038 } 16039 rc = bnxt_hwrm_func_reset(bp); 16040 if (rc) { 16041 rc = -EBUSY; 16042 goto resume_exit; 16043 } 16044 16045 rc = bnxt_hwrm_func_qcaps(bp); 
	if (rc)
		goto resume_exit;

	bnxt_clear_reservations(bp, true);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	if (bp->fw_crash_mem)
		bnxt_hwrm_crash_dump_mem_cfg(bp);

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	bool abort = false;

	netdev_info(netdev, "PCI I/O error detected\n");

	bnxt_ulp_stop(bp);

	rtnl_lock();
	netif_device_detach(netdev);

	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "Firmware reset already in progress\n");
		abort = true;
	}

	if (abort || state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Link is not reliable anymore if state is pci_channel_io_frozen
	 * so we disable bus master to prevent any potential bad DMAs before
	 * freeing kernel memory.
	 */
	if (state == pci_channel_io_frozen) {
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
		bnxt_fw_fatal_close(bp);
	}

	if (netif_running(netdev))
		__bnxt_close_nic(bp, true, true);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
		msleep(900);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is only restored by rewriting
		 * the BARs.
		 *
		 * Since pci_restore_state() does not re-write the BARs when
		 * the values match what was saved earlier, write the BARs to
		 * 0 first to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	if (!err)
		netif_device_attach(netdev);

	rtnl_unlock();
	bnxt_ulp_start(bp, err);
	if (!err)
		bnxt_reenable_sriov(bp);
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);