/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}

const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
	.ports = {
		.src = 0,
		.dst = 0,
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_NONE,
			.dst = BNXT_IPV6_MASK_NONE,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_ALL,
			.dst = BNXT_IPV6_MASK_ALL,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
		prod = NEXT_TX(prod);
		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
				    DB_RING_IDX(&txr->tx_db, prod));
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
		u32 hdr_len;

		if (skb->encapsulation) {
			if (udp_gso)
				hdr_len = skb_inner_transport_offset(skb) +
					  sizeof(struct udphdr);
			else
				hdr_len = skb_inner_tcp_all_headers(skb);
		} else if (udp_gso) {
			hdr_len = skb_transport_offset(skb) +
				  sizeof(struct udphdr);
		} else {
			hdr_len = skb_tcp_all_headers(skb);
		}

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	txbd0 = txbd;
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		bnxt_txr_db_kick(bp, txr, prod);
	} else {
		if (free_size >= bp->tx_wake_thresh)
			txbd0->tx_bd_len_flags_type |=
				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
		txr->kick_pending = 1;
	}

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push) {
			txbd0->tx_bd_len_flags_type &=
				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
			bnxt_txr_db_kick(bp, txr, prod);
		}

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

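/* Error unwind for the normal BD path: undo the DMA mappings created so
 * far, free the skb, and ring the doorbell if a kick was left pending.
 */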
tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	struct pci_dev *pdev = bp->pdev;
	u16 hw_cons = txr->tx_hw_cons;
	unsigned int tx_bytes = 0;
	u16 cons = txr->tx_cons;
	int tx_pkts = 0;

	while (RING_TX(bp, cons) != hw_cons) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, cons);
			return;
		}

		tx_pkts++;
		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (BNXT_CHIP_P5(bp)) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr;
	int i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
			__bnxt_tx_int(bp, txr, budget);
	}
	bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt *bp = rxr->bnapi->bp;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
				      unsigned int len,
				      dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
				     unsigned int len,
				     dma_addr_t mapping)
{
	return bnxt_copy_data(bnapi, data, len, mapping);
}

static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
				     struct xdp_buff *xdp,
				     unsigned int len,
				     dma_addr_t mapping)
{
	unsigned int metasize = 0;
	u8 *data = xdp->data;
	struct sk_buff *skb;

	len = xdp->data_end - xdp->data_meta;
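	/* Copy starting at data_meta so that any metadata prepended by an
	 * XDP program is carried into the new skb.
	 */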
metasize = xdp->data - xdp->data_meta; 1345 data = xdp->data_meta; 1346 1347 skb = bnxt_copy_data(bnapi, data, len, mapping); 1348 if (!skb) 1349 return skb; 1350 1351 if (metasize) { 1352 skb_metadata_set(skb, metasize); 1353 __skb_pull(skb, metasize); 1354 } 1355 1356 return skb; 1357 } 1358 1359 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1360 u32 *raw_cons, void *cmp) 1361 { 1362 struct rx_cmp *rxcmp = cmp; 1363 u32 tmp_raw_cons = *raw_cons; 1364 u8 cmp_type, agg_bufs = 0; 1365 1366 cmp_type = RX_CMP_TYPE(rxcmp); 1367 1368 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1369 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1370 RX_CMP_AGG_BUFS) >> 1371 RX_CMP_AGG_BUFS_SHIFT; 1372 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1373 struct rx_tpa_end_cmp *tpa_end = cmp; 1374 1375 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1376 return 0; 1377 1378 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1379 } 1380 1381 if (agg_bufs) { 1382 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1383 return -EBUSY; 1384 } 1385 *raw_cons = tmp_raw_cons; 1386 return 0; 1387 } 1388 1389 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1390 { 1391 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1392 u16 idx = agg_id & MAX_TPA_P5_MASK; 1393 1394 if (test_bit(idx, map->agg_idx_bmap)) 1395 idx = find_first_zero_bit(map->agg_idx_bmap, 1396 BNXT_AGG_IDX_BMAP_SIZE); 1397 __set_bit(idx, map->agg_idx_bmap); 1398 map->agg_id_tbl[agg_id] = idx; 1399 return idx; 1400 } 1401 1402 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1403 { 1404 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1405 1406 __clear_bit(idx, map->agg_idx_bmap); 1407 } 1408 1409 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1410 { 1411 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1412 1413 return map->agg_id_tbl[agg_id]; 1414 } 1415 1416 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1417 struct rx_tpa_start_cmp *tpa_start, 1418 struct rx_tpa_start_cmp_ext *tpa_start1) 1419 { 1420 tpa_info->cfa_code_valid = 1; 1421 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1422 tpa_info->vlan_valid = 0; 1423 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1424 tpa_info->vlan_valid = 1; 1425 tpa_info->metadata = 1426 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1427 } 1428 } 1429 1430 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1431 struct rx_tpa_start_cmp *tpa_start, 1432 struct rx_tpa_start_cmp_ext *tpa_start1) 1433 { 1434 tpa_info->vlan_valid = 0; 1435 if (TPA_START_VLAN_VALID(tpa_start)) { 1436 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1437 u32 vlan_proto = ETH_P_8021Q; 1438 1439 tpa_info->vlan_valid = 1; 1440 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1441 vlan_proto = ETH_P_8021AD; 1442 tpa_info->metadata = vlan_proto << 16 | 1443 TPA_START_METADATA0_TCI(tpa_start1); 1444 } 1445 } 1446 1447 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1448 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1449 struct rx_tpa_start_cmp_ext *tpa_start1) 1450 { 1451 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1452 struct bnxt_tpa_info *tpa_info; 1453 u16 cons, prod, agg_id; 1454 struct rx_bd *prod_bd; 1455 dma_addr_t mapping; 1456 1457 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1458 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1459 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1460 } else { 1461 agg_id = TPA_START_AGG_ID(tpa_start); 1462 } 1463 cons = 
tpa_start->rx_tpa_start_cmp_opaque; 1464 prod = rxr->rx_prod; 1465 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1466 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1467 tpa_info = &rxr->rx_tpa[agg_id]; 1468 1469 if (unlikely(cons != rxr->rx_next_cons || 1470 TPA_START_ERROR(tpa_start))) { 1471 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1472 cons, rxr->rx_next_cons, 1473 TPA_START_ERROR_CODE(tpa_start1)); 1474 bnxt_sched_reset_rxr(bp, rxr); 1475 return; 1476 } 1477 prod_rx_buf->data = tpa_info->data; 1478 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1479 1480 mapping = tpa_info->mapping; 1481 prod_rx_buf->mapping = mapping; 1482 1483 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1484 1485 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1486 1487 tpa_info->data = cons_rx_buf->data; 1488 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1489 cons_rx_buf->data = NULL; 1490 tpa_info->mapping = cons_rx_buf->mapping; 1491 1492 tpa_info->len = 1493 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1494 RX_TPA_START_CMP_LEN_SHIFT; 1495 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1496 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1497 tpa_info->gso_type = SKB_GSO_TCPV4; 1498 if (TPA_START_IS_IPV6(tpa_start1)) 1499 tpa_info->gso_type = SKB_GSO_TCPV6; 1500 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1501 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && 1502 TPA_START_HASH_TYPE(tpa_start) == 3) 1503 tpa_info->gso_type = SKB_GSO_TCPV6; 1504 tpa_info->rss_hash = 1505 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1506 } else { 1507 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1508 tpa_info->gso_type = 0; 1509 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1510 } 1511 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1512 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1513 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1514 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1515 else 1516 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1517 tpa_info->agg_count = 0; 1518 1519 rxr->rx_prod = NEXT_RX(prod); 1520 cons = RING_RX(bp, NEXT_RX(cons)); 1521 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1522 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1523 1524 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1525 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1526 cons_rx_buf->data = NULL; 1527 } 1528 1529 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1530 { 1531 if (agg_bufs) 1532 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1533 } 1534 1535 #ifdef CONFIG_INET 1536 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1537 { 1538 struct udphdr *uh = NULL; 1539 1540 if (ip_proto == htons(ETH_P_IP)) { 1541 struct iphdr *iph = (struct iphdr *)skb->data; 1542 1543 if (iph->protocol == IPPROTO_UDP) 1544 uh = (struct udphdr *)(iph + 1); 1545 } else { 1546 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1547 1548 if (iph->nexthdr == IPPROTO_UDP) 1549 uh = (struct udphdr *)(iph + 1); 1550 } 1551 if (uh) { 1552 if (uh->check) 1553 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1554 else 1555 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1556 } 1557 } 1558 #endif 1559 1560 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1561 int payload_off, int tcp_ts, 1562 struct sk_buff *skb) 1563 { 1564 #ifdef CONFIG_INET 1565 struct tcphdr *th; 1566 int len, nw_off; 1567 u16 outer_ip_off, inner_ip_off, 
inner_mac_off; 1568 u32 hdr_info = tpa_info->hdr_info; 1569 bool loopback = false; 1570 1571 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1572 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1573 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1574 1575 /* If the packet is an internal loopback packet, the offsets will 1576 * have an extra 4 bytes. 1577 */ 1578 if (inner_mac_off == 4) { 1579 loopback = true; 1580 } else if (inner_mac_off > 4) { 1581 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1582 ETH_HLEN - 2)); 1583 1584 /* We only support inner iPv4/ipv6. If we don't see the 1585 * correct protocol ID, it must be a loopback packet where 1586 * the offsets are off by 4. 1587 */ 1588 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1589 loopback = true; 1590 } 1591 if (loopback) { 1592 /* internal loopback packet, subtract all offsets by 4 */ 1593 inner_ip_off -= 4; 1594 inner_mac_off -= 4; 1595 outer_ip_off -= 4; 1596 } 1597 1598 nw_off = inner_ip_off - ETH_HLEN; 1599 skb_set_network_header(skb, nw_off); 1600 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1601 struct ipv6hdr *iph = ipv6_hdr(skb); 1602 1603 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1604 len = skb->len - skb_transport_offset(skb); 1605 th = tcp_hdr(skb); 1606 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1607 } else { 1608 struct iphdr *iph = ip_hdr(skb); 1609 1610 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1611 len = skb->len - skb_transport_offset(skb); 1612 th = tcp_hdr(skb); 1613 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1614 } 1615 1616 if (inner_mac_off) { /* tunnel */ 1617 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1618 ETH_HLEN - 2)); 1619 1620 bnxt_gro_tunnel(skb, proto); 1621 } 1622 #endif 1623 return skb; 1624 } 1625 1626 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1627 int payload_off, int tcp_ts, 1628 struct sk_buff *skb) 1629 { 1630 #ifdef CONFIG_INET 1631 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1632 u32 hdr_info = tpa_info->hdr_info; 1633 int iphdr_len, nw_off; 1634 1635 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1636 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1637 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1638 1639 nw_off = inner_ip_off - ETH_HLEN; 1640 skb_set_network_header(skb, nw_off); 1641 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
1642 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1643 skb_set_transport_header(skb, nw_off + iphdr_len); 1644 1645 if (inner_mac_off) { /* tunnel */ 1646 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1647 ETH_HLEN - 2)); 1648 1649 bnxt_gro_tunnel(skb, proto); 1650 } 1651 #endif 1652 return skb; 1653 } 1654 1655 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1656 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1657 1658 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1659 int payload_off, int tcp_ts, 1660 struct sk_buff *skb) 1661 { 1662 #ifdef CONFIG_INET 1663 struct tcphdr *th; 1664 int len, nw_off, tcp_opt_len = 0; 1665 1666 if (tcp_ts) 1667 tcp_opt_len = 12; 1668 1669 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1670 struct iphdr *iph; 1671 1672 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1673 ETH_HLEN; 1674 skb_set_network_header(skb, nw_off); 1675 iph = ip_hdr(skb); 1676 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1677 len = skb->len - skb_transport_offset(skb); 1678 th = tcp_hdr(skb); 1679 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1680 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1681 struct ipv6hdr *iph; 1682 1683 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1684 ETH_HLEN; 1685 skb_set_network_header(skb, nw_off); 1686 iph = ipv6_hdr(skb); 1687 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1688 len = skb->len - skb_transport_offset(skb); 1689 th = tcp_hdr(skb); 1690 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1691 } else { 1692 dev_kfree_skb_any(skb); 1693 return NULL; 1694 } 1695 1696 if (nw_off) /* tunnel */ 1697 bnxt_gro_tunnel(skb, skb->protocol); 1698 #endif 1699 return skb; 1700 } 1701 1702 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1703 struct bnxt_tpa_info *tpa_info, 1704 struct rx_tpa_end_cmp *tpa_end, 1705 struct rx_tpa_end_cmp_ext *tpa_end1, 1706 struct sk_buff *skb) 1707 { 1708 #ifdef CONFIG_INET 1709 int payload_off; 1710 u16 segs; 1711 1712 segs = TPA_END_TPA_SEGS(tpa_end); 1713 if (segs == 1) 1714 return skb; 1715 1716 NAPI_GRO_CB(skb)->count = segs; 1717 skb_shinfo(skb)->gso_size = 1718 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1719 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1720 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1721 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1722 else 1723 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1724 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1725 if (likely(skb)) 1726 tcp_gro_complete(skb); 1727 #endif 1728 return skb; 1729 } 1730 1731 /* Given the cfa_code of a received packet determine which 1732 * netdev (vf-rep or PF) the packet is destined to. 1733 */ 1734 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1735 { 1736 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1737 1738 /* if vf-rep dev is NULL, the must belongs to the PF */ 1739 return dev ? 
dev : bp->dev; 1740 } 1741 1742 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1743 struct bnxt_cp_ring_info *cpr, 1744 u32 *raw_cons, 1745 struct rx_tpa_end_cmp *tpa_end, 1746 struct rx_tpa_end_cmp_ext *tpa_end1, 1747 u8 *event) 1748 { 1749 struct bnxt_napi *bnapi = cpr->bnapi; 1750 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1751 struct net_device *dev = bp->dev; 1752 u8 *data_ptr, agg_bufs; 1753 unsigned int len; 1754 struct bnxt_tpa_info *tpa_info; 1755 dma_addr_t mapping; 1756 struct sk_buff *skb; 1757 u16 idx = 0, agg_id; 1758 void *data; 1759 bool gro; 1760 1761 if (unlikely(bnapi->in_reset)) { 1762 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1763 1764 if (rc < 0) 1765 return ERR_PTR(-EBUSY); 1766 return NULL; 1767 } 1768 1769 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1770 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1771 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1772 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1773 tpa_info = &rxr->rx_tpa[agg_id]; 1774 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1775 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1776 agg_bufs, tpa_info->agg_count); 1777 agg_bufs = tpa_info->agg_count; 1778 } 1779 tpa_info->agg_count = 0; 1780 *event |= BNXT_AGG_EVENT; 1781 bnxt_free_agg_idx(rxr, agg_id); 1782 idx = agg_id; 1783 gro = !!(bp->flags & BNXT_FLAG_GRO); 1784 } else { 1785 agg_id = TPA_END_AGG_ID(tpa_end); 1786 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1787 tpa_info = &rxr->rx_tpa[agg_id]; 1788 idx = RING_CMP(*raw_cons); 1789 if (agg_bufs) { 1790 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1791 return ERR_PTR(-EBUSY); 1792 1793 *event |= BNXT_AGG_EVENT; 1794 idx = NEXT_CMP(idx); 1795 } 1796 gro = !!TPA_END_GRO(tpa_end); 1797 } 1798 data = tpa_info->data; 1799 data_ptr = tpa_info->data_ptr; 1800 prefetch(data_ptr); 1801 len = tpa_info->len; 1802 mapping = tpa_info->mapping; 1803 1804 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1805 bnxt_abort_tpa(cpr, idx, agg_bufs); 1806 if (agg_bufs > MAX_SKB_FRAGS) 1807 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1808 agg_bufs, (int)MAX_SKB_FRAGS); 1809 return NULL; 1810 } 1811 1812 if (len <= bp->rx_copy_thresh) { 1813 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1814 if (!skb) { 1815 bnxt_abort_tpa(cpr, idx, agg_bufs); 1816 cpr->sw_stats->rx.rx_oom_discards += 1; 1817 return NULL; 1818 } 1819 } else { 1820 u8 *new_data; 1821 dma_addr_t new_mapping; 1822 1823 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); 1824 if (!new_data) { 1825 bnxt_abort_tpa(cpr, idx, agg_bufs); 1826 cpr->sw_stats->rx.rx_oom_discards += 1; 1827 return NULL; 1828 } 1829 1830 tpa_info->data = new_data; 1831 tpa_info->data_ptr = new_data + bp->rx_offset; 1832 tpa_info->mapping = new_mapping; 1833 1834 skb = napi_build_skb(data, bp->rx_buf_size); 1835 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1836 bp->rx_buf_use_size, bp->rx_dir, 1837 DMA_ATTR_WEAK_ORDERING); 1838 1839 if (!skb) { 1840 skb_free_frag(data); 1841 bnxt_abort_tpa(cpr, idx, agg_bufs); 1842 cpr->sw_stats->rx.rx_oom_discards += 1; 1843 return NULL; 1844 } 1845 skb_reserve(skb, bp->rx_offset); 1846 skb_put(skb, len); 1847 } 1848 1849 if (agg_bufs) { 1850 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1851 if (!skb) { 1852 /* Page reuse already handled by bnxt_rx_pages(). 
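* A NULL return here means the aggregation pages could not be attached to
* the skb, so count the drop as an rx_oom_discard and give up on the packet.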
*/ 1853 cpr->sw_stats->rx.rx_oom_discards += 1; 1854 return NULL; 1855 } 1856 } 1857 1858 if (tpa_info->cfa_code_valid) 1859 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1860 skb->protocol = eth_type_trans(skb, dev); 1861 1862 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1863 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1864 1865 if (tpa_info->vlan_valid && 1866 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1867 __be16 vlan_proto = htons(tpa_info->metadata >> 1868 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1869 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1870 1871 if (eth_type_vlan(vlan_proto)) { 1872 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1873 } else { 1874 dev_kfree_skb(skb); 1875 return NULL; 1876 } 1877 } 1878 1879 skb_checksum_none_assert(skb); 1880 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1881 skb->ip_summed = CHECKSUM_UNNECESSARY; 1882 skb->csum_level = 1883 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1884 } 1885 1886 if (gro) 1887 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1888 1889 return skb; 1890 } 1891 1892 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1893 struct rx_agg_cmp *rx_agg) 1894 { 1895 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1896 struct bnxt_tpa_info *tpa_info; 1897 1898 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1899 tpa_info = &rxr->rx_tpa[agg_id]; 1900 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1901 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1902 } 1903 1904 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1905 struct sk_buff *skb) 1906 { 1907 skb_mark_for_recycle(skb); 1908 1909 if (skb->dev != bp->dev) { 1910 /* this packet belongs to a vf-rep */ 1911 bnxt_vf_rep_rx(bp, skb); 1912 return; 1913 } 1914 skb_record_rx_queue(skb, bnapi->index); 1915 napi_gro_receive(&bnapi->napi, skb); 1916 } 1917 1918 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 1919 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 1920 { 1921 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 1922 1923 if (BNXT_PTP_RX_TS_VALID(flags)) 1924 goto ts_valid; 1925 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 1926 return false; 1927 1928 ts_valid: 1929 *cmpl_ts = ts; 1930 return true; 1931 } 1932 1933 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1934 struct rx_cmp *rxcmp, 1935 struct rx_cmp_ext *rxcmp1) 1936 { 1937 __be16 vlan_proto; 1938 u16 vtag; 1939 1940 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1941 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1942 u32 meta_data; 1943 1944 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1945 return skb; 1946 1947 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1948 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1949 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1950 if (eth_type_vlan(vlan_proto)) 1951 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1952 else 1953 goto vlan_err; 1954 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 1955 if (RX_CMP_VLAN_VALID(rxcmp)) { 1956 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 1957 1958 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 1959 vlan_proto = htons(ETH_P_8021Q); 1960 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 1961 vlan_proto = htons(ETH_P_8021AD); 1962 else 1963 goto vlan_err; 1964 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 1965 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1966 } 1967 } 1968 return skb; 1969 vlan_err: 1970 dev_kfree_skb(skb); 1971 return NULL; 1972 } 1973 1974 static enum 
pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 1975 struct rx_cmp *rxcmp) 1976 { 1977 u8 ext_op; 1978 1979 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 1980 switch (ext_op) { 1981 case EXT_OP_INNER_4: 1982 case EXT_OP_OUTER_4: 1983 case EXT_OP_INNFL_3: 1984 case EXT_OP_OUTFL_3: 1985 return PKT_HASH_TYPE_L4; 1986 default: 1987 return PKT_HASH_TYPE_L3; 1988 } 1989 } 1990 1991 /* returns the following: 1992 * 1 - 1 packet successfully received 1993 * 0 - successful TPA_START, packet not completed yet 1994 * -EBUSY - completion ring does not have all the agg buffers yet 1995 * -ENOMEM - packet aborted due to out of memory 1996 * -EIO - packet aborted due to hw error indicated in BD 1997 */ 1998 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1999 u32 *raw_cons, u8 *event) 2000 { 2001 struct bnxt_napi *bnapi = cpr->bnapi; 2002 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2003 struct net_device *dev = bp->dev; 2004 struct rx_cmp *rxcmp; 2005 struct rx_cmp_ext *rxcmp1; 2006 u32 tmp_raw_cons = *raw_cons; 2007 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 2008 struct bnxt_sw_rx_bd *rx_buf; 2009 unsigned int len; 2010 u8 *data_ptr, agg_bufs, cmp_type; 2011 bool xdp_active = false; 2012 dma_addr_t dma_addr; 2013 struct sk_buff *skb; 2014 struct xdp_buff xdp; 2015 u32 flags, misc; 2016 u32 cmpl_ts; 2017 void *data; 2018 int rc = 0; 2019 2020 rxcmp = (struct rx_cmp *) 2021 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2022 2023 cmp_type = RX_CMP_TYPE(rxcmp); 2024 2025 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 2026 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 2027 goto next_rx_no_prod_no_len; 2028 } 2029 2030 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2031 cp_cons = RING_CMP(tmp_raw_cons); 2032 rxcmp1 = (struct rx_cmp_ext *) 2033 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2034 2035 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2036 return -EBUSY; 2037 2038 /* The valid test of the entry must be done first before 2039 * reading any further. 
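* The dma_rmb() that follows enforces this ordering: the rest of the
* completion is not read until the valid bit has been observed.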
2040 */ 2041 dma_rmb(); 2042 prod = rxr->rx_prod; 2043 2044 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 2045 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2046 bnxt_tpa_start(bp, rxr, cmp_type, 2047 (struct rx_tpa_start_cmp *)rxcmp, 2048 (struct rx_tpa_start_cmp_ext *)rxcmp1); 2049 2050 *event |= BNXT_RX_EVENT; 2051 goto next_rx_no_prod_no_len; 2052 2053 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2054 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 2055 (struct rx_tpa_end_cmp *)rxcmp, 2056 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 2057 2058 if (IS_ERR(skb)) 2059 return -EBUSY; 2060 2061 rc = -ENOMEM; 2062 if (likely(skb)) { 2063 bnxt_deliver_skb(bp, bnapi, skb); 2064 rc = 1; 2065 } 2066 *event |= BNXT_RX_EVENT; 2067 goto next_rx_no_prod_no_len; 2068 } 2069 2070 cons = rxcmp->rx_cmp_opaque; 2071 if (unlikely(cons != rxr->rx_next_cons)) { 2072 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 2073 2074 /* 0xffff is forced error, don't print it */ 2075 if (rxr->rx_next_cons != 0xffff) 2076 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 2077 cons, rxr->rx_next_cons); 2078 bnxt_sched_reset_rxr(bp, rxr); 2079 if (rc1) 2080 return rc1; 2081 goto next_rx_no_prod_no_len; 2082 } 2083 rx_buf = &rxr->rx_buf_ring[cons]; 2084 data = rx_buf->data; 2085 data_ptr = rx_buf->data_ptr; 2086 prefetch(data_ptr); 2087 2088 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2089 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2090 2091 if (agg_bufs) { 2092 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2093 return -EBUSY; 2094 2095 cp_cons = NEXT_CMP(cp_cons); 2096 *event |= BNXT_AGG_EVENT; 2097 } 2098 *event |= BNXT_RX_EVENT; 2099 2100 rx_buf->data = NULL; 2101 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2102 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2103 2104 bnxt_reuse_rx_data(rxr, cons, data); 2105 if (agg_bufs) 2106 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2107 false); 2108 2109 rc = -EIO; 2110 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2111 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; 2112 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2113 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2114 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2115 rx_err); 2116 bnxt_sched_reset_rxr(bp, rxr); 2117 } 2118 } 2119 goto next_rx_no_len; 2120 } 2121 2122 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2123 len = flags >> RX_CMP_LEN_SHIFT; 2124 dma_addr = rx_buf->mapping; 2125 2126 if (bnxt_xdp_attached(bp, rxr)) { 2127 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2128 if (agg_bufs) { 2129 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2130 cp_cons, agg_bufs, 2131 false); 2132 if (!frag_len) 2133 goto oom_next_rx; 2134 } 2135 xdp_active = true; 2136 } 2137 2138 if (xdp_active) { 2139 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { 2140 rc = 1; 2141 goto next_rx; 2142 } 2143 } 2144 2145 if (len <= bp->rx_copy_thresh) { 2146 if (!xdp_active) 2147 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2148 else 2149 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); 2150 bnxt_reuse_rx_data(rxr, cons, data); 2151 if (!skb) { 2152 if (agg_bufs) { 2153 if (!xdp_active) 2154 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2155 agg_bufs, false); 2156 else 2157 bnxt_xdp_buff_frags_free(rxr, &xdp); 2158 } 2159 goto oom_next_rx; 2160 } 2161 } else { 2162 u32 payload; 2163 2164 if (rx_buf->data_ptr == data_ptr) 2165 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2166 else 2167 payload = 0; 2168 skb = 
bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2169 payload | len); 2170 if (!skb) 2171 goto oom_next_rx; 2172 } 2173 2174 if (agg_bufs) { 2175 if (!xdp_active) { 2176 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2177 if (!skb) 2178 goto oom_next_rx; 2179 } else { 2180 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); 2181 if (!skb) { 2182 /* we should be able to free the old skb here */ 2183 bnxt_xdp_buff_frags_free(rxr, &xdp); 2184 goto oom_next_rx; 2185 } 2186 } 2187 } 2188 2189 if (RX_CMP_HASH_VALID(rxcmp)) { 2190 enum pkt_hash_types type; 2191 2192 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2193 type = bnxt_rss_ext_op(bp, rxcmp); 2194 } else { 2195 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 2196 2197 /* RSS profiles 1 and 3 with extract code 0 for inner 2198 * 4-tuple 2199 */ 2200 if (hash_type != 1 && hash_type != 3) 2201 type = PKT_HASH_TYPE_L3; 2202 else 2203 type = PKT_HASH_TYPE_L4; 2204 } 2205 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2206 } 2207 2208 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2209 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2210 skb->protocol = eth_type_trans(skb, dev); 2211 2212 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2213 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2214 if (!skb) 2215 goto next_rx; 2216 } 2217 2218 skb_checksum_none_assert(skb); 2219 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2220 if (dev->features & NETIF_F_RXCSUM) { 2221 skb->ip_summed = CHECKSUM_UNNECESSARY; 2222 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2223 } 2224 } else { 2225 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2226 if (dev->features & NETIF_F_RXCSUM) 2227 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; 2228 } 2229 } 2230 2231 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2232 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2233 u64 ns, ts; 2234 2235 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2236 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2237 2238 spin_lock_bh(&ptp->ptp_lock); 2239 ns = timecounter_cyc2time(&ptp->tc, ts); 2240 spin_unlock_bh(&ptp->ptp_lock); 2241 memset(skb_hwtstamps(skb), 0, 2242 sizeof(*skb_hwtstamps(skb))); 2243 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2244 } 2245 } 2246 } 2247 bnxt_deliver_skb(bp, bnapi, skb); 2248 rc = 1; 2249 2250 next_rx: 2251 cpr->rx_packets += 1; 2252 cpr->rx_bytes += len; 2253 2254 next_rx_no_len: 2255 rxr->rx_prod = NEXT_RX(prod); 2256 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2257 2258 next_rx_no_prod_no_len: 2259 *raw_cons = tmp_raw_cons; 2260 2261 return rc; 2262 2263 oom_next_rx: 2264 cpr->sw_stats->rx.rx_oom_discards += 1; 2265 rc = -ENOMEM; 2266 goto next_rx; 2267 } 2268 2269 /* In netpoll mode, if we are using a combined completion ring, we need to 2270 * discard the rx packets and recycle the buffers. 
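* This is done by forcing an error into the completion (a CRC error for L2
* completions, TPA errors for TPA end completions) so that bnxt_rx_pkt()
* drops the packet and reuses the buffers in place.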
2271 */ 2272 static int bnxt_force_rx_discard(struct bnxt *bp, 2273 struct bnxt_cp_ring_info *cpr, 2274 u32 *raw_cons, u8 *event) 2275 { 2276 u32 tmp_raw_cons = *raw_cons; 2277 struct rx_cmp_ext *rxcmp1; 2278 struct rx_cmp *rxcmp; 2279 u16 cp_cons; 2280 u8 cmp_type; 2281 int rc; 2282 2283 cp_cons = RING_CMP(tmp_raw_cons); 2284 rxcmp = (struct rx_cmp *) 2285 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2286 2287 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2288 cp_cons = RING_CMP(tmp_raw_cons); 2289 rxcmp1 = (struct rx_cmp_ext *) 2290 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2291 2292 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2293 return -EBUSY; 2294 2295 /* The valid test of the entry must be done first before 2296 * reading any further. 2297 */ 2298 dma_rmb(); 2299 cmp_type = RX_CMP_TYPE(rxcmp); 2300 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2301 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2302 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2303 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2304 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2305 struct rx_tpa_end_cmp_ext *tpa_end1; 2306 2307 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2308 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2309 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2310 } 2311 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2312 if (rc && rc != -EBUSY) 2313 cpr->sw_stats->rx.rx_netpoll_discards += 1; 2314 return rc; 2315 } 2316 2317 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2318 { 2319 struct bnxt_fw_health *fw_health = bp->fw_health; 2320 u32 reg = fw_health->regs[reg_idx]; 2321 u32 reg_type, reg_off, val = 0; 2322 2323 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2324 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2325 switch (reg_type) { 2326 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2327 pci_read_config_dword(bp->pdev, reg_off, &val); 2328 break; 2329 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2330 reg_off = fw_health->mapped_regs[reg_idx]; 2331 fallthrough; 2332 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2333 val = readl(bp->bar0 + reg_off); 2334 break; 2335 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2336 val = readl(bp->bar1 + reg_off); 2337 break; 2338 } 2339 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2340 val &= fw_health->fw_reset_inprog_reg_mask; 2341 return val; 2342 } 2343 2344 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2345 { 2346 int i; 2347 2348 for (i = 0; i < bp->rx_nr_rings; i++) { 2349 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2350 struct bnxt_ring_grp_info *grp_info; 2351 2352 grp_info = &bp->grp_info[grp_idx]; 2353 if (grp_info->agg_fw_ring_id == ring_id) 2354 return grp_idx; 2355 } 2356 return INVALID_HW_RING_ID; 2357 } 2358 2359 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2360 { 2361 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2362 2363 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2364 return link_info->force_link_speed2; 2365 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2366 return link_info->force_pam4_link_speed; 2367 return link_info->force_link_speed; 2368 } 2369 2370 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2371 { 2372 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2373 2374 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2375 link_info->req_link_speed = link_info->force_link_speed2; 2376 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2377 switch (link_info->req_link_speed) { 2378 case BNXT_LINK_SPEED_50GB_PAM4: 2379 case BNXT_LINK_SPEED_100GB_PAM4: 2380 case BNXT_LINK_SPEED_200GB_PAM4: 2381 case BNXT_LINK_SPEED_400GB_PAM4: 
2382 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2383 break; 2384 case BNXT_LINK_SPEED_100GB_PAM4_112: 2385 case BNXT_LINK_SPEED_200GB_PAM4_112: 2386 case BNXT_LINK_SPEED_400GB_PAM4_112: 2387 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2388 break; 2389 default: 2390 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2391 } 2392 return; 2393 } 2394 link_info->req_link_speed = link_info->force_link_speed; 2395 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2396 if (link_info->force_pam4_link_speed) { 2397 link_info->req_link_speed = link_info->force_pam4_link_speed; 2398 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2399 } 2400 } 2401 2402 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2403 { 2404 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2405 2406 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2407 link_info->advertising = link_info->auto_link_speeds2; 2408 return; 2409 } 2410 link_info->advertising = link_info->auto_link_speeds; 2411 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2412 } 2413 2414 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2415 { 2416 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2417 2418 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2419 if (link_info->req_link_speed != link_info->force_link_speed2) 2420 return true; 2421 return false; 2422 } 2423 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2424 link_info->req_link_speed != link_info->force_link_speed) 2425 return true; 2426 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2427 link_info->req_link_speed != link_info->force_pam4_link_speed) 2428 return true; 2429 return false; 2430 } 2431 2432 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2433 { 2434 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2435 2436 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2437 if (link_info->advertising != link_info->auto_link_speeds2) 2438 return true; 2439 return false; 2440 } 2441 if (link_info->advertising != link_info->auto_link_speeds || 2442 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2443 return true; 2444 return false; 2445 } 2446 2447 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2448 ((data2) & \ 2449 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2450 2451 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2452 (((data2) & \ 2453 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2454 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2455 2456 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2457 ((data1) & \ 2458 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2459 2460 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2461 (((data1) & \ 2462 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2463 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2464 2465 /* Return true if the workqueue has to be scheduled */ 2466 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2467 { 2468 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2469 2470 switch (err_type) { 2471 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2472 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2473 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2474 break; 2475 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2476 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2477 break; 2478 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2479 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2480 break; 2481 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2482 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2483 char *threshold_type; 2484 bool notify = false; 2485 char *dir_str; 2486 2487 switch (type) { 2488 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2489 threshold_type = "warning"; 2490 break; 2491 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2492 threshold_type = "critical"; 2493 break; 2494 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2495 threshold_type = "fatal"; 2496 break; 2497 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2498 threshold_type = "shutdown"; 2499 break; 2500 default: 2501 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2502 return false; 2503 } 2504 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2505 dir_str = "above"; 2506 notify = true; 2507 } else { 2508 dir_str = "below"; 2509 } 2510 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2511 dir_str, threshold_type); 2512 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2513 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2514 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2515 if (notify) { 2516 bp->thermal_threshold_type = type; 2517 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2518 return true; 2519 } 2520 return false; 2521 } 2522 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: 2523 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); 2524 break; 2525 default: 2526 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2527 err_type); 2528 break; 2529 } 2530 return false; 2531 } 2532 2533 #define BNXT_GET_EVENT_PORT(data) \ 2534 ((data) & \ 2535 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2536 2537 #define BNXT_EVENT_RING_TYPE(data2) \ 2538 ((data2) & \ 2539 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2540 2541 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2542 (BNXT_EVENT_RING_TYPE(data2) == \ 2543 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2544 2545 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2546 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2547 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2548 2549 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2550 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2551 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2552 2553 #define BNXT_PHC_BITS 48 2554 2555 static int bnxt_async_event_process(struct bnxt *bp, 2556 struct hwrm_async_event_cmpl *cmpl) 2557 { 2558 u16 event_id = le16_to_cpu(cmpl->event_id); 2559 u32 data1 = le32_to_cpu(cmpl->event_data1); 2560 u32 data2 = le32_to_cpu(cmpl->event_data2); 2561 2562 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2563 event_id, data1, data2); 2564 2565 /* TODO CHIMP_FW: Define event id's for link change, error etc 
*/ 2566 switch (event_id) { 2567 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2568 struct bnxt_link_info *link_info = &bp->link_info; 2569 2570 if (BNXT_VF(bp)) 2571 goto async_event_process_exit; 2572 2573 /* print unsupported speed warning in forced speed mode only */ 2574 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2575 (data1 & 0x20000)) { 2576 u16 fw_speed = bnxt_get_force_speed(link_info); 2577 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2578 2579 if (speed != SPEED_UNKNOWN) 2580 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2581 speed); 2582 } 2583 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2584 } 2585 fallthrough; 2586 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2587 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2588 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2589 fallthrough; 2590 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2591 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2592 break; 2593 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2594 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2595 break; 2596 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2597 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2598 2599 if (BNXT_VF(bp)) 2600 break; 2601 2602 if (bp->pf.port_id != port_id) 2603 break; 2604 2605 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2606 break; 2607 } 2608 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2609 if (BNXT_PF(bp)) 2610 goto async_event_process_exit; 2611 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2612 break; 2613 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2614 char *type_str = "Solicited"; 2615 2616 if (!bp->fw_health) 2617 goto async_event_process_exit; 2618 2619 bp->fw_reset_timestamp = jiffies; 2620 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2621 if (!bp->fw_reset_min_dsecs) 2622 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2623 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2624 if (!bp->fw_reset_max_dsecs) 2625 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2626 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2627 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2628 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2629 type_str = "Fatal"; 2630 bp->fw_health->fatalities++; 2631 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2632 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2633 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2634 type_str = "Non-fatal"; 2635 bp->fw_health->survivals++; 2636 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2637 } 2638 netif_warn(bp, hw, bp->dev, 2639 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2640 type_str, data1, data2, 2641 bp->fw_reset_min_dsecs * 100, 2642 bp->fw_reset_max_dsecs * 100); 2643 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2644 break; 2645 } 2646 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2647 struct bnxt_fw_health *fw_health = bp->fw_health; 2648 char *status_desc = "healthy"; 2649 u32 status; 2650 2651 if (!fw_health) 2652 goto async_event_process_exit; 2653 2654 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2655 fw_health->enabled = false; 2656 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2657 break; 2658 } 2659 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2660 fw_health->tmr_multiplier = 2661 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2662 bp->current_interval * 10); 2663 fw_health->tmr_counter = fw_health->tmr_multiplier; 2664 if 
(!fw_health->enabled) 2665 fw_health->last_fw_heartbeat = 2666 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2667 fw_health->last_fw_reset_cnt = 2668 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2669 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2670 if (status != BNXT_FW_STATUS_HEALTHY) 2671 status_desc = "unhealthy"; 2672 netif_info(bp, drv, bp->dev, 2673 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2674 fw_health->primary ? "primary" : "backup", status, 2675 status_desc, fw_health->last_fw_reset_cnt); 2676 if (!fw_health->enabled) { 2677 /* Make sure tmr_counter is set and visible to 2678 * bnxt_health_check() before setting enabled to true. 2679 */ 2680 smp_wmb(); 2681 fw_health->enabled = true; 2682 } 2683 goto async_event_process_exit; 2684 } 2685 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2686 netif_notice(bp, hw, bp->dev, 2687 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2688 data1, data2); 2689 goto async_event_process_exit; 2690 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2691 struct bnxt_rx_ring_info *rxr; 2692 u16 grp_idx; 2693 2694 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2695 goto async_event_process_exit; 2696 2697 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2698 BNXT_EVENT_RING_TYPE(data2), data1); 2699 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2700 goto async_event_process_exit; 2701 2702 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2703 if (grp_idx == INVALID_HW_RING_ID) { 2704 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2705 data1); 2706 goto async_event_process_exit; 2707 } 2708 rxr = bp->bnapi[grp_idx]->rx_ring; 2709 bnxt_sched_reset_rxr(bp, rxr); 2710 goto async_event_process_exit; 2711 } 2712 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2713 struct bnxt_fw_health *fw_health = bp->fw_health; 2714 2715 netif_notice(bp, hw, bp->dev, 2716 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2717 data1, data2); 2718 if (fw_health) { 2719 fw_health->echo_req_data1 = data1; 2720 fw_health->echo_req_data2 = data2; 2721 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2722 break; 2723 } 2724 goto async_event_process_exit; 2725 } 2726 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2727 bnxt_ptp_pps_event(bp, data1, data2); 2728 goto async_event_process_exit; 2729 } 2730 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2731 if (bnxt_event_error_report(bp, data1, data2)) 2732 break; 2733 goto async_event_process_exit; 2734 } 2735 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2736 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2737 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2738 if (BNXT_PTP_USE_RTC(bp)) { 2739 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2740 u64 ns; 2741 2742 if (!ptp) 2743 goto async_event_process_exit; 2744 2745 spin_lock_bh(&ptp->ptp_lock); 2746 bnxt_ptp_update_current_time(bp); 2747 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2748 BNXT_PHC_BITS) | ptp->current_time); 2749 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2750 spin_unlock_bh(&ptp->ptp_lock); 2751 } 2752 break; 2753 } 2754 goto async_event_process_exit; 2755 } 2756 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2757 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2758 2759 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2760 goto async_event_process_exit; 2761 } 2762 default: 2763 goto async_event_process_exit; 2764 } 2765 __bnxt_queue_sp_work(bp); 2766 async_event_process_exit: 2767 return 0; 2768 } 2769 2770 static int 
bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2771 { 2772 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2773 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2774 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2775 (struct hwrm_fwd_req_cmpl *)txcmp; 2776 2777 switch (cmpl_type) { 2778 case CMPL_BASE_TYPE_HWRM_DONE: 2779 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2780 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2781 break; 2782 2783 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2784 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2785 2786 if ((vf_id < bp->pf.first_vf_id) || 2787 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2788 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2789 vf_id); 2790 return -EINVAL; 2791 } 2792 2793 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2794 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2795 break; 2796 2797 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2798 bnxt_async_event_process(bp, 2799 (struct hwrm_async_event_cmpl *)txcmp); 2800 break; 2801 2802 default: 2803 break; 2804 } 2805 2806 return 0; 2807 } 2808 2809 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2810 { 2811 struct bnxt_napi *bnapi = dev_instance; 2812 struct bnxt *bp = bnapi->bp; 2813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2814 u32 cons = RING_CMP(cpr->cp_raw_cons); 2815 2816 cpr->event_ctr++; 2817 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2818 napi_schedule(&bnapi->napi); 2819 return IRQ_HANDLED; 2820 } 2821 2822 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2823 { 2824 u32 raw_cons = cpr->cp_raw_cons; 2825 u16 cons = RING_CMP(raw_cons); 2826 struct tx_cmp *txcmp; 2827 2828 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2829 2830 return TX_CMP_VALID(txcmp, raw_cons); 2831 } 2832 2833 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2834 { 2835 struct bnxt_napi *bnapi = dev_instance; 2836 struct bnxt *bp = bnapi->bp; 2837 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2838 u32 cons = RING_CMP(cpr->cp_raw_cons); 2839 u32 int_status; 2840 2841 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2842 2843 if (!bnxt_has_work(bp, cpr)) { 2844 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2845 /* return if erroneous interrupt */ 2846 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2847 return IRQ_NONE; 2848 } 2849 2850 /* disable ring IRQ */ 2851 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2852 2853 /* Return here if interrupt is shared and is disabled. */ 2854 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2855 return IRQ_HANDLED; 2856 2857 napi_schedule(&bnapi->napi); 2858 return IRQ_HANDLED; 2859 } 2860 2861 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2862 int budget) 2863 { 2864 struct bnxt_napi *bnapi = cpr->bnapi; 2865 u32 raw_cons = cpr->cp_raw_cons; 2866 u32 cons; 2867 int rx_pkts = 0; 2868 u8 event = 0; 2869 struct tx_cmp *txcmp; 2870 2871 cpr->has_more_work = 0; 2872 cpr->had_work_done = 1; 2873 while (1) { 2874 u8 cmp_type; 2875 int rc; 2876 2877 cons = RING_CMP(raw_cons); 2878 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2879 2880 if (!TX_CMP_VALID(txcmp, raw_cons)) 2881 break; 2882 2883 /* The valid test of the entry must be done first before 2884 * reading any further. 
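* Once the entry is known to be valid, the completion type below selects
* TX completion, RX completion, or HWRM/async event handling.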
2885 */ 2886 dma_rmb(); 2887 cmp_type = TX_CMP_TYPE(txcmp); 2888 if (cmp_type == CMP_TYPE_TX_L2_CMP || 2889 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 2890 u32 opaque = txcmp->tx_cmp_opaque; 2891 struct bnxt_tx_ring_info *txr; 2892 u16 tx_freed; 2893 2894 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2895 event |= BNXT_TX_CMP_EVENT; 2896 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 2897 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 2898 else 2899 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2900 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2901 bp->tx_ring_mask; 2902 /* return full budget so NAPI will complete. */ 2903 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2904 rx_pkts = budget; 2905 raw_cons = NEXT_RAW_CMP(raw_cons); 2906 if (budget) 2907 cpr->has_more_work = 1; 2908 break; 2909 } 2910 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 2911 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2912 if (likely(budget)) 2913 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2914 else 2915 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2916 &event); 2917 if (likely(rc >= 0)) 2918 rx_pkts += rc; 2919 /* Increment rx_pkts when rc is -ENOMEM to count towards 2920 * the NAPI budget. Otherwise, we may potentially loop 2921 * here forever if we consistently cannot allocate 2922 * buffers. 2923 */ 2924 else if (rc == -ENOMEM && budget) 2925 rx_pkts++; 2926 else if (rc == -EBUSY) /* partial completion */ 2927 break; 2928 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 2929 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 2930 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 2931 bnxt_hwrm_handler(bp, txcmp); 2932 } 2933 raw_cons = NEXT_RAW_CMP(raw_cons); 2934 2935 if (rx_pkts && rx_pkts == budget) { 2936 cpr->has_more_work = 1; 2937 break; 2938 } 2939 } 2940 2941 if (event & BNXT_REDIRECT_EVENT) 2942 xdp_do_flush(); 2943 2944 if (event & BNXT_TX_EVENT) { 2945 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 2946 u16 prod = txr->tx_prod; 2947 2948 /* Sync BD data before updating doorbell */ 2949 wmb(); 2950 2951 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2952 } 2953 2954 cpr->cp_raw_cons = raw_cons; 2955 bnapi->events |= event; 2956 return rx_pkts; 2957 } 2958 2959 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2960 int budget) 2961 { 2962 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 2963 bnapi->tx_int(bp, bnapi, budget); 2964 2965 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2966 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2967 2968 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2969 } 2970 if (bnapi->events & BNXT_AGG_EVENT) { 2971 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2972 2973 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2974 } 2975 bnapi->events &= BNXT_TX_CMP_EVENT; 2976 } 2977 2978 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2979 int budget) 2980 { 2981 struct bnxt_napi *bnapi = cpr->bnapi; 2982 int rx_pkts; 2983 2984 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2985 2986 /* ACK completion ring before freeing tx ring and producing new 2987 * buffers in rx/agg rings to prevent overflowing the completion 2988 * ring. 
2989 */ 2990 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2991 2992 __bnxt_poll_work_done(bp, bnapi, budget); 2993 return rx_pkts; 2994 } 2995 2996 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2997 { 2998 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2999 struct bnxt *bp = bnapi->bp; 3000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3001 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3002 struct tx_cmp *txcmp; 3003 struct rx_cmp_ext *rxcmp1; 3004 u32 cp_cons, tmp_raw_cons; 3005 u32 raw_cons = cpr->cp_raw_cons; 3006 bool flush_xdp = false; 3007 u32 rx_pkts = 0; 3008 u8 event = 0; 3009 3010 while (1) { 3011 int rc; 3012 3013 cp_cons = RING_CMP(raw_cons); 3014 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3015 3016 if (!TX_CMP_VALID(txcmp, raw_cons)) 3017 break; 3018 3019 /* The valid test of the entry must be done first before 3020 * reading any further. 3021 */ 3022 dma_rmb(); 3023 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 3024 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 3025 cp_cons = RING_CMP(tmp_raw_cons); 3026 rxcmp1 = (struct rx_cmp_ext *) 3027 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3028 3029 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 3030 break; 3031 3032 /* force an error to recycle the buffer */ 3033 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 3034 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 3035 3036 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3037 if (likely(rc == -EIO) && budget) 3038 rx_pkts++; 3039 else if (rc == -EBUSY) /* partial completion */ 3040 break; 3041 if (event & BNXT_REDIRECT_EVENT) 3042 flush_xdp = true; 3043 } else if (unlikely(TX_CMP_TYPE(txcmp) == 3044 CMPL_BASE_TYPE_HWRM_DONE)) { 3045 bnxt_hwrm_handler(bp, txcmp); 3046 } else { 3047 netdev_err(bp->dev, 3048 "Invalid completion received on special ring\n"); 3049 } 3050 raw_cons = NEXT_RAW_CMP(raw_cons); 3051 3052 if (rx_pkts == budget) 3053 break; 3054 } 3055 3056 cpr->cp_raw_cons = raw_cons; 3057 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 3058 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3059 3060 if (event & BNXT_AGG_EVENT) 3061 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3062 if (flush_xdp) 3063 xdp_do_flush(); 3064 3065 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 3066 napi_complete_done(napi, rx_pkts); 3067 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3068 } 3069 return rx_pkts; 3070 } 3071 3072 static int bnxt_poll(struct napi_struct *napi, int budget) 3073 { 3074 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3075 struct bnxt *bp = bnapi->bp; 3076 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3077 int work_done = 0; 3078 3079 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3080 napi_complete(napi); 3081 return 0; 3082 } 3083 while (1) { 3084 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3085 3086 if (work_done >= budget) { 3087 if (!budget) 3088 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3089 break; 3090 } 3091 3092 if (!bnxt_has_work(bp, cpr)) { 3093 if (napi_complete_done(napi, work_done)) 3094 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3095 break; 3096 } 3097 } 3098 if (bp->flags & BNXT_FLAG_DIM) { 3099 struct dim_sample dim_sample = {}; 3100 3101 dim_update_sample(cpr->event_ctr, 3102 cpr->rx_packets, 3103 cpr->rx_bytes, 3104 &dim_sample); 3105 net_dim(&cpr->dim, dim_sample); 3106 } 3107 return work_done; 3108 } 3109 3110 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3111 { 3112 struct bnxt_cp_ring_info *cpr = 
&bnapi->cp_ring; 3113 int i, work_done = 0; 3114 3115 for (i = 0; i < cpr->cp_ring_count; i++) { 3116 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3117 3118 if (cpr2->had_nqe_notify) { 3119 work_done += __bnxt_poll_work(bp, cpr2, 3120 budget - work_done); 3121 cpr->has_more_work |= cpr2->has_more_work; 3122 } 3123 } 3124 return work_done; 3125 } 3126 3127 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3128 u64 dbr_type, int budget) 3129 { 3130 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3131 int i; 3132 3133 for (i = 0; i < cpr->cp_ring_count; i++) { 3134 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3135 struct bnxt_db_info *db; 3136 3137 if (cpr2->had_work_done) { 3138 u32 tgl = 0; 3139 3140 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3141 cpr2->had_nqe_notify = 0; 3142 tgl = cpr2->toggle; 3143 } 3144 db = &cpr2->cp_db; 3145 bnxt_writeq(bp, 3146 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3147 DB_RING_IDX(db, cpr2->cp_raw_cons), 3148 db->doorbell); 3149 cpr2->had_work_done = 0; 3150 } 3151 } 3152 __bnxt_poll_work_done(bp, bnapi, budget); 3153 } 3154 3155 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3156 { 3157 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3158 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3159 struct bnxt_cp_ring_info *cpr_rx; 3160 u32 raw_cons = cpr->cp_raw_cons; 3161 struct bnxt *bp = bnapi->bp; 3162 struct nqe_cn *nqcmp; 3163 int work_done = 0; 3164 u32 cons; 3165 3166 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3167 napi_complete(napi); 3168 return 0; 3169 } 3170 if (cpr->has_more_work) { 3171 cpr->has_more_work = 0; 3172 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3173 } 3174 while (1) { 3175 u16 type; 3176 3177 cons = RING_CMP(raw_cons); 3178 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3179 3180 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3181 if (cpr->has_more_work) 3182 break; 3183 3184 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3185 budget); 3186 cpr->cp_raw_cons = raw_cons; 3187 if (napi_complete_done(napi, work_done)) 3188 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3189 cpr->cp_raw_cons); 3190 goto poll_done; 3191 } 3192 3193 /* The valid test of the entry must be done first before 3194 * reading any further. 
3195 */ 3196 dma_rmb(); 3197 3198 type = le16_to_cpu(nqcmp->type); 3199 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3200 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3201 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3202 struct bnxt_cp_ring_info *cpr2; 3203 3204 /* No more budget for RX work */ 3205 if (budget && work_done >= budget && 3206 cq_type == BNXT_NQ_HDL_TYPE_RX) 3207 break; 3208 3209 idx = BNXT_NQ_HDL_IDX(idx); 3210 cpr2 = &cpr->cp_ring_arr[idx]; 3211 cpr2->had_nqe_notify = 1; 3212 cpr2->toggle = NQE_CN_TOGGLE(type); 3213 work_done += __bnxt_poll_work(bp, cpr2, 3214 budget - work_done); 3215 cpr->has_more_work |= cpr2->has_more_work; 3216 } else { 3217 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3218 } 3219 raw_cons = NEXT_RAW_CMP(raw_cons); 3220 } 3221 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3222 if (raw_cons != cpr->cp_raw_cons) { 3223 cpr->cp_raw_cons = raw_cons; 3224 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3225 } 3226 poll_done: 3227 cpr_rx = &cpr->cp_ring_arr[0]; 3228 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3229 (bp->flags & BNXT_FLAG_DIM)) { 3230 struct dim_sample dim_sample = {}; 3231 3232 dim_update_sample(cpr->event_ctr, 3233 cpr_rx->rx_packets, 3234 cpr_rx->rx_bytes, 3235 &dim_sample); 3236 net_dim(&cpr->dim, dim_sample); 3237 } 3238 return work_done; 3239 } 3240 3241 static void bnxt_free_tx_skbs(struct bnxt *bp) 3242 { 3243 int i, max_idx; 3244 struct pci_dev *pdev = bp->pdev; 3245 3246 if (!bp->tx_ring) 3247 return; 3248 3249 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3250 for (i = 0; i < bp->tx_nr_rings; i++) { 3251 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3252 int j; 3253 3254 if (!txr->tx_buf_ring) 3255 continue; 3256 3257 for (j = 0; j < max_idx;) { 3258 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 3259 struct sk_buff *skb; 3260 int k, last; 3261 3262 if (i < bp->tx_nr_rings_xdp && 3263 tx_buf->action == XDP_REDIRECT) { 3264 dma_unmap_single(&pdev->dev, 3265 dma_unmap_addr(tx_buf, mapping), 3266 dma_unmap_len(tx_buf, len), 3267 DMA_TO_DEVICE); 3268 xdp_return_frame(tx_buf->xdpf); 3269 tx_buf->action = 0; 3270 tx_buf->xdpf = NULL; 3271 j++; 3272 continue; 3273 } 3274 3275 skb = tx_buf->skb; 3276 if (!skb) { 3277 j++; 3278 continue; 3279 } 3280 3281 tx_buf->skb = NULL; 3282 3283 if (tx_buf->is_push) { 3284 dev_kfree_skb(skb); 3285 j += 2; 3286 continue; 3287 } 3288 3289 dma_unmap_single(&pdev->dev, 3290 dma_unmap_addr(tx_buf, mapping), 3291 skb_headlen(skb), 3292 DMA_TO_DEVICE); 3293 3294 last = tx_buf->nr_frags; 3295 j += 2; 3296 for (k = 0; k < last; k++, j++) { 3297 int ring_idx = j & bp->tx_ring_mask; 3298 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 3299 3300 tx_buf = &txr->tx_buf_ring[ring_idx]; 3301 dma_unmap_page( 3302 &pdev->dev, 3303 dma_unmap_addr(tx_buf, mapping), 3304 skb_frag_size(frag), DMA_TO_DEVICE); 3305 } 3306 dev_kfree_skb(skb); 3307 } 3308 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 3309 } 3310 } 3311 3312 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 3313 { 3314 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3315 struct pci_dev *pdev = bp->pdev; 3316 struct bnxt_tpa_idx_map *map; 3317 int i, max_idx, max_agg_idx; 3318 3319 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3320 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3321 if (!rxr->rx_tpa) 3322 goto skip_rx_tpa_free; 3323 3324 for (i = 0; i < bp->max_tpa; i++) { 3325 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3326 u8 *data = tpa_info->data; 3327 3328 if (!data) 3329 continue; 3330 3331 
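/* Unmap the TPA buffer before freeing the page fragment that backs it. */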
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 3332 bp->rx_buf_use_size, bp->rx_dir, 3333 DMA_ATTR_WEAK_ORDERING); 3334 3335 tpa_info->data = NULL; 3336 3337 skb_free_frag(data); 3338 } 3339 3340 skip_rx_tpa_free: 3341 if (!rxr->rx_buf_ring) 3342 goto skip_rx_buf_free; 3343 3344 for (i = 0; i < max_idx; i++) { 3345 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3346 dma_addr_t mapping = rx_buf->mapping; 3347 void *data = rx_buf->data; 3348 3349 if (!data) 3350 continue; 3351 3352 rx_buf->data = NULL; 3353 if (BNXT_RX_PAGE_MODE(bp)) { 3354 page_pool_recycle_direct(rxr->page_pool, data); 3355 } else { 3356 dma_unmap_single_attrs(&pdev->dev, mapping, 3357 bp->rx_buf_use_size, bp->rx_dir, 3358 DMA_ATTR_WEAK_ORDERING); 3359 skb_free_frag(data); 3360 } 3361 } 3362 3363 skip_rx_buf_free: 3364 if (!rxr->rx_agg_ring) 3365 goto skip_rx_agg_free; 3366 3367 for (i = 0; i < max_agg_idx; i++) { 3368 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3369 struct page *page = rx_agg_buf->page; 3370 3371 if (!page) 3372 continue; 3373 3374 rx_agg_buf->page = NULL; 3375 __clear_bit(i, rxr->rx_agg_bmap); 3376 3377 page_pool_recycle_direct(rxr->page_pool, page); 3378 } 3379 3380 skip_rx_agg_free: 3381 map = rxr->rx_tpa_idx_map; 3382 if (map) 3383 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3384 } 3385 3386 static void bnxt_free_rx_skbs(struct bnxt *bp) 3387 { 3388 int i; 3389 3390 if (!bp->rx_ring) 3391 return; 3392 3393 for (i = 0; i < bp->rx_nr_rings; i++) 3394 bnxt_free_one_rx_ring_skbs(bp, i); 3395 } 3396 3397 static void bnxt_free_skbs(struct bnxt *bp) 3398 { 3399 bnxt_free_tx_skbs(bp); 3400 bnxt_free_rx_skbs(bp); 3401 } 3402 3403 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3404 { 3405 u8 init_val = ctxm->init_value; 3406 u16 offset = ctxm->init_offset; 3407 u8 *p2 = p; 3408 int i; 3409 3410 if (!init_val) 3411 return; 3412 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3413 memset(p, init_val, len); 3414 return; 3415 } 3416 for (i = 0; i < len; i += ctxm->entry_size) 3417 *(p2 + i + offset) = init_val; 3418 } 3419 3420 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3421 { 3422 struct pci_dev *pdev = bp->pdev; 3423 int i; 3424 3425 if (!rmem->pg_arr) 3426 goto skip_pages; 3427 3428 for (i = 0; i < rmem->nr_pages; i++) { 3429 if (!rmem->pg_arr[i]) 3430 continue; 3431 3432 dma_free_coherent(&pdev->dev, rmem->page_size, 3433 rmem->pg_arr[i], rmem->dma_arr[i]); 3434 3435 rmem->pg_arr[i] = NULL; 3436 } 3437 skip_pages: 3438 if (rmem->pg_tbl) { 3439 size_t pg_tbl_size = rmem->nr_pages * 8; 3440 3441 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3442 pg_tbl_size = rmem->page_size; 3443 dma_free_coherent(&pdev->dev, pg_tbl_size, 3444 rmem->pg_tbl, rmem->pg_tbl_map); 3445 rmem->pg_tbl = NULL; 3446 } 3447 if (rmem->vmem_size && *rmem->vmem) { 3448 vfree(*rmem->vmem); 3449 *rmem->vmem = NULL; 3450 } 3451 } 3452 3453 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3454 { 3455 struct pci_dev *pdev = bp->pdev; 3456 u64 valid_bit = 0; 3457 int i; 3458 3459 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3460 valid_bit = PTU_PTE_VALID; 3461 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3462 size_t pg_tbl_size = rmem->nr_pages * 8; 3463 3464 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3465 pg_tbl_size = rmem->page_size; 3466 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3467 &rmem->pg_tbl_map, 3468 GFP_KERNEL); 3469 if 
(!rmem->pg_tbl) 3470 return -ENOMEM; 3471 } 3472 3473 for (i = 0; i < rmem->nr_pages; i++) { 3474 u64 extra_bits = valid_bit; 3475 3476 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3477 rmem->page_size, 3478 &rmem->dma_arr[i], 3479 GFP_KERNEL); 3480 if (!rmem->pg_arr[i]) 3481 return -ENOMEM; 3482 3483 if (rmem->ctx_mem) 3484 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3485 rmem->page_size); 3486 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3487 if (i == rmem->nr_pages - 2 && 3488 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3489 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3490 else if (i == rmem->nr_pages - 1 && 3491 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3492 extra_bits |= PTU_PTE_LAST; 3493 rmem->pg_tbl[i] = 3494 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3495 } 3496 } 3497 3498 if (rmem->vmem_size) { 3499 *rmem->vmem = vzalloc(rmem->vmem_size); 3500 if (!(*rmem->vmem)) 3501 return -ENOMEM; 3502 } 3503 return 0; 3504 } 3505 3506 static void bnxt_free_tpa_info(struct bnxt *bp) 3507 { 3508 int i, j; 3509 3510 for (i = 0; i < bp->rx_nr_rings; i++) { 3511 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3512 3513 kfree(rxr->rx_tpa_idx_map); 3514 rxr->rx_tpa_idx_map = NULL; 3515 if (rxr->rx_tpa) { 3516 for (j = 0; j < bp->max_tpa; j++) { 3517 kfree(rxr->rx_tpa[j].agg_arr); 3518 rxr->rx_tpa[j].agg_arr = NULL; 3519 } 3520 } 3521 kfree(rxr->rx_tpa); 3522 rxr->rx_tpa = NULL; 3523 } 3524 } 3525 3526 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3527 { 3528 int i, j; 3529 3530 bp->max_tpa = MAX_TPA; 3531 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3532 if (!bp->max_tpa_v2) 3533 return 0; 3534 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3535 } 3536 3537 for (i = 0; i < bp->rx_nr_rings; i++) { 3538 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3539 struct rx_agg_cmp *agg; 3540 3541 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3542 GFP_KERNEL); 3543 if (!rxr->rx_tpa) 3544 return -ENOMEM; 3545 3546 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3547 continue; 3548 for (j = 0; j < bp->max_tpa; j++) { 3549 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3550 if (!agg) 3551 return -ENOMEM; 3552 rxr->rx_tpa[j].agg_arr = agg; 3553 } 3554 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3555 GFP_KERNEL); 3556 if (!rxr->rx_tpa_idx_map) 3557 return -ENOMEM; 3558 } 3559 return 0; 3560 } 3561 3562 static void bnxt_free_rx_rings(struct bnxt *bp) 3563 { 3564 int i; 3565 3566 if (!bp->rx_ring) 3567 return; 3568 3569 bnxt_free_tpa_info(bp); 3570 for (i = 0; i < bp->rx_nr_rings; i++) { 3571 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3572 struct bnxt_ring_struct *ring; 3573 3574 if (rxr->xdp_prog) 3575 bpf_prog_put(rxr->xdp_prog); 3576 3577 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3578 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3579 3580 page_pool_destroy(rxr->page_pool); 3581 rxr->page_pool = NULL; 3582 3583 kfree(rxr->rx_agg_bmap); 3584 rxr->rx_agg_bmap = NULL; 3585 3586 ring = &rxr->rx_ring_struct; 3587 bnxt_free_ring(bp, &ring->ring_mem); 3588 3589 ring = &rxr->rx_agg_ring_struct; 3590 bnxt_free_ring(bp, &ring->ring_mem); 3591 } 3592 } 3593 3594 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3595 struct bnxt_rx_ring_info *rxr, 3596 int numa_node) 3597 { 3598 struct page_pool_params pp = { 0 }; 3599 3600 pp.pool_size = bp->rx_agg_ring_size; 3601 if (BNXT_RX_PAGE_MODE(bp)) 3602 pp.pool_size += bp->rx_ring_size; 3603 pp.nid = numa_node; 3604 pp.napi = &rxr->bnapi->napi; 3605 pp.netdev = bp->dev; 3606 pp.dev = &bp->pdev->dev; 3607 pp.dma_dir = bp->rx_dir; 3608 
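/* Have the page pool handle DMA mapping and device syncs for the RX pages
 * (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV below), syncing up to max_len bytes.
 */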
pp.max_len = PAGE_SIZE; 3609 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3610 3611 rxr->page_pool = page_pool_create(&pp); 3612 if (IS_ERR(rxr->page_pool)) { 3613 int err = PTR_ERR(rxr->page_pool); 3614 3615 rxr->page_pool = NULL; 3616 return err; 3617 } 3618 return 0; 3619 } 3620 3621 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3622 { 3623 int numa_node = dev_to_node(&bp->pdev->dev); 3624 int i, rc = 0, agg_rings = 0, cpu; 3625 3626 if (!bp->rx_ring) 3627 return -ENOMEM; 3628 3629 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3630 agg_rings = 1; 3631 3632 for (i = 0; i < bp->rx_nr_rings; i++) { 3633 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3634 struct bnxt_ring_struct *ring; 3635 int cpu_node; 3636 3637 ring = &rxr->rx_ring_struct; 3638 3639 cpu = cpumask_local_spread(i, numa_node); 3640 cpu_node = cpu_to_node(cpu); 3641 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", 3642 i, cpu_node); 3643 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3644 if (rc) 3645 return rc; 3646 3647 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3648 if (rc < 0) 3649 return rc; 3650 3651 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3652 MEM_TYPE_PAGE_POOL, 3653 rxr->page_pool); 3654 if (rc) { 3655 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3656 return rc; 3657 } 3658 3659 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3660 if (rc) 3661 return rc; 3662 3663 ring->grp_idx = i; 3664 if (agg_rings) { 3665 u16 mem_size; 3666 3667 ring = &rxr->rx_agg_ring_struct; 3668 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3669 if (rc) 3670 return rc; 3671 3672 ring->grp_idx = i; 3673 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3674 mem_size = rxr->rx_agg_bmap_size / 8; 3675 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3676 if (!rxr->rx_agg_bmap) 3677 return -ENOMEM; 3678 } 3679 } 3680 if (bp->flags & BNXT_FLAG_TPA) 3681 rc = bnxt_alloc_tpa_info(bp); 3682 return rc; 3683 } 3684 3685 static void bnxt_free_tx_rings(struct bnxt *bp) 3686 { 3687 int i; 3688 struct pci_dev *pdev = bp->pdev; 3689 3690 if (!bp->tx_ring) 3691 return; 3692 3693 for (i = 0; i < bp->tx_nr_rings; i++) { 3694 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3695 struct bnxt_ring_struct *ring; 3696 3697 if (txr->tx_push) { 3698 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3699 txr->tx_push, txr->tx_push_mapping); 3700 txr->tx_push = NULL; 3701 } 3702 3703 ring = &txr->tx_ring_struct; 3704 3705 bnxt_free_ring(bp, &ring->ring_mem); 3706 } 3707 } 3708 3709 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3710 ((tc) * (bp)->tx_nr_rings_per_tc) 3711 3712 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3713 ((tx) % (bp)->tx_nr_rings_per_tc) 3714 3715 #define BNXT_RING_TO_TC(bp, tx) \ 3716 ((tx) / (bp)->tx_nr_rings_per_tc) 3717 3718 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3719 { 3720 int i, j, rc; 3721 struct pci_dev *pdev = bp->pdev; 3722 3723 bp->tx_push_size = 0; 3724 if (bp->tx_push_thresh) { 3725 int push_size; 3726 3727 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3728 bp->tx_push_thresh); 3729 3730 if (push_size > 256) { 3731 push_size = 0; 3732 bp->tx_push_thresh = 0; 3733 } 3734 3735 bp->tx_push_size = push_size; 3736 } 3737 3738 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3739 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3740 struct bnxt_ring_struct *ring; 3741 u8 qidx; 3742 3743 ring = &txr->tx_ring_struct; 3744 3745 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3746 if (rc) 3747 return rc; 3748 3749 ring->grp_idx = txr->bnapi->index; 3750 if (bp->tx_push_size) { 3751 dma_addr_t mapping; 3752 3753 /* 
One pre-allocated DMA buffer to backup 3754 * TX push operation 3755 */ 3756 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3757 bp->tx_push_size, 3758 &txr->tx_push_mapping, 3759 GFP_KERNEL); 3760 3761 if (!txr->tx_push) 3762 return -ENOMEM; 3763 3764 mapping = txr->tx_push_mapping + 3765 sizeof(struct tx_push_bd); 3766 txr->data_mapping = cpu_to_le64(mapping); 3767 } 3768 qidx = bp->tc_to_qidx[j]; 3769 ring->queue_id = bp->q_info[qidx].queue_id; 3770 spin_lock_init(&txr->xdp_tx_lock); 3771 if (i < bp->tx_nr_rings_xdp) 3772 continue; 3773 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3774 j++; 3775 } 3776 return 0; 3777 } 3778 3779 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3780 { 3781 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3782 3783 kfree(cpr->cp_desc_ring); 3784 cpr->cp_desc_ring = NULL; 3785 ring->ring_mem.pg_arr = NULL; 3786 kfree(cpr->cp_desc_mapping); 3787 cpr->cp_desc_mapping = NULL; 3788 ring->ring_mem.dma_arr = NULL; 3789 } 3790 3791 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3792 { 3793 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3794 if (!cpr->cp_desc_ring) 3795 return -ENOMEM; 3796 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3797 GFP_KERNEL); 3798 if (!cpr->cp_desc_mapping) 3799 return -ENOMEM; 3800 return 0; 3801 } 3802 3803 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3804 { 3805 int i; 3806 3807 if (!bp->bnapi) 3808 return; 3809 for (i = 0; i < bp->cp_nr_rings; i++) { 3810 struct bnxt_napi *bnapi = bp->bnapi[i]; 3811 3812 if (!bnapi) 3813 continue; 3814 bnxt_free_cp_arrays(&bnapi->cp_ring); 3815 } 3816 } 3817 3818 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 3819 { 3820 int i, n = bp->cp_nr_pages; 3821 3822 for (i = 0; i < bp->cp_nr_rings; i++) { 3823 struct bnxt_napi *bnapi = bp->bnapi[i]; 3824 int rc; 3825 3826 if (!bnapi) 3827 continue; 3828 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 3829 if (rc) 3830 return rc; 3831 } 3832 return 0; 3833 } 3834 3835 static void bnxt_free_cp_rings(struct bnxt *bp) 3836 { 3837 int i; 3838 3839 if (!bp->bnapi) 3840 return; 3841 3842 for (i = 0; i < bp->cp_nr_rings; i++) { 3843 struct bnxt_napi *bnapi = bp->bnapi[i]; 3844 struct bnxt_cp_ring_info *cpr; 3845 struct bnxt_ring_struct *ring; 3846 int j; 3847 3848 if (!bnapi) 3849 continue; 3850 3851 cpr = &bnapi->cp_ring; 3852 ring = &cpr->cp_ring_struct; 3853 3854 bnxt_free_ring(bp, &ring->ring_mem); 3855 3856 if (!cpr->cp_ring_arr) 3857 continue; 3858 3859 for (j = 0; j < cpr->cp_ring_count; j++) { 3860 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3861 3862 ring = &cpr2->cp_ring_struct; 3863 bnxt_free_ring(bp, &ring->ring_mem); 3864 bnxt_free_cp_arrays(cpr2); 3865 } 3866 kfree(cpr->cp_ring_arr); 3867 cpr->cp_ring_arr = NULL; 3868 cpr->cp_ring_count = 0; 3869 } 3870 } 3871 3872 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 3873 struct bnxt_cp_ring_info *cpr) 3874 { 3875 struct bnxt_ring_mem_info *rmem; 3876 struct bnxt_ring_struct *ring; 3877 int rc; 3878 3879 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 3880 if (rc) { 3881 bnxt_free_cp_arrays(cpr); 3882 return -ENOMEM; 3883 } 3884 ring = &cpr->cp_ring_struct; 3885 rmem = &ring->ring_mem; 3886 rmem->nr_pages = bp->cp_nr_pages; 3887 rmem->page_size = HW_CMPD_RING_SIZE; 3888 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3889 rmem->dma_arr = cpr->cp_desc_mapping; 3890 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3891 rc = bnxt_alloc_ring(bp, rmem); 3892 if (rc) { 3893 bnxt_free_ring(bp, 
rmem); 3894 bnxt_free_cp_arrays(cpr); 3895 } 3896 return rc; 3897 } 3898 3899 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3900 { 3901 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3902 int i, j, rc, ulp_msix; 3903 int tcs = bp->num_tc; 3904 3905 if (!tcs) 3906 tcs = 1; 3907 ulp_msix = bnxt_get_ulp_msix_num(bp); 3908 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 3909 struct bnxt_napi *bnapi = bp->bnapi[i]; 3910 struct bnxt_cp_ring_info *cpr, *cpr2; 3911 struct bnxt_ring_struct *ring; 3912 int cp_count = 0, k; 3913 int rx = 0, tx = 0; 3914 3915 if (!bnapi) 3916 continue; 3917 3918 cpr = &bnapi->cp_ring; 3919 cpr->bnapi = bnapi; 3920 ring = &cpr->cp_ring_struct; 3921 3922 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3923 if (rc) 3924 return rc; 3925 3926 ring->map_idx = ulp_msix + i; 3927 3928 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3929 continue; 3930 3931 if (i < bp->rx_nr_rings) { 3932 cp_count++; 3933 rx = 1; 3934 } 3935 if (i < bp->tx_nr_rings_xdp) { 3936 cp_count++; 3937 tx = 1; 3938 } else if ((sh && i < bp->tx_nr_rings) || 3939 (!sh && i >= bp->rx_nr_rings)) { 3940 cp_count += tcs; 3941 tx = 1; 3942 } 3943 3944 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 3945 GFP_KERNEL); 3946 if (!cpr->cp_ring_arr) 3947 return -ENOMEM; 3948 cpr->cp_ring_count = cp_count; 3949 3950 for (k = 0; k < cp_count; k++) { 3951 cpr2 = &cpr->cp_ring_arr[k]; 3952 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 3953 if (rc) 3954 return rc; 3955 cpr2->bnapi = bnapi; 3956 cpr2->sw_stats = cpr->sw_stats; 3957 cpr2->cp_idx = k; 3958 if (!k && rx) { 3959 bp->rx_ring[i].rx_cpr = cpr2; 3960 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 3961 } else { 3962 int n, tc = k - rx; 3963 3964 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 3965 bp->tx_ring[n].tx_cpr = cpr2; 3966 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 3967 } 3968 } 3969 if (tx) 3970 j++; 3971 } 3972 return 0; 3973 } 3974 3975 static void bnxt_init_ring_struct(struct bnxt *bp) 3976 { 3977 int i, j; 3978 3979 for (i = 0; i < bp->cp_nr_rings; i++) { 3980 struct bnxt_napi *bnapi = bp->bnapi[i]; 3981 struct bnxt_ring_mem_info *rmem; 3982 struct bnxt_cp_ring_info *cpr; 3983 struct bnxt_rx_ring_info *rxr; 3984 struct bnxt_tx_ring_info *txr; 3985 struct bnxt_ring_struct *ring; 3986 3987 if (!bnapi) 3988 continue; 3989 3990 cpr = &bnapi->cp_ring; 3991 ring = &cpr->cp_ring_struct; 3992 rmem = &ring->ring_mem; 3993 rmem->nr_pages = bp->cp_nr_pages; 3994 rmem->page_size = HW_CMPD_RING_SIZE; 3995 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3996 rmem->dma_arr = cpr->cp_desc_mapping; 3997 rmem->vmem_size = 0; 3998 3999 rxr = bnapi->rx_ring; 4000 if (!rxr) 4001 goto skip_rx; 4002 4003 ring = &rxr->rx_ring_struct; 4004 rmem = &ring->ring_mem; 4005 rmem->nr_pages = bp->rx_nr_pages; 4006 rmem->page_size = HW_RXBD_RING_SIZE; 4007 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4008 rmem->dma_arr = rxr->rx_desc_mapping; 4009 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4010 rmem->vmem = (void **)&rxr->rx_buf_ring; 4011 4012 ring = &rxr->rx_agg_ring_struct; 4013 rmem = &ring->ring_mem; 4014 rmem->nr_pages = bp->rx_agg_nr_pages; 4015 rmem->page_size = HW_RXBD_RING_SIZE; 4016 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4017 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4018 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4019 rmem->vmem = (void **)&rxr->rx_agg_ring; 4020 4021 skip_rx: 4022 bnxt_for_each_napi_tx(j, bnapi, txr) { 4023 ring = &txr->tx_ring_struct; 4024 rmem = &ring->ring_mem; 4025 rmem->nr_pages = bp->tx_nr_pages; 4026 rmem->page_size = HW_TXBD_RING_SIZE; 
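/* Descriptive note (added): point this TX ring's generic ring_mem at its
 * per-page descriptor and DMA-address arrays; vmem backs the software
 * tx_buf_ring.
 */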
4027 rmem->pg_arr = (void **)txr->tx_desc_ring; 4028 rmem->dma_arr = txr->tx_desc_mapping; 4029 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 4030 rmem->vmem = (void **)&txr->tx_buf_ring; 4031 } 4032 } 4033 } 4034 4035 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 4036 { 4037 int i; 4038 u32 prod; 4039 struct rx_bd **rx_buf_ring; 4040 4041 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 4042 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 4043 int j; 4044 struct rx_bd *rxbd; 4045 4046 rxbd = rx_buf_ring[i]; 4047 if (!rxbd) 4048 continue; 4049 4050 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 4051 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 4052 rxbd->rx_bd_opaque = prod; 4053 } 4054 } 4055 } 4056 4057 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 4058 { 4059 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 4060 struct net_device *dev = bp->dev; 4061 u32 prod; 4062 int i; 4063 4064 prod = rxr->rx_prod; 4065 for (i = 0; i < bp->rx_ring_size; i++) { 4066 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 4067 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 4068 ring_nr, i, bp->rx_ring_size); 4069 break; 4070 } 4071 prod = NEXT_RX(prod); 4072 } 4073 rxr->rx_prod = prod; 4074 4075 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 4076 return 0; 4077 4078 prod = rxr->rx_agg_prod; 4079 for (i = 0; i < bp->rx_agg_ring_size; i++) { 4080 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 4081 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 4082 ring_nr, i, bp->rx_ring_size); 4083 break; 4084 } 4085 prod = NEXT_RX_AGG(prod); 4086 } 4087 rxr->rx_agg_prod = prod; 4088 4089 if (rxr->rx_tpa) { 4090 dma_addr_t mapping; 4091 u8 *data; 4092 4093 for (i = 0; i < bp->max_tpa; i++) { 4094 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); 4095 if (!data) 4096 return -ENOMEM; 4097 4098 rxr->rx_tpa[i].data = data; 4099 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 4100 rxr->rx_tpa[i].mapping = mapping; 4101 } 4102 } 4103 return 0; 4104 } 4105 4106 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 4107 { 4108 struct bnxt_rx_ring_info *rxr; 4109 struct bnxt_ring_struct *ring; 4110 u32 type; 4111 4112 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4113 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 4114 4115 if (NET_IP_ALIGN == 2) 4116 type |= RX_BD_FLAGS_SOP; 4117 4118 rxr = &bp->rx_ring[ring_nr]; 4119 ring = &rxr->rx_ring_struct; 4120 bnxt_init_rxbd_pages(ring, type); 4121 4122 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4123 &rxr->bnapi->napi); 4124 4125 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4126 bpf_prog_add(bp->xdp_prog, 1); 4127 rxr->xdp_prog = bp->xdp_prog; 4128 } 4129 ring->fw_ring_id = INVALID_HW_RING_ID; 4130 4131 ring = &rxr->rx_agg_ring_struct; 4132 ring->fw_ring_id = INVALID_HW_RING_ID; 4133 4134 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4135 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4136 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4137 4138 bnxt_init_rxbd_pages(ring, type); 4139 } 4140 4141 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4142 } 4143 4144 static void bnxt_init_cp_rings(struct bnxt *bp) 4145 { 4146 int i, j; 4147 4148 for (i = 0; i < bp->cp_nr_rings; i++) { 4149 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4150 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4151 4152 ring->fw_ring_id = INVALID_HW_RING_ID; 4153 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4154 cpr->rx_ring_coal.coal_bufs = 
bp->rx_coal.coal_bufs; 4155 if (!cpr->cp_ring_arr) 4156 continue; 4157 for (j = 0; j < cpr->cp_ring_count; j++) { 4158 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4159 4160 ring = &cpr2->cp_ring_struct; 4161 ring->fw_ring_id = INVALID_HW_RING_ID; 4162 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4163 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4164 } 4165 } 4166 } 4167 4168 static int bnxt_init_rx_rings(struct bnxt *bp) 4169 { 4170 int i, rc = 0; 4171 4172 if (BNXT_RX_PAGE_MODE(bp)) { 4173 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4174 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4175 } else { 4176 bp->rx_offset = BNXT_RX_OFFSET; 4177 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4178 } 4179 4180 for (i = 0; i < bp->rx_nr_rings; i++) { 4181 rc = bnxt_init_one_rx_ring(bp, i); 4182 if (rc) 4183 break; 4184 } 4185 4186 return rc; 4187 } 4188 4189 static int bnxt_init_tx_rings(struct bnxt *bp) 4190 { 4191 u16 i; 4192 4193 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4194 BNXT_MIN_TX_DESC_CNT); 4195 4196 for (i = 0; i < bp->tx_nr_rings; i++) { 4197 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4198 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4199 4200 ring->fw_ring_id = INVALID_HW_RING_ID; 4201 4202 if (i >= bp->tx_nr_rings_xdp) 4203 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4204 NETDEV_QUEUE_TYPE_TX, 4205 &txr->bnapi->napi); 4206 } 4207 4208 return 0; 4209 } 4210 4211 static void bnxt_free_ring_grps(struct bnxt *bp) 4212 { 4213 kfree(bp->grp_info); 4214 bp->grp_info = NULL; 4215 } 4216 4217 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4218 { 4219 int i; 4220 4221 if (irq_re_init) { 4222 bp->grp_info = kcalloc(bp->cp_nr_rings, 4223 sizeof(struct bnxt_ring_grp_info), 4224 GFP_KERNEL); 4225 if (!bp->grp_info) 4226 return -ENOMEM; 4227 } 4228 for (i = 0; i < bp->cp_nr_rings; i++) { 4229 if (irq_re_init) 4230 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4231 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4232 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4233 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4234 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4235 } 4236 return 0; 4237 } 4238 4239 static void bnxt_free_vnics(struct bnxt *bp) 4240 { 4241 kfree(bp->vnic_info); 4242 bp->vnic_info = NULL; 4243 bp->nr_vnics = 0; 4244 } 4245 4246 static int bnxt_alloc_vnics(struct bnxt *bp) 4247 { 4248 int num_vnics = 1; 4249 4250 #ifdef CONFIG_RFS_ACCEL 4251 if (bp->flags & BNXT_FLAG_RFS) { 4252 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4253 num_vnics++; 4254 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4255 num_vnics += bp->rx_nr_rings; 4256 } 4257 #endif 4258 4259 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4260 num_vnics++; 4261 4262 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4263 GFP_KERNEL); 4264 if (!bp->vnic_info) 4265 return -ENOMEM; 4266 4267 bp->nr_vnics = num_vnics; 4268 return 0; 4269 } 4270 4271 static void bnxt_init_vnics(struct bnxt *bp) 4272 { 4273 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4274 int i; 4275 4276 for (i = 0; i < bp->nr_vnics; i++) { 4277 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4278 int j; 4279 4280 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4281 vnic->vnic_id = i; 4282 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4283 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4284 4285 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4286 4287 if (bp->vnic_info[i].rss_hash_key) { 4288 if (i == BNXT_VNIC_DEFAULT) { 4289 u8 *key = (void 
*)vnic->rss_hash_key; 4290 int k; 4291 4292 if (!bp->rss_hash_key_valid && 4293 !bp->rss_hash_key_updated) { 4294 get_random_bytes(bp->rss_hash_key, 4295 HW_HASH_KEY_SIZE); 4296 bp->rss_hash_key_updated = true; 4297 } 4298 4299 memcpy(vnic->rss_hash_key, bp->rss_hash_key, 4300 HW_HASH_KEY_SIZE); 4301 4302 if (!bp->rss_hash_key_updated) 4303 continue; 4304 4305 bp->rss_hash_key_updated = false; 4306 bp->rss_hash_key_valid = true; 4307 4308 bp->toeplitz_prefix = 0; 4309 for (k = 0; k < 8; k++) { 4310 bp->toeplitz_prefix <<= 8; 4311 bp->toeplitz_prefix |= key[k]; 4312 } 4313 } else { 4314 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4315 HW_HASH_KEY_SIZE); 4316 } 4317 } 4318 } 4319 } 4320 4321 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4322 { 4323 int pages; 4324 4325 pages = ring_size / desc_per_pg; 4326 4327 if (!pages) 4328 return 1; 4329 4330 pages++; 4331 4332 while (pages & (pages - 1)) 4333 pages++; 4334 4335 return pages; 4336 } 4337 4338 void bnxt_set_tpa_flags(struct bnxt *bp) 4339 { 4340 bp->flags &= ~BNXT_FLAG_TPA; 4341 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4342 return; 4343 if (bp->dev->features & NETIF_F_LRO) 4344 bp->flags |= BNXT_FLAG_LRO; 4345 else if (bp->dev->features & NETIF_F_GRO_HW) 4346 bp->flags |= BNXT_FLAG_GRO; 4347 } 4348 4349 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4350 * be set on entry. 4351 */ 4352 void bnxt_set_ring_params(struct bnxt *bp) 4353 { 4354 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4355 u32 agg_factor = 0, agg_ring_size = 0; 4356 4357 /* 8 for CRC and VLAN */ 4358 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4359 4360 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4361 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4362 4363 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 4364 ring_size = bp->rx_ring_size; 4365 bp->rx_agg_ring_size = 0; 4366 bp->rx_agg_nr_pages = 0; 4367 4368 if (bp->flags & BNXT_FLAG_TPA) 4369 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4370 4371 bp->flags &= ~BNXT_FLAG_JUMBO; 4372 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4373 u32 jumbo_factor; 4374 4375 bp->flags |= BNXT_FLAG_JUMBO; 4376 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4377 if (jumbo_factor > agg_factor) 4378 agg_factor = jumbo_factor; 4379 } 4380 if (agg_factor) { 4381 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4382 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4383 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4384 bp->rx_ring_size, ring_size); 4385 bp->rx_ring_size = ring_size; 4386 } 4387 agg_ring_size = ring_size * agg_factor; 4388 4389 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4390 RX_DESC_CNT); 4391 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4392 u32 tmp = agg_ring_size; 4393 4394 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4395 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4396 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4397 tmp, agg_ring_size); 4398 } 4399 bp->rx_agg_ring_size = agg_ring_size; 4400 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4401 4402 if (BNXT_RX_PAGE_MODE(bp)) { 4403 rx_space = PAGE_SIZE; 4404 rx_size = PAGE_SIZE - 4405 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4406 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4407 } else { 4408 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 4409 rx_space = rx_size + NET_SKB_PAD + 4410 
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4411 } 4412 } 4413 4414 bp->rx_buf_use_size = rx_size; 4415 bp->rx_buf_size = rx_space; 4416 4417 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4418 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4419 4420 ring_size = bp->tx_ring_size; 4421 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4422 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4423 4424 max_rx_cmpl = bp->rx_ring_size; 4425 /* MAX TPA needs to be added because TPA_START completions are 4426 * immediately recycled, so the TPA completions are not bound by 4427 * the RX ring size. 4428 */ 4429 if (bp->flags & BNXT_FLAG_TPA) 4430 max_rx_cmpl += bp->max_tpa; 4431 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4432 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4433 bp->cp_ring_size = ring_size; 4434 4435 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4436 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4437 bp->cp_nr_pages = MAX_CP_PAGES; 4438 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4439 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4440 ring_size, bp->cp_ring_size); 4441 } 4442 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4443 bp->cp_ring_mask = bp->cp_bit - 1; 4444 } 4445 4446 /* Changing allocation mode of RX rings. 4447 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4448 */ 4449 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4450 { 4451 struct net_device *dev = bp->dev; 4452 4453 if (page_mode) { 4454 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 4455 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4456 4457 if (bp->xdp_prog->aux->xdp_has_frags) 4458 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4459 else 4460 dev->max_mtu = 4461 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4462 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4463 bp->flags |= BNXT_FLAG_JUMBO; 4464 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4465 } else { 4466 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4467 bp->rx_skb_func = bnxt_rx_page_skb; 4468 } 4469 bp->rx_dir = DMA_BIDIRECTIONAL; 4470 /* Disable LRO or GRO_HW */ 4471 netdev_update_features(dev); 4472 } else { 4473 dev->max_mtu = bp->max_mtu; 4474 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4475 bp->rx_dir = DMA_FROM_DEVICE; 4476 bp->rx_skb_func = bnxt_rx_skb; 4477 } 4478 return 0; 4479 } 4480 4481 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4482 { 4483 int i; 4484 struct bnxt_vnic_info *vnic; 4485 struct pci_dev *pdev = bp->pdev; 4486 4487 if (!bp->vnic_info) 4488 return; 4489 4490 for (i = 0; i < bp->nr_vnics; i++) { 4491 vnic = &bp->vnic_info[i]; 4492 4493 kfree(vnic->fw_grp_ids); 4494 vnic->fw_grp_ids = NULL; 4495 4496 kfree(vnic->uc_list); 4497 vnic->uc_list = NULL; 4498 4499 if (vnic->mc_list) { 4500 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4501 vnic->mc_list, vnic->mc_list_mapping); 4502 vnic->mc_list = NULL; 4503 } 4504 4505 if (vnic->rss_table) { 4506 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4507 vnic->rss_table, 4508 vnic->rss_table_dma_addr); 4509 vnic->rss_table = NULL; 4510 } 4511 4512 vnic->rss_hash_key = NULL; 4513 vnic->flags = 0; 4514 } 4515 } 4516 4517 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4518 { 4519 int i, rc = 0, size; 4520 struct bnxt_vnic_info *vnic; 4521 struct pci_dev *pdev = bp->pdev; 4522 int max_rings; 4523 4524 for (i = 0; i < bp->nr_vnics; i++) { 4525 vnic = &bp->vnic_info[i]; 4526 4527 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4528 int mem_size = 
(BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4529 4530 if (mem_size > 0) { 4531 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4532 if (!vnic->uc_list) { 4533 rc = -ENOMEM; 4534 goto out; 4535 } 4536 } 4537 } 4538 4539 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4540 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4541 vnic->mc_list = 4542 dma_alloc_coherent(&pdev->dev, 4543 vnic->mc_list_size, 4544 &vnic->mc_list_mapping, 4545 GFP_KERNEL); 4546 if (!vnic->mc_list) { 4547 rc = -ENOMEM; 4548 goto out; 4549 } 4550 } 4551 4552 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4553 goto vnic_skip_grps; 4554 4555 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4556 max_rings = bp->rx_nr_rings; 4557 else 4558 max_rings = 1; 4559 4560 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4561 if (!vnic->fw_grp_ids) { 4562 rc = -ENOMEM; 4563 goto out; 4564 } 4565 vnic_skip_grps: 4566 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4567 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4568 continue; 4569 4570 /* Allocate rss table and hash key */ 4571 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4572 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4573 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4574 4575 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4576 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4577 vnic->rss_table_size, 4578 &vnic->rss_table_dma_addr, 4579 GFP_KERNEL); 4580 if (!vnic->rss_table) { 4581 rc = -ENOMEM; 4582 goto out; 4583 } 4584 4585 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4586 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4587 } 4588 return 0; 4589 4590 out: 4591 return rc; 4592 } 4593 4594 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4595 { 4596 struct bnxt_hwrm_wait_token *token; 4597 4598 dma_pool_destroy(bp->hwrm_dma_pool); 4599 bp->hwrm_dma_pool = NULL; 4600 4601 rcu_read_lock(); 4602 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4603 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4604 rcu_read_unlock(); 4605 } 4606 4607 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4608 { 4609 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4610 BNXT_HWRM_DMA_SIZE, 4611 BNXT_HWRM_DMA_ALIGN, 0); 4612 if (!bp->hwrm_dma_pool) 4613 return -ENOMEM; 4614 4615 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4616 4617 return 0; 4618 } 4619 4620 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4621 { 4622 kfree(stats->hw_masks); 4623 stats->hw_masks = NULL; 4624 kfree(stats->sw_stats); 4625 stats->sw_stats = NULL; 4626 if (stats->hw_stats) { 4627 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4628 stats->hw_stats_map); 4629 stats->hw_stats = NULL; 4630 } 4631 } 4632 4633 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4634 bool alloc_masks) 4635 { 4636 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4637 &stats->hw_stats_map, GFP_KERNEL); 4638 if (!stats->hw_stats) 4639 return -ENOMEM; 4640 4641 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4642 if (!stats->sw_stats) 4643 goto stats_mem_err; 4644 4645 if (alloc_masks) { 4646 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4647 if (!stats->hw_masks) 4648 goto stats_mem_err; 4649 } 4650 return 0; 4651 4652 stats_mem_err: 4653 bnxt_free_stats_mem(bp, stats); 4654 return -ENOMEM; 4655 } 4656 4657 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4658 { 4659 int i; 4660 4661 for (i = 0; i < count; i++) 4662 mask_arr[i] = mask; 4663 } 4664 4665 static void bnxt_copy_hw_masks(u64 
*mask_arr, __le64 *hw_mask_arr, int count) 4666 { 4667 int i; 4668 4669 for (i = 0; i < count; i++) 4670 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4671 } 4672 4673 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4674 struct bnxt_stats_mem *stats) 4675 { 4676 struct hwrm_func_qstats_ext_output *resp; 4677 struct hwrm_func_qstats_ext_input *req; 4678 __le64 *hw_masks; 4679 int rc; 4680 4681 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 4682 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4683 return -EOPNOTSUPP; 4684 4685 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 4686 if (rc) 4687 return rc; 4688 4689 req->fid = cpu_to_le16(0xffff); 4690 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4691 4692 resp = hwrm_req_hold(bp, req); 4693 rc = hwrm_req_send(bp, req); 4694 if (!rc) { 4695 hw_masks = &resp->rx_ucast_pkts; 4696 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 4697 } 4698 hwrm_req_drop(bp, req); 4699 return rc; 4700 } 4701 4702 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 4703 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 4704 4705 static void bnxt_init_stats(struct bnxt *bp) 4706 { 4707 struct bnxt_napi *bnapi = bp->bnapi[0]; 4708 struct bnxt_cp_ring_info *cpr; 4709 struct bnxt_stats_mem *stats; 4710 __le64 *rx_stats, *tx_stats; 4711 int rc, rx_count, tx_count; 4712 u64 *rx_masks, *tx_masks; 4713 u64 mask; 4714 u8 flags; 4715 4716 cpr = &bnapi->cp_ring; 4717 stats = &cpr->stats; 4718 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 4719 if (rc) { 4720 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4721 mask = (1ULL << 48) - 1; 4722 else 4723 mask = -1ULL; 4724 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 4725 } 4726 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4727 stats = &bp->port_stats; 4728 rx_stats = stats->hw_stats; 4729 rx_masks = stats->hw_masks; 4730 rx_count = sizeof(struct rx_port_stats) / 8; 4731 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4732 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4733 tx_count = sizeof(struct tx_port_stats) / 8; 4734 4735 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 4736 rc = bnxt_hwrm_port_qstats(bp, flags); 4737 if (rc) { 4738 mask = (1ULL << 40) - 1; 4739 4740 bnxt_fill_masks(rx_masks, mask, rx_count); 4741 bnxt_fill_masks(tx_masks, mask, tx_count); 4742 } else { 4743 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4744 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 4745 bnxt_hwrm_port_qstats(bp, 0); 4746 } 4747 } 4748 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 4749 stats = &bp->rx_port_stats_ext; 4750 rx_stats = stats->hw_stats; 4751 rx_masks = stats->hw_masks; 4752 rx_count = sizeof(struct rx_port_stats_ext) / 8; 4753 stats = &bp->tx_port_stats_ext; 4754 tx_stats = stats->hw_stats; 4755 tx_masks = stats->hw_masks; 4756 tx_count = sizeof(struct tx_port_stats_ext) / 8; 4757 4758 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4759 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 4760 if (rc) { 4761 mask = (1ULL << 40) - 1; 4762 4763 bnxt_fill_masks(rx_masks, mask, rx_count); 4764 if (tx_stats) 4765 bnxt_fill_masks(tx_masks, mask, tx_count); 4766 } else { 4767 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4768 if (tx_stats) 4769 bnxt_copy_hw_masks(tx_masks, tx_stats, 4770 tx_count); 4771 bnxt_hwrm_port_qstats_ext(bp, 0); 4772 } 4773 } 4774 } 4775 4776 static void bnxt_free_port_stats(struct bnxt *bp) 4777 { 4778 bp->flags &= ~BNXT_FLAG_PORT_STATS; 4779 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 4780 4781 bnxt_free_stats_mem(bp, &bp->port_stats); 4782 
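/* Descriptive note (added): the extended rx/tx port stats are allocated
 * separately and only when the firmware supports them (see
 * bnxt_alloc_stats()), so they are freed individually here as well.
 */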
bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 4783 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 4784 } 4785 4786 static void bnxt_free_ring_stats(struct bnxt *bp) 4787 { 4788 int i; 4789 4790 if (!bp->bnapi) 4791 return; 4792 4793 for (i = 0; i < bp->cp_nr_rings; i++) { 4794 struct bnxt_napi *bnapi = bp->bnapi[i]; 4795 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4796 4797 bnxt_free_stats_mem(bp, &cpr->stats); 4798 4799 kfree(cpr->sw_stats); 4800 cpr->sw_stats = NULL; 4801 } 4802 } 4803 4804 static int bnxt_alloc_stats(struct bnxt *bp) 4805 { 4806 u32 size, i; 4807 int rc; 4808 4809 size = bp->hw_ring_stats_size; 4810 4811 for (i = 0; i < bp->cp_nr_rings; i++) { 4812 struct bnxt_napi *bnapi = bp->bnapi[i]; 4813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4814 4815 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); 4816 if (!cpr->sw_stats) 4817 return -ENOMEM; 4818 4819 cpr->stats.len = size; 4820 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4821 if (rc) 4822 return rc; 4823 4824 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4825 } 4826 4827 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4828 return 0; 4829 4830 if (bp->port_stats.hw_stats) 4831 goto alloc_ext_stats; 4832 4833 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4834 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4835 if (rc) 4836 return rc; 4837 4838 bp->flags |= BNXT_FLAG_PORT_STATS; 4839 4840 alloc_ext_stats: 4841 /* Display extended statistics only if FW supports it */ 4842 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4843 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4844 return 0; 4845 4846 if (bp->rx_port_stats_ext.hw_stats) 4847 goto alloc_tx_ext_stats; 4848 4849 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4850 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4851 /* Extended stats are optional */ 4852 if (rc) 4853 return 0; 4854 4855 alloc_tx_ext_stats: 4856 if (bp->tx_port_stats_ext.hw_stats) 4857 return 0; 4858 4859 if (bp->hwrm_spec_code >= 0x10902 || 4860 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4861 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4862 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4863 /* Extended stats are optional */ 4864 if (rc) 4865 return 0; 4866 } 4867 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4868 return 0; 4869 } 4870 4871 static void bnxt_clear_ring_indices(struct bnxt *bp) 4872 { 4873 int i, j; 4874 4875 if (!bp->bnapi) 4876 return; 4877 4878 for (i = 0; i < bp->cp_nr_rings; i++) { 4879 struct bnxt_napi *bnapi = bp->bnapi[i]; 4880 struct bnxt_cp_ring_info *cpr; 4881 struct bnxt_rx_ring_info *rxr; 4882 struct bnxt_tx_ring_info *txr; 4883 4884 if (!bnapi) 4885 continue; 4886 4887 cpr = &bnapi->cp_ring; 4888 cpr->cp_raw_cons = 0; 4889 4890 bnxt_for_each_napi_tx(j, bnapi, txr) { 4891 txr->tx_prod = 0; 4892 txr->tx_cons = 0; 4893 txr->tx_hw_cons = 0; 4894 } 4895 4896 rxr = bnapi->rx_ring; 4897 if (rxr) { 4898 rxr->rx_prod = 0; 4899 rxr->rx_agg_prod = 0; 4900 rxr->rx_sw_agg_prod = 0; 4901 rxr->rx_next_cons = 0; 4902 } 4903 bnapi->events = 0; 4904 } 4905 } 4906 4907 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 4908 { 4909 u8 type = fltr->type, flags = fltr->flags; 4910 4911 INIT_LIST_HEAD(&fltr->list); 4912 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || 4913 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) 4914 list_add_tail(&fltr->list, &bp->usr_fltr_list); 4915 } 4916 4917 void bnxt_del_one_usr_fltr(struct bnxt 
*bp, struct bnxt_filter_base *fltr) 4918 { 4919 if (!list_empty(&fltr->list)) 4920 list_del_init(&fltr->list); 4921 } 4922 4923 void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) 4924 { 4925 struct bnxt_filter_base *usr_fltr, *tmp; 4926 4927 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 4928 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) 4929 continue; 4930 bnxt_del_one_usr_fltr(bp, usr_fltr); 4931 } 4932 } 4933 4934 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 4935 { 4936 hlist_del(&fltr->hash); 4937 bnxt_del_one_usr_fltr(bp, fltr); 4938 if (fltr->flags) { 4939 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 4940 bp->ntp_fltr_count--; 4941 } 4942 kfree(fltr); 4943 } 4944 4945 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 4946 { 4947 int i; 4948 4949 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4950 * safe to delete the hash table. 4951 */ 4952 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4953 struct hlist_head *head; 4954 struct hlist_node *tmp; 4955 struct bnxt_ntuple_filter *fltr; 4956 4957 head = &bp->ntp_fltr_hash_tbl[i]; 4958 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 4959 bnxt_del_l2_filter(bp, fltr->l2_fltr); 4960 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 4961 !list_empty(&fltr->base.list))) 4962 continue; 4963 bnxt_del_fltr(bp, &fltr->base); 4964 } 4965 } 4966 if (!all) 4967 return; 4968 4969 bitmap_free(bp->ntp_fltr_bmap); 4970 bp->ntp_fltr_bmap = NULL; 4971 bp->ntp_fltr_count = 0; 4972 } 4973 4974 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4975 { 4976 int i, rc = 0; 4977 4978 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 4979 return 0; 4980 4981 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4982 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4983 4984 bp->ntp_fltr_count = 0; 4985 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); 4986 4987 if (!bp->ntp_fltr_bmap) 4988 rc = -ENOMEM; 4989 4990 return rc; 4991 } 4992 4993 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 4994 { 4995 int i; 4996 4997 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 4998 struct hlist_head *head; 4999 struct hlist_node *tmp; 5000 struct bnxt_l2_filter *fltr; 5001 5002 head = &bp->l2_fltr_hash_tbl[i]; 5003 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5004 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5005 !list_empty(&fltr->base.list))) 5006 continue; 5007 bnxt_del_fltr(bp, &fltr->base); 5008 } 5009 } 5010 } 5011 5012 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 5013 { 5014 int i; 5015 5016 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 5017 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 5018 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 5019 } 5020 5021 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 5022 { 5023 bnxt_free_vnic_attributes(bp); 5024 bnxt_free_tx_rings(bp); 5025 bnxt_free_rx_rings(bp); 5026 bnxt_free_cp_rings(bp); 5027 bnxt_free_all_cp_arrays(bp); 5028 bnxt_free_ntp_fltrs(bp, false); 5029 bnxt_free_l2_filters(bp, false); 5030 if (irq_re_init) { 5031 bnxt_free_ring_stats(bp); 5032 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 5033 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 5034 bnxt_free_port_stats(bp); 5035 bnxt_free_ring_grps(bp); 5036 bnxt_free_vnics(bp); 5037 kfree(bp->tx_ring_map); 5038 bp->tx_ring_map = NULL; 5039 kfree(bp->tx_ring); 5040 bp->tx_ring = NULL; 5041 kfree(bp->rx_ring); 5042 bp->rx_ring = NULL; 5043 kfree(bp->bnapi); 5044 bp->bnapi = NULL; 5045 } else { 5046 
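/* Descriptive note (added): on a re-init that keeps the IRQs, the ring
 * structures themselves are preserved; only the software
 * producer/consumer indices are reset below.
 */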
bnxt_clear_ring_indices(bp); 5047 } 5048 } 5049 5050 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5051 { 5052 int i, j, rc, size, arr_size; 5053 void *bnapi; 5054 5055 if (irq_re_init) { 5056 /* Allocate bnapi mem pointer array and mem block for 5057 * all queues 5058 */ 5059 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 5060 bp->cp_nr_rings); 5061 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 5062 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 5063 if (!bnapi) 5064 return -ENOMEM; 5065 5066 bp->bnapi = bnapi; 5067 bnapi += arr_size; 5068 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 5069 bp->bnapi[i] = bnapi; 5070 bp->bnapi[i]->index = i; 5071 bp->bnapi[i]->bp = bp; 5072 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5073 struct bnxt_cp_ring_info *cpr = 5074 &bp->bnapi[i]->cp_ring; 5075 5076 cpr->cp_ring_struct.ring_mem.flags = 5077 BNXT_RMEM_RING_PTE_FLAG; 5078 } 5079 } 5080 5081 bp->rx_ring = kcalloc(bp->rx_nr_rings, 5082 sizeof(struct bnxt_rx_ring_info), 5083 GFP_KERNEL); 5084 if (!bp->rx_ring) 5085 return -ENOMEM; 5086 5087 for (i = 0; i < bp->rx_nr_rings; i++) { 5088 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5089 5090 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5091 rxr->rx_ring_struct.ring_mem.flags = 5092 BNXT_RMEM_RING_PTE_FLAG; 5093 rxr->rx_agg_ring_struct.ring_mem.flags = 5094 BNXT_RMEM_RING_PTE_FLAG; 5095 } else { 5096 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 5097 } 5098 rxr->bnapi = bp->bnapi[i]; 5099 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 5100 } 5101 5102 bp->tx_ring = kcalloc(bp->tx_nr_rings, 5103 sizeof(struct bnxt_tx_ring_info), 5104 GFP_KERNEL); 5105 if (!bp->tx_ring) 5106 return -ENOMEM; 5107 5108 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 5109 GFP_KERNEL); 5110 5111 if (!bp->tx_ring_map) 5112 return -ENOMEM; 5113 5114 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5115 j = 0; 5116 else 5117 j = bp->rx_nr_rings; 5118 5119 for (i = 0; i < bp->tx_nr_rings; i++) { 5120 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5121 struct bnxt_napi *bnapi2; 5122 5123 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5124 txr->tx_ring_struct.ring_mem.flags = 5125 BNXT_RMEM_RING_PTE_FLAG; 5126 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 5127 if (i >= bp->tx_nr_rings_xdp) { 5128 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 5129 5130 bnapi2 = bp->bnapi[k]; 5131 txr->txq_index = i - bp->tx_nr_rings_xdp; 5132 txr->tx_napi_idx = 5133 BNXT_RING_TO_TC(bp, txr->txq_index); 5134 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 5135 bnapi2->tx_int = bnxt_tx_int; 5136 } else { 5137 bnapi2 = bp->bnapi[j]; 5138 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5139 bnapi2->tx_ring[0] = txr; 5140 bnapi2->tx_int = bnxt_tx_int_xdp; 5141 j++; 5142 } 5143 txr->bnapi = bnapi2; 5144 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5145 txr->tx_cpr = &bnapi2->cp_ring; 5146 } 5147 5148 rc = bnxt_alloc_stats(bp); 5149 if (rc) 5150 goto alloc_mem_err; 5151 bnxt_init_stats(bp); 5152 5153 rc = bnxt_alloc_ntp_fltrs(bp); 5154 if (rc) 5155 goto alloc_mem_err; 5156 5157 rc = bnxt_alloc_vnics(bp); 5158 if (rc) 5159 goto alloc_mem_err; 5160 } 5161 5162 rc = bnxt_alloc_all_cp_arrays(bp); 5163 if (rc) 5164 goto alloc_mem_err; 5165 5166 bnxt_init_ring_struct(bp); 5167 5168 rc = bnxt_alloc_rx_rings(bp); 5169 if (rc) 5170 goto alloc_mem_err; 5171 5172 rc = bnxt_alloc_tx_rings(bp); 5173 if (rc) 5174 goto alloc_mem_err; 5175 5176 rc = bnxt_alloc_cp_rings(bp); 5177 if (rc) 5178 goto alloc_mem_err; 5179 5180 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | 5181 BNXT_VNIC_MCAST_FLAG | 5182 
BNXT_VNIC_UCAST_FLAG; 5183 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5184 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5185 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5186 5187 rc = bnxt_alloc_vnic_attributes(bp); 5188 if (rc) 5189 goto alloc_mem_err; 5190 return 0; 5191 5192 alloc_mem_err: 5193 bnxt_free_mem(bp, true); 5194 return rc; 5195 } 5196 5197 static void bnxt_disable_int(struct bnxt *bp) 5198 { 5199 int i; 5200 5201 if (!bp->bnapi) 5202 return; 5203 5204 for (i = 0; i < bp->cp_nr_rings; i++) { 5205 struct bnxt_napi *bnapi = bp->bnapi[i]; 5206 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5207 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5208 5209 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5210 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5211 } 5212 } 5213 5214 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5215 { 5216 struct bnxt_napi *bnapi = bp->bnapi[n]; 5217 struct bnxt_cp_ring_info *cpr; 5218 5219 cpr = &bnapi->cp_ring; 5220 return cpr->cp_ring_struct.map_idx; 5221 } 5222 5223 static void bnxt_disable_int_sync(struct bnxt *bp) 5224 { 5225 int i; 5226 5227 if (!bp->irq_tbl) 5228 return; 5229 5230 atomic_inc(&bp->intr_sem); 5231 5232 bnxt_disable_int(bp); 5233 for (i = 0; i < bp->cp_nr_rings; i++) { 5234 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5235 5236 synchronize_irq(bp->irq_tbl[map_idx].vector); 5237 } 5238 } 5239 5240 static void bnxt_enable_int(struct bnxt *bp) 5241 { 5242 int i; 5243 5244 atomic_set(&bp->intr_sem, 0); 5245 for (i = 0; i < bp->cp_nr_rings; i++) { 5246 struct bnxt_napi *bnapi = bp->bnapi[i]; 5247 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5248 5249 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5250 } 5251 } 5252 5253 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5254 bool async_only) 5255 { 5256 DECLARE_BITMAP(async_events_bmap, 256); 5257 u32 *events = (u32 *)async_events_bmap; 5258 struct hwrm_func_drv_rgtr_output *resp; 5259 struct hwrm_func_drv_rgtr_input *req; 5260 u32 flags; 5261 int rc, i; 5262 5263 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5264 if (rc) 5265 return rc; 5266 5267 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5268 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5269 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5270 5271 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5272 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5273 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5274 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5275 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5276 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5277 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5278 req->flags = cpu_to_le32(flags); 5279 req->ver_maj_8b = DRV_VER_MAJ; 5280 req->ver_min_8b = DRV_VER_MIN; 5281 req->ver_upd_8b = DRV_VER_UPD; 5282 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5283 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5284 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5285 5286 if (BNXT_PF(bp)) { 5287 u32 data[8]; 5288 int i; 5289 5290 memset(data, 0, sizeof(data)); 5291 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5292 u16 cmd = bnxt_vf_req_snif[i]; 5293 unsigned int bit, idx; 5294 5295 idx = cmd / 32; 5296 bit = cmd % 32; 5297 data[idx] |= 1 << bit; 5298 } 5299 5300 for (i = 0; i < 8; i++) 5301 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5302 5303 req->enables |= 5304 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5305 } 5306 5307 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5308 req->flags |= cpu_to_le32( 5309 
FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5310 5311 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 5312 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5313 u16 event_id = bnxt_async_events_arr[i]; 5314 5315 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5316 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5317 continue; 5318 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5319 !bp->ptp_cfg) 5320 continue; 5321 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5322 } 5323 if (bmap && bmap_size) { 5324 for (i = 0; i < bmap_size; i++) { 5325 if (test_bit(i, bmap)) 5326 __set_bit(i, async_events_bmap); 5327 } 5328 } 5329 for (i = 0; i < 8; i++) 5330 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5331 5332 if (async_only) 5333 req->enables = 5334 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5335 5336 resp = hwrm_req_hold(bp, req); 5337 rc = hwrm_req_send(bp, req); 5338 if (!rc) { 5339 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5340 if (resp->flags & 5341 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5342 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5343 } 5344 hwrm_req_drop(bp, req); 5345 return rc; 5346 } 5347 5348 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5349 { 5350 struct hwrm_func_drv_unrgtr_input *req; 5351 int rc; 5352 5353 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5354 return 0; 5355 5356 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5357 if (rc) 5358 return rc; 5359 return hwrm_req_send(bp, req); 5360 } 5361 5362 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5363 5364 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5365 { 5366 struct hwrm_tunnel_dst_port_free_input *req; 5367 int rc; 5368 5369 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5370 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5371 return 0; 5372 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5373 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5374 return 0; 5375 5376 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5377 if (rc) 5378 return rc; 5379 5380 req->tunnel_type = tunnel_type; 5381 5382 switch (tunnel_type) { 5383 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5384 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5385 bp->vxlan_port = 0; 5386 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5387 break; 5388 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5389 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5390 bp->nge_port = 0; 5391 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5392 break; 5393 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5394 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5395 bp->vxlan_gpe_port = 0; 5396 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5397 break; 5398 default: 5399 break; 5400 } 5401 5402 rc = hwrm_req_send(bp, req); 5403 if (rc) 5404 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 5405 rc); 5406 if (bp->flags & BNXT_FLAG_TPA) 5407 bnxt_set_tpa(bp, true); 5408 return rc; 5409 } 5410 5411 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5412 u8 tunnel_type) 5413 { 5414 struct hwrm_tunnel_dst_port_alloc_output *resp; 5415 struct hwrm_tunnel_dst_port_alloc_input *req; 5416 int rc; 5417 5418 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5419 if (rc) 5420 return rc; 5421 5422 req->tunnel_type = tunnel_type; 5423 req->tunnel_dst_port_val = port; 5424 5425 resp = hwrm_req_hold(bp, req); 5426 rc = hwrm_req_send(bp, req); 5427 if (rc) { 5428 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5429 rc); 5430 goto err_out; 5431 } 5432 5433 switch (tunnel_type) { 5434 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5435 bp->vxlan_port = port; 5436 bp->vxlan_fw_dst_port_id = 5437 le16_to_cpu(resp->tunnel_dst_port_id); 5438 break; 5439 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5440 bp->nge_port = port; 5441 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5442 break; 5443 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5444 bp->vxlan_gpe_port = port; 5445 bp->vxlan_gpe_fw_dst_port_id = 5446 le16_to_cpu(resp->tunnel_dst_port_id); 5447 break; 5448 default: 5449 break; 5450 } 5451 if (bp->flags & BNXT_FLAG_TPA) 5452 bnxt_set_tpa(bp, true); 5453 5454 err_out: 5455 hwrm_req_drop(bp, req); 5456 return rc; 5457 } 5458 5459 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5460 { 5461 struct hwrm_cfa_l2_set_rx_mask_input *req; 5462 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5463 int rc; 5464 5465 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5466 if (rc) 5467 return rc; 5468 5469 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5470 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5471 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5472 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5473 } 5474 req->mask = cpu_to_le32(vnic->rx_mask); 5475 return hwrm_req_send_silent(bp, req); 5476 } 5477 5478 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5479 { 5480 if (!atomic_dec_and_test(&fltr->refcnt)) 5481 return; 5482 spin_lock_bh(&bp->ntp_fltr_lock); 5483 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5484 spin_unlock_bh(&bp->ntp_fltr_lock); 5485 return; 5486 } 5487 hlist_del_rcu(&fltr->base.hash); 5488 bnxt_del_one_usr_fltr(bp, &fltr->base); 5489 if (fltr->base.flags) { 5490 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5491 bp->ntp_fltr_count--; 5492 } 5493 spin_unlock_bh(&bp->ntp_fltr_lock); 5494 kfree_rcu(fltr, base.rcu); 5495 } 5496 5497 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5498 struct bnxt_l2_key *key, 5499 u32 idx) 5500 { 5501 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5502 struct bnxt_l2_filter *fltr; 5503 5504 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5505 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5506 5507 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5508 l2_key->vlan == key->vlan) 5509 return fltr; 5510 } 5511 return NULL; 5512 } 5513 5514 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5515 struct bnxt_l2_key *key, 5516 u32 idx) 5517 { 5518 struct bnxt_l2_filter *fltr = NULL; 5519 5520 rcu_read_lock(); 5521 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5522 if (fltr) 5523 atomic_inc(&fltr->refcnt); 5524 rcu_read_unlock(); 5525 return fltr; 5526 } 5527 5528 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5529 
(((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5530 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5531 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5532 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5533 5534 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5535 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5536 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5537 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5538 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5539 5540 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5541 { 5542 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5543 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5544 return sizeof(fkeys->addrs.v4addrs) + 5545 sizeof(fkeys->ports); 5546 5547 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5548 return sizeof(fkeys->addrs.v4addrs); 5549 } 5550 5551 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5552 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5553 return sizeof(fkeys->addrs.v6addrs) + 5554 sizeof(fkeys->ports); 5555 5556 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5557 return sizeof(fkeys->addrs.v6addrs); 5558 } 5559 5560 return 0; 5561 } 5562 5563 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5564 const unsigned char *key) 5565 { 5566 u64 prefix = bp->toeplitz_prefix, hash = 0; 5567 struct bnxt_ipv4_tuple tuple4; 5568 struct bnxt_ipv6_tuple tuple6; 5569 int i, j, len = 0; 5570 u8 *four_tuple; 5571 5572 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5573 if (!len) 5574 return 0; 5575 5576 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5577 tuple4.v4addrs = fkeys->addrs.v4addrs; 5578 tuple4.ports = fkeys->ports; 5579 four_tuple = (unsigned char *)&tuple4; 5580 } else { 5581 tuple6.v6addrs = fkeys->addrs.v6addrs; 5582 tuple6.ports = fkeys->ports; 5583 four_tuple = (unsigned char *)&tuple6; 5584 } 5585 5586 for (i = 0, j = 8; i < len; i++, j++) { 5587 u8 byte = four_tuple[i]; 5588 int bit; 5589 5590 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5591 if (byte & 0x80) 5592 hash ^= prefix; 5593 } 5594 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5595 } 5596 5597 /* The valid part of the hash is in the upper 32 bits. 
*/ 5598 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5599 } 5600 5601 #ifdef CONFIG_RFS_ACCEL 5602 static struct bnxt_l2_filter * 5603 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5604 { 5605 struct bnxt_l2_filter *fltr; 5606 u32 idx; 5607 5608 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5609 BNXT_L2_FLTR_HASH_MASK; 5610 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5611 return fltr; 5612 } 5613 #endif 5614 5615 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 5616 struct bnxt_l2_key *key, u32 idx) 5617 { 5618 struct hlist_head *head; 5619 5620 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 5621 fltr->l2_key.vlan = key->vlan; 5622 fltr->base.type = BNXT_FLTR_TYPE_L2; 5623 if (fltr->base.flags) { 5624 int bit_id; 5625 5626 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5627 bp->max_fltr, 0); 5628 if (bit_id < 0) 5629 return -ENOMEM; 5630 fltr->base.sw_id = (u16)bit_id; 5631 bp->ntp_fltr_count++; 5632 } 5633 head = &bp->l2_fltr_hash_tbl[idx]; 5634 hlist_add_head_rcu(&fltr->base.hash, head); 5635 bnxt_insert_usr_fltr(bp, &fltr->base); 5636 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 5637 atomic_set(&fltr->refcnt, 1); 5638 return 0; 5639 } 5640 5641 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 5642 struct bnxt_l2_key *key, 5643 gfp_t gfp) 5644 { 5645 struct bnxt_l2_filter *fltr; 5646 u32 idx; 5647 int rc; 5648 5649 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5650 BNXT_L2_FLTR_HASH_MASK; 5651 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5652 if (fltr) 5653 return fltr; 5654 5655 fltr = kzalloc(sizeof(*fltr), gfp); 5656 if (!fltr) 5657 return ERR_PTR(-ENOMEM); 5658 spin_lock_bh(&bp->ntp_fltr_lock); 5659 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5660 spin_unlock_bh(&bp->ntp_fltr_lock); 5661 if (rc) { 5662 bnxt_del_l2_filter(bp, fltr); 5663 fltr = ERR_PTR(rc); 5664 } 5665 return fltr; 5666 } 5667 5668 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, 5669 struct bnxt_l2_key *key, 5670 u16 flags) 5671 { 5672 struct bnxt_l2_filter *fltr; 5673 u32 idx; 5674 int rc; 5675 5676 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5677 BNXT_L2_FLTR_HASH_MASK; 5678 spin_lock_bh(&bp->ntp_fltr_lock); 5679 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5680 if (fltr) { 5681 fltr = ERR_PTR(-EEXIST); 5682 goto l2_filter_exit; 5683 } 5684 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); 5685 if (!fltr) { 5686 fltr = ERR_PTR(-ENOMEM); 5687 goto l2_filter_exit; 5688 } 5689 fltr->base.flags = flags; 5690 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5691 if (rc) { 5692 spin_unlock_bh(&bp->ntp_fltr_lock); 5693 bnxt_del_l2_filter(bp, fltr); 5694 return ERR_PTR(rc); 5695 } 5696 5697 l2_filter_exit: 5698 spin_unlock_bh(&bp->ntp_fltr_lock); 5699 return fltr; 5700 } 5701 5702 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 5703 { 5704 #ifdef CONFIG_BNXT_SRIOV 5705 struct bnxt_vf_info *vf = &pf->vf[vf_idx]; 5706 5707 return vf->fw_fid; 5708 #else 5709 return INVALID_HW_RING_ID; 5710 #endif 5711 } 5712 5713 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5714 { 5715 struct hwrm_cfa_l2_filter_free_input *req; 5716 u16 target_id = 0xffff; 5717 int rc; 5718 5719 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5720 struct bnxt_pf_info *pf = &bp->pf; 5721 5722 if (fltr->base.vf_idx >= pf->active_vfs) 5723 return -EINVAL; 5724 5725 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5726 if (target_id == 
INVALID_HW_RING_ID) 5727 return -EINVAL; 5728 } 5729 5730 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 5731 if (rc) 5732 return rc; 5733 5734 req->target_id = cpu_to_le16(target_id); 5735 req->l2_filter_id = fltr->base.filter_id; 5736 return hwrm_req_send(bp, req); 5737 } 5738 5739 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5740 { 5741 struct hwrm_cfa_l2_filter_alloc_output *resp; 5742 struct hwrm_cfa_l2_filter_alloc_input *req; 5743 u16 target_id = 0xffff; 5744 int rc; 5745 5746 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5747 struct bnxt_pf_info *pf = &bp->pf; 5748 5749 if (fltr->base.vf_idx >= pf->active_vfs) 5750 return -EINVAL; 5751 5752 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5753 } 5754 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 5755 if (rc) 5756 return rc; 5757 5758 req->target_id = cpu_to_le16(target_id); 5759 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 5760 5761 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 5762 req->flags |= 5763 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 5764 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 5765 req->enables = 5766 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 5767 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 5768 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 5769 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 5770 eth_broadcast_addr(req->l2_addr_mask); 5771 5772 if (fltr->l2_key.vlan) { 5773 req->enables |= 5774 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 5775 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 5776 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 5777 req->num_vlans = 1; 5778 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 5779 req->l2_ivlan_mask = cpu_to_le16(0xfff); 5780 } 5781 5782 resp = hwrm_req_hold(bp, req); 5783 rc = hwrm_req_send(bp, req); 5784 if (!rc) { 5785 fltr->base.filter_id = resp->l2_filter_id; 5786 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 5787 } 5788 hwrm_req_drop(bp, req); 5789 return rc; 5790 } 5791 5792 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 5793 struct bnxt_ntuple_filter *fltr) 5794 { 5795 struct hwrm_cfa_ntuple_filter_free_input *req; 5796 int rc; 5797 5798 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 5799 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 5800 if (rc) 5801 return rc; 5802 5803 req->ntuple_filter_id = fltr->base.filter_id; 5804 return hwrm_req_send(bp, req); 5805 } 5806 5807 #define BNXT_NTP_FLTR_FLAGS \ 5808 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 5809 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 5810 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 5811 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 5812 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 5813 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 5814 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 5815 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 5816 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 5817 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 5818 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 5819 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 5820 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 5821 5822 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 5823 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 5824 5825 void bnxt_fill_ipv6_mask(__be32 mask[4]) 5826 { 5827 int i; 5828 5829 for (i = 0; i < 4; i++) 5830 mask[i] = cpu_to_be32(~0); 5831 } 5832 5833 static void 5834 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 5835 struct 
hwrm_cfa_ntuple_filter_alloc_input *req, 5836 struct bnxt_ntuple_filter *fltr) 5837 { 5838 struct bnxt_rss_ctx *rss_ctx, *tmp; 5839 u16 rxq = fltr->base.rxq; 5840 5841 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 5842 list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { 5843 if (rss_ctx->index == fltr->base.fw_vnic_id) { 5844 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 5845 5846 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5847 break; 5848 } 5849 } 5850 return; 5851 } 5852 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 5853 struct bnxt_vnic_info *vnic; 5854 u32 enables; 5855 5856 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 5857 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5858 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 5859 req->enables |= cpu_to_le32(enables); 5860 req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 5861 } else { 5862 u32 flags; 5863 5864 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5865 req->flags |= cpu_to_le32(flags); 5866 req->dst_id = cpu_to_le16(rxq); 5867 } 5868 } 5869 5870 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5871 struct bnxt_ntuple_filter *fltr) 5872 { 5873 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 5874 struct hwrm_cfa_ntuple_filter_alloc_input *req; 5875 struct bnxt_flow_masks *masks = &fltr->fmasks; 5876 struct flow_keys *keys = &fltr->fkeys; 5877 struct bnxt_l2_filter *l2_fltr; 5878 struct bnxt_vnic_info *vnic; 5879 int rc; 5880 5881 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 5882 if (rc) 5883 return rc; 5884 5885 l2_fltr = fltr->l2_fltr; 5886 req->l2_filter_id = l2_fltr->base.filter_id; 5887 5888 if (fltr->base.flags & BNXT_ACT_DROP) { 5889 req->flags = 5890 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 5891 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 5892 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); 5893 } else { 5894 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 5895 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5896 } 5897 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5898 5899 req->ethertype = htons(ETH_P_IP); 5900 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 5901 req->ip_protocol = keys->basic.ip_proto; 5902 5903 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 5904 req->ethertype = htons(ETH_P_IPV6); 5905 req->ip_addr_type = 5906 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 5907 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; 5908 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; 5909 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; 5910 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; 5911 } else { 5912 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 5913 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; 5914 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 5915 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; 5916 } 5917 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 5918 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 5919 req->tunnel_type = 5920 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 5921 } 5922 5923 req->src_port = keys->ports.src; 5924 req->src_port_mask = masks->ports.src; 5925 req->dst_port = keys->ports.dst; 5926 req->dst_port_mask = masks->ports.dst; 5927 5928 resp = hwrm_req_hold(bp, req); 5929 rc = hwrm_req_send(bp, req); 5930 if (!rc) 5931 fltr->base.filter_id = resp->ntuple_filter_id; 5932 hwrm_req_drop(bp, req); 5933 return rc; 5934 } 5935 5936 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 
5937 const u8 *mac_addr) 5938 { 5939 struct bnxt_l2_filter *fltr; 5940 struct bnxt_l2_key key; 5941 int rc; 5942 5943 ether_addr_copy(key.dst_mac_addr, mac_addr); 5944 key.vlan = 0; 5945 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 5946 if (IS_ERR(fltr)) 5947 return PTR_ERR(fltr); 5948 5949 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; 5950 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 5951 if (rc) 5952 bnxt_del_l2_filter(bp, fltr); 5953 else 5954 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; 5955 return rc; 5956 } 5957 5958 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 5959 { 5960 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 5961 5962 /* Any associated ntuple filters will also be cleared by firmware. */ 5963 for (i = 0; i < num_of_vnics; i++) { 5964 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5965 5966 for (j = 0; j < vnic->uc_filter_count; j++) { 5967 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; 5968 5969 bnxt_hwrm_l2_filter_free(bp, fltr); 5970 bnxt_del_l2_filter(bp, fltr); 5971 } 5972 vnic->uc_filter_count = 0; 5973 } 5974 } 5975 5976 #define BNXT_DFLT_TUNL_TPA_BMAP \ 5977 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ 5978 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ 5979 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) 5980 5981 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, 5982 struct hwrm_vnic_tpa_cfg_input *req) 5983 { 5984 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; 5985 5986 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) 5987 return; 5988 5989 if (bp->vxlan_port) 5990 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; 5991 if (bp->vxlan_gpe_port) 5992 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; 5993 if (bp->nge_port) 5994 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; 5995 5996 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); 5997 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); 5998 } 5999 6000 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6001 u32 tpa_flags) 6002 { 6003 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 6004 struct hwrm_vnic_tpa_cfg_input *req; 6005 int rc; 6006 6007 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 6008 return 0; 6009 6010 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 6011 if (rc) 6012 return rc; 6013 6014 if (tpa_flags) { 6015 u16 mss = bp->dev->mtu - 40; 6016 u32 nsegs, n, segs = 0, flags; 6017 6018 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 6019 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 6020 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 6021 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 6022 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 6023 if (tpa_flags & BNXT_FLAG_GRO) 6024 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 6025 6026 req->flags = cpu_to_le32(flags); 6027 6028 req->enables = 6029 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 6030 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 6031 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 6032 6033 /* Number of segs are log2 units, and first packet is not 6034 * included as part of this units. 
6035 */ 6036 if (mss <= BNXT_RX_PAGE_SIZE) { 6037 n = BNXT_RX_PAGE_SIZE / mss; 6038 nsegs = (MAX_SKB_FRAGS - 1) * n; 6039 } else { 6040 n = mss / BNXT_RX_PAGE_SIZE; 6041 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 6042 n++; 6043 nsegs = (MAX_SKB_FRAGS - n) / n; 6044 } 6045 6046 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6047 segs = MAX_TPA_SEGS_P5; 6048 max_aggs = bp->max_tpa; 6049 } else { 6050 segs = ilog2(nsegs); 6051 } 6052 req->max_agg_segs = cpu_to_le16(segs); 6053 req->max_aggs = cpu_to_le16(max_aggs); 6054 6055 req->min_agg_len = cpu_to_le32(512); 6056 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 6057 } 6058 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6059 6060 return hwrm_req_send(bp, req); 6061 } 6062 6063 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 6064 { 6065 struct bnxt_ring_grp_info *grp_info; 6066 6067 grp_info = &bp->grp_info[ring->grp_idx]; 6068 return grp_info->cp_fw_ring_id; 6069 } 6070 6071 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 6072 { 6073 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6074 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 6075 else 6076 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 6077 } 6078 6079 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 6080 { 6081 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6082 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 6083 else 6084 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 6085 } 6086 6087 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) 6088 { 6089 int entries; 6090 u16 *tbl; 6091 6092 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6093 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 6094 else 6095 entries = HW_HASH_INDEX_SIZE; 6096 6097 bp->rss_indir_tbl_entries = entries; 6098 tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); 6099 if (!tbl) 6100 return -ENOMEM; 6101 6102 if (rss_ctx) 6103 rss_ctx->rss_indir_tbl = tbl; 6104 else 6105 bp->rss_indir_tbl = tbl; 6106 6107 return 0; 6108 } 6109 6110 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) 6111 { 6112 u16 max_rings, max_entries, pad, i; 6113 u16 *rss_indir_tbl; 6114 6115 if (!bp->rx_nr_rings) 6116 return; 6117 6118 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6119 max_rings = bp->rx_nr_rings - 1; 6120 else 6121 max_rings = bp->rx_nr_rings; 6122 6123 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 6124 if (rss_ctx) 6125 rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; 6126 else 6127 rss_indir_tbl = &bp->rss_indir_tbl[0]; 6128 6129 for (i = 0; i < max_entries; i++) 6130 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 6131 6132 pad = bp->rss_indir_tbl_entries - max_entries; 6133 if (pad) 6134 memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); 6135 } 6136 6137 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 6138 { 6139 u16 i, tbl_size, max_ring = 0; 6140 6141 if (!bp->rss_indir_tbl) 6142 return 0; 6143 6144 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6145 for (i = 0; i < tbl_size; i++) 6146 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 6147 return max_ring; 6148 } 6149 6150 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 6151 { 6152 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6153 if (!rx_rings) 6154 return 0; 6155 return bnxt_calc_nr_ring_pages(rx_rings - 1, 6156 BNXT_RSS_TABLE_ENTRIES_P5); 6157 } 6158 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6159 return 2; 6160 return 1; 6161 } 6162 6163 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6164 { 6165 bool no_rss = 
!(vnic->flags & BNXT_VNIC_RSS_FLAG); 6166 u16 i, j; 6167 6168 /* Fill the RSS indirection table with ring group ids */ 6169 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 6170 if (!no_rss) 6171 j = bp->rss_indir_tbl[i]; 6172 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 6173 } 6174 } 6175 6176 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 6177 struct bnxt_vnic_info *vnic) 6178 { 6179 __le16 *ring_tbl = vnic->rss_table; 6180 struct bnxt_rx_ring_info *rxr; 6181 u16 tbl_size, i; 6182 6183 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6184 6185 for (i = 0; i < tbl_size; i++) { 6186 u16 ring_id, j; 6187 6188 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6189 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6190 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 6191 j = vnic->rss_ctx->rss_indir_tbl[i]; 6192 else 6193 j = bp->rss_indir_tbl[i]; 6194 rxr = &bp->rx_ring[j]; 6195 6196 ring_id = rxr->rx_ring_struct.fw_ring_id; 6197 *ring_tbl++ = cpu_to_le16(ring_id); 6198 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6199 *ring_tbl++ = cpu_to_le16(ring_id); 6200 } 6201 } 6202 6203 static void 6204 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 6205 struct bnxt_vnic_info *vnic) 6206 { 6207 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6208 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 6209 if (bp->flags & BNXT_FLAG_CHIP_P7) 6210 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; 6211 } else { 6212 bnxt_fill_hw_rss_tbl(bp, vnic); 6213 } 6214 6215 if (bp->rss_hash_delta) { 6216 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 6217 if (bp->rss_hash_cfg & bp->rss_hash_delta) 6218 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 6219 else 6220 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 6221 } else { 6222 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 6223 } 6224 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 6225 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6226 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6227 } 6228 6229 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6230 bool set_rss) 6231 { 6232 struct hwrm_vnic_rss_cfg_input *req; 6233 int rc; 6234 6235 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6236 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6237 return 0; 6238 6239 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6240 if (rc) 6241 return rc; 6242 6243 if (set_rss) 6244 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6245 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6246 return hwrm_req_send(bp, req); 6247 } 6248 6249 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, 6250 struct bnxt_vnic_info *vnic, bool set_rss) 6251 { 6252 struct hwrm_vnic_rss_cfg_input *req; 6253 dma_addr_t ring_tbl_map; 6254 u32 i, nr_ctxs; 6255 int rc; 6256 6257 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6258 if (rc) 6259 return rc; 6260 6261 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6262 if (!set_rss) 6263 return hwrm_req_send(bp, req); 6264 6265 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6266 ring_tbl_map = vnic->rss_table_dma_addr; 6267 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6268 6269 hwrm_req_hold(bp, req); 6270 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6271 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6272 req->ring_table_pair_index = i; 6273 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6274 rc = hwrm_req_send(bp, req); 6275 if (rc) 6276 goto exit; 6277 } 6278 6279 exit: 6280 
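	/* Release the request held for the per-context sends above. */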
hwrm_req_drop(bp, req); 6281 return rc; 6282 } 6283 6284 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6285 { 6286 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6287 struct hwrm_vnic_rss_qcfg_output *resp; 6288 struct hwrm_vnic_rss_qcfg_input *req; 6289 6290 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6291 return; 6292 6293 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6294 /* all contexts configured to same hash_type, zero always exists */ 6295 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6296 resp = hwrm_req_hold(bp, req); 6297 if (!hwrm_req_send(bp, req)) { 6298 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6299 bp->rss_hash_delta = 0; 6300 } 6301 hwrm_req_drop(bp, req); 6302 } 6303 6304 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6305 { 6306 struct hwrm_vnic_plcmodes_cfg_input *req; 6307 int rc; 6308 6309 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6310 if (rc) 6311 return rc; 6312 6313 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6314 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6315 6316 if (BNXT_RX_PAGE_MODE(bp)) { 6317 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6318 } else { 6319 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6320 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6321 req->enables |= 6322 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6323 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 6324 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 6325 } 6326 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6327 return hwrm_req_send(bp, req); 6328 } 6329 6330 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, 6331 struct bnxt_vnic_info *vnic, 6332 u16 ctx_idx) 6333 { 6334 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6335 6336 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6337 return; 6338 6339 req->rss_cos_lb_ctx_id = 6340 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); 6341 6342 hwrm_req_send(bp, req); 6343 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6344 } 6345 6346 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6347 { 6348 int i, j; 6349 6350 for (i = 0; i < bp->nr_vnics; i++) { 6351 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6352 6353 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6354 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6355 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); 6356 } 6357 } 6358 bp->rsscos_nr_ctxs = 0; 6359 } 6360 6361 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, 6362 struct bnxt_vnic_info *vnic, u16 ctx_idx) 6363 { 6364 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6365 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6366 int rc; 6367 6368 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6369 if (rc) 6370 return rc; 6371 6372 resp = hwrm_req_hold(bp, req); 6373 rc = hwrm_req_send(bp, req); 6374 if (!rc) 6375 vnic->fw_rss_cos_lb_ctx[ctx_idx] = 6376 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6377 hwrm_req_drop(bp, req); 6378 6379 return rc; 6380 } 6381 6382 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6383 { 6384 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6385 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6386 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6387 } 6388 6389 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6390 { 6391 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6392 struct 
hwrm_vnic_cfg_input *req; 6393 unsigned int ring = 0, grp_idx; 6394 u16 def_vlan = 0; 6395 int rc; 6396 6397 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6398 if (rc) 6399 return rc; 6400 6401 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6402 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6403 6404 req->default_rx_ring_id = 6405 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6406 req->default_cmpl_ring_id = 6407 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6408 req->enables = 6409 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6410 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6411 goto vnic_mru; 6412 } 6413 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6414 /* Only RSS support for now TBD: COS & LB */ 6415 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6416 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6417 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6418 VNIC_CFG_REQ_ENABLES_MRU); 6419 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6420 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6421 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6422 VNIC_CFG_REQ_ENABLES_MRU); 6423 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6424 } else { 6425 req->rss_rule = cpu_to_le16(0xffff); 6426 } 6427 6428 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6429 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6430 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6431 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6432 } else { 6433 req->cos_rule = cpu_to_le16(0xffff); 6434 } 6435 6436 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6437 ring = 0; 6438 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6439 ring = vnic->vnic_id - 1; 6440 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6441 ring = bp->rx_nr_rings - 1; 6442 6443 grp_idx = bp->rx_ring[ring].bnapi->index; 6444 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6445 req->lb_rule = cpu_to_le16(0xffff); 6446 vnic_mru: 6447 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 6448 6449 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6450 #ifdef CONFIG_BNXT_SRIOV 6451 if (BNXT_VF(bp)) 6452 def_vlan = bp->vf.vlan; 6453 #endif 6454 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6455 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6456 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) 6457 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6458 6459 return hwrm_req_send(bp, req); 6460 } 6461 6462 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, 6463 struct bnxt_vnic_info *vnic) 6464 { 6465 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { 6466 struct hwrm_vnic_free_input *req; 6467 6468 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6469 return; 6470 6471 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6472 6473 hwrm_req_send(bp, req); 6474 vnic->fw_vnic_id = INVALID_HW_RING_ID; 6475 } 6476 } 6477 6478 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6479 { 6480 u16 i; 6481 6482 for (i = 0; i < bp->nr_vnics; i++) 6483 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); 6484 } 6485 6486 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6487 unsigned int start_rx_ring_idx, 6488 unsigned int nr_rings) 6489 { 6490 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6491 struct hwrm_vnic_alloc_output *resp; 6492 struct hwrm_vnic_alloc_input *req; 6493 int rc; 6494 6495 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6496 if (rc) 6497 return rc; 6498 6499 if 
(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6500 goto vnic_no_ring_grps; 6501 6502 /* map ring groups to this vnic */ 6503 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6504 grp_idx = bp->rx_ring[i].bnapi->index; 6505 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6506 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6507 j, nr_rings); 6508 break; 6509 } 6510 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6511 } 6512 6513 vnic_no_ring_grps: 6514 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6515 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6516 if (vnic->vnic_id == BNXT_VNIC_DEFAULT) 6517 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6518 6519 resp = hwrm_req_hold(bp, req); 6520 rc = hwrm_req_send(bp, req); 6521 if (!rc) 6522 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6523 hwrm_req_drop(bp, req); 6524 return rc; 6525 } 6526 6527 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6528 { 6529 struct hwrm_vnic_qcaps_output *resp; 6530 struct hwrm_vnic_qcaps_input *req; 6531 int rc; 6532 6533 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6534 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6535 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6536 if (bp->hwrm_spec_code < 0x10600) 6537 return 0; 6538 6539 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6540 if (rc) 6541 return rc; 6542 6543 resp = hwrm_req_hold(bp, req); 6544 rc = hwrm_req_send(bp, req); 6545 if (!rc) { 6546 u32 flags = le32_to_cpu(resp->flags); 6547 6548 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6549 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6550 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6551 if (flags & 6552 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6553 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6554 6555 /* Older P5 fw before EXT_HW_STATS support did not set 6556 * VLAN_STRIP_CAP properly. 
6557 */ 6558 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6559 (BNXT_CHIP_P5(bp) && 6560 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6561 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6562 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6563 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6564 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6565 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6566 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6567 if (bp->max_tpa_v2) { 6568 if (BNXT_CHIP_P5(bp)) 6569 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6570 else 6571 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6572 } 6573 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6574 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6575 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) 6576 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; 6577 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) 6578 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; 6579 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) 6580 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; 6581 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) 6582 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; 6583 } 6584 hwrm_req_drop(bp, req); 6585 return rc; 6586 } 6587 6588 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6589 { 6590 struct hwrm_ring_grp_alloc_output *resp; 6591 struct hwrm_ring_grp_alloc_input *req; 6592 int rc; 6593 u16 i; 6594 6595 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6596 return 0; 6597 6598 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6599 if (rc) 6600 return rc; 6601 6602 resp = hwrm_req_hold(bp, req); 6603 for (i = 0; i < bp->rx_nr_rings; i++) { 6604 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6605 6606 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6607 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6608 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6609 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6610 6611 rc = hwrm_req_send(bp, req); 6612 6613 if (rc) 6614 break; 6615 6616 bp->grp_info[grp_idx].fw_grp_id = 6617 le32_to_cpu(resp->ring_group_id); 6618 } 6619 hwrm_req_drop(bp, req); 6620 return rc; 6621 } 6622 6623 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6624 { 6625 struct hwrm_ring_grp_free_input *req; 6626 u16 i; 6627 6628 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6629 return; 6630 6631 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6632 return; 6633 6634 hwrm_req_hold(bp, req); 6635 for (i = 0; i < bp->cp_nr_rings; i++) { 6636 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6637 continue; 6638 req->ring_group_id = 6639 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6640 6641 hwrm_req_send(bp, req); 6642 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6643 } 6644 hwrm_req_drop(bp, req); 6645 } 6646 6647 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 6648 struct bnxt_ring_struct *ring, 6649 u32 ring_type, u32 map_index) 6650 { 6651 struct hwrm_ring_alloc_output *resp; 6652 struct hwrm_ring_alloc_input *req; 6653 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 6654 struct bnxt_ring_grp_info *grp_info; 6655 int rc, err = 0; 6656 u16 ring_id; 6657 6658 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 6659 if (rc) 6660 goto exit; 6661 6662 req->enables = 0; 6663 if (rmem->nr_pages > 1) { 6664 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 6665 /* Page size is in log2 units */ 6666 req->page_size = BNXT_PAGE_SHIFT; 6667 req->page_tbl_depth = 1; 6668 
} else { 6669 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 6670 } 6671 req->fbo = 0; 6672 /* Association of ring index with doorbell index and MSIX number */ 6673 req->logical_id = cpu_to_le16(map_index); 6674 6675 switch (ring_type) { 6676 case HWRM_RING_ALLOC_TX: { 6677 struct bnxt_tx_ring_info *txr; 6678 6679 txr = container_of(ring, struct bnxt_tx_ring_info, 6680 tx_ring_struct); 6681 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 6682 /* Association of transmit ring with completion ring */ 6683 grp_info = &bp->grp_info[ring->grp_idx]; 6684 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 6685 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 6686 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6687 req->queue_id = cpu_to_le16(ring->queue_id); 6688 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 6689 req->cmpl_coal_cnt = 6690 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 6691 break; 6692 } 6693 case HWRM_RING_ALLOC_RX: 6694 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6695 req->length = cpu_to_le32(bp->rx_ring_mask + 1); 6696 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6697 u16 flags = 0; 6698 6699 /* Association of rx ring with stats context */ 6700 grp_info = &bp->grp_info[ring->grp_idx]; 6701 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6702 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6703 req->enables |= cpu_to_le32( 6704 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6705 if (NET_IP_ALIGN == 2) 6706 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 6707 req->flags = cpu_to_le16(flags); 6708 } 6709 break; 6710 case HWRM_RING_ALLOC_AGG: 6711 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6712 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6713 /* Association of agg ring with rx ring */ 6714 grp_info = &bp->grp_info[ring->grp_idx]; 6715 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6716 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6717 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6718 req->enables |= cpu_to_le32( 6719 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 6720 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6721 } else { 6722 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6723 } 6724 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 6725 break; 6726 case HWRM_RING_ALLOC_CMPL: 6727 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 6728 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6729 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6730 /* Association of cp ring with nq */ 6731 grp_info = &bp->grp_info[map_index]; 6732 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6733 req->cq_handle = cpu_to_le64(ring->handle); 6734 req->enables |= cpu_to_le32( 6735 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 6736 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 6737 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6738 } 6739 break; 6740 case HWRM_RING_ALLOC_NQ: 6741 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 6742 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6743 if (bp->flags & BNXT_FLAG_USING_MSIX) 6744 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6745 break; 6746 default: 6747 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 6748 ring_type); 6749 return -1; 6750 } 6751 6752 resp = hwrm_req_hold(bp, req); 6753 rc = hwrm_req_send(bp, req); 6754 err = le16_to_cpu(resp->error_code); 6755 ring_id = le16_to_cpu(resp->ring_id); 6756 hwrm_req_drop(bp, req); 6757 6758 exit: 6759 if (rc || err) { 6760 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 6761 ring_type, rc, err); 6762 return -EIO; 6763 } 6764 ring->fw_ring_id = ring_id; 6765 return rc; 6766 } 6767 6768 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 6769 { 6770 int rc; 6771 6772 if (BNXT_PF(bp)) { 6773 struct hwrm_func_cfg_input *req; 6774 6775 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 6776 if (rc) 6777 return rc; 6778 6779 req->fid = cpu_to_le16(0xffff); 6780 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6781 req->async_event_cr = cpu_to_le16(idx); 6782 return hwrm_req_send(bp, req); 6783 } else { 6784 struct hwrm_func_vf_cfg_input *req; 6785 6786 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 6787 if (rc) 6788 return rc; 6789 6790 req->enables = 6791 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6792 req->async_event_cr = cpu_to_le16(idx); 6793 return hwrm_req_send(bp, req); 6794 } 6795 } 6796 6797 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 6798 u32 ring_type) 6799 { 6800 switch (ring_type) { 6801 case HWRM_RING_ALLOC_TX: 6802 db->db_ring_mask = bp->tx_ring_mask; 6803 break; 6804 case HWRM_RING_ALLOC_RX: 6805 db->db_ring_mask = bp->rx_ring_mask; 6806 break; 6807 case HWRM_RING_ALLOC_AGG: 6808 db->db_ring_mask = bp->rx_agg_ring_mask; 6809 break; 6810 case HWRM_RING_ALLOC_CMPL: 6811 case HWRM_RING_ALLOC_NQ: 6812 db->db_ring_mask = bp->cp_ring_mask; 6813 break; 6814 } 6815 if (bp->flags & BNXT_FLAG_CHIP_P7) { 6816 db->db_epoch_mask = db->db_ring_mask + 1; 6817 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 6818 } 6819 } 6820 6821 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 6822 u32 map_idx, u32 xid) 6823 { 6824 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6825 switch (ring_type) { 6826 case HWRM_RING_ALLOC_TX: 6827 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 6828 break; 6829 case HWRM_RING_ALLOC_RX: 6830 case HWRM_RING_ALLOC_AGG: 6831 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 6832 break; 6833 case HWRM_RING_ALLOC_CMPL: 6834 db->db_key64 = DBR_PATH_L2; 6835 break; 6836 case HWRM_RING_ALLOC_NQ: 6837 db->db_key64 = DBR_PATH_L2; 6838 break; 6839 } 6840 db->db_key64 |= (u64)xid << DBR_XID_SFT; 6841 6842 if (bp->flags & BNXT_FLAG_CHIP_P7) 6843 db->db_key64 |= DBR_VALID; 6844 6845 db->doorbell = bp->bar1 + bp->db_offset; 6846 } else { 6847 db->doorbell = bp->bar1 + map_idx * 0x80; 6848 switch (ring_type) { 6849 case HWRM_RING_ALLOC_TX: 6850 db->db_key32 = DB_KEY_TX; 6851 break; 6852 case HWRM_RING_ALLOC_RX: 6853 case HWRM_RING_ALLOC_AGG: 6854 db->db_key32 = DB_KEY_RX; 6855 break; 6856 case HWRM_RING_ALLOC_CMPL: 6857 db->db_key32 = DB_KEY_CP; 6858 break; 6859 } 6860 } 6861 bnxt_set_db_mask(bp, db, ring_type); 6862 } 6863 6864 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 6865 { 6866 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 6867 int i, rc = 0; 6868 u32 type; 6869 6870 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6871 type = HWRM_RING_ALLOC_NQ; 6872 else 6873 type = HWRM_RING_ALLOC_CMPL; 6874 for (i = 0; i < bp->cp_nr_rings; i++) { 6875 struct bnxt_napi *bnapi = bp->bnapi[i]; 6876 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6877 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 6878 u32 map_idx = ring->map_idx; 6879 unsigned int vector; 6880 6881 vector = bp->irq_tbl[map_idx].vector; 6882 disable_irq_nosync(vector); 6883 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6884 if (rc) { 6885 enable_irq(vector); 6886 goto err_out; 6887 } 6888 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 6889 
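	/* The completion/NQ doorbell is now set up; write its initial
	 * consumer index before re-enabling the IRQ.
	 */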
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 6890 enable_irq(vector); 6891 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 6892 6893 if (!i) { 6894 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 6895 if (rc) 6896 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 6897 } 6898 } 6899 6900 type = HWRM_RING_ALLOC_TX; 6901 for (i = 0; i < bp->tx_nr_rings; i++) { 6902 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6903 struct bnxt_ring_struct *ring; 6904 u32 map_idx; 6905 6906 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6907 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; 6908 struct bnxt_napi *bnapi = txr->bnapi; 6909 u32 type2 = HWRM_RING_ALLOC_CMPL; 6910 6911 ring = &cpr2->cp_ring_struct; 6912 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6913 map_idx = bnapi->index; 6914 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6915 if (rc) 6916 goto err_out; 6917 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6918 ring->fw_ring_id); 6919 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6920 } 6921 ring = &txr->tx_ring_struct; 6922 map_idx = i; 6923 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6924 if (rc) 6925 goto err_out; 6926 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 6927 } 6928 6929 type = HWRM_RING_ALLOC_RX; 6930 for (i = 0; i < bp->rx_nr_rings; i++) { 6931 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6932 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6933 struct bnxt_napi *bnapi = rxr->bnapi; 6934 u32 map_idx = bnapi->index; 6935 6936 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6937 if (rc) 6938 goto err_out; 6939 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 6940 /* If we have agg rings, post agg buffers first. */ 6941 if (!agg_rings) 6942 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6943 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 6944 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6945 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; 6946 u32 type2 = HWRM_RING_ALLOC_CMPL; 6947 6948 ring = &cpr2->cp_ring_struct; 6949 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6950 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6951 if (rc) 6952 goto err_out; 6953 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6954 ring->fw_ring_id); 6955 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6956 } 6957 } 6958 6959 if (agg_rings) { 6960 type = HWRM_RING_ALLOC_AGG; 6961 for (i = 0; i < bp->rx_nr_rings; i++) { 6962 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6963 struct bnxt_ring_struct *ring = 6964 &rxr->rx_agg_ring_struct; 6965 u32 grp_idx = ring->grp_idx; 6966 u32 map_idx = grp_idx + bp->rx_nr_rings; 6967 6968 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6969 if (rc) 6970 goto err_out; 6971 6972 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 6973 ring->fw_ring_id); 6974 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 6975 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6976 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 6977 } 6978 } 6979 err_out: 6980 return rc; 6981 } 6982 6983 static int hwrm_ring_free_send_msg(struct bnxt *bp, 6984 struct bnxt_ring_struct *ring, 6985 u32 ring_type, int cmpl_ring_id) 6986 { 6987 struct hwrm_ring_free_output *resp; 6988 struct hwrm_ring_free_input *req; 6989 u16 error_code = 0; 6990 int rc; 6991 6992 if (BNXT_NO_FW_ACCESS(bp)) 6993 return 0; 6994 6995 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 6996 if (rc) 6997 goto exit; 6998 6999 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 7000 req->ring_type = ring_type; 7001 req->ring_id = 
cpu_to_le16(ring->fw_ring_id); 7002 7003 resp = hwrm_req_hold(bp, req); 7004 rc = hwrm_req_send(bp, req); 7005 error_code = le16_to_cpu(resp->error_code); 7006 hwrm_req_drop(bp, req); 7007 exit: 7008 if (rc || error_code) { 7009 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 7010 ring_type, rc, error_code); 7011 return -EIO; 7012 } 7013 return 0; 7014 } 7015 7016 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 7017 { 7018 u32 type; 7019 int i; 7020 7021 if (!bp->bnapi) 7022 return; 7023 7024 for (i = 0; i < bp->tx_nr_rings; i++) { 7025 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7026 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7027 7028 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7029 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 7030 7031 hwrm_ring_free_send_msg(bp, ring, 7032 RING_FREE_REQ_RING_TYPE_TX, 7033 close_path ? cmpl_ring_id : 7034 INVALID_HW_RING_ID); 7035 ring->fw_ring_id = INVALID_HW_RING_ID; 7036 } 7037 } 7038 7039 for (i = 0; i < bp->rx_nr_rings; i++) { 7040 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7041 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7042 u32 grp_idx = rxr->bnapi->index; 7043 7044 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7045 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7046 7047 hwrm_ring_free_send_msg(bp, ring, 7048 RING_FREE_REQ_RING_TYPE_RX, 7049 close_path ? cmpl_ring_id : 7050 INVALID_HW_RING_ID); 7051 ring->fw_ring_id = INVALID_HW_RING_ID; 7052 bp->grp_info[grp_idx].rx_fw_ring_id = 7053 INVALID_HW_RING_ID; 7054 } 7055 } 7056 7057 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7058 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 7059 else 7060 type = RING_FREE_REQ_RING_TYPE_RX; 7061 for (i = 0; i < bp->rx_nr_rings; i++) { 7062 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7063 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7064 u32 grp_idx = rxr->bnapi->index; 7065 7066 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7067 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7068 7069 hwrm_ring_free_send_msg(bp, ring, type, 7070 close_path ? cmpl_ring_id : 7071 INVALID_HW_RING_ID); 7072 ring->fw_ring_id = INVALID_HW_RING_ID; 7073 bp->grp_info[grp_idx].agg_fw_ring_id = 7074 INVALID_HW_RING_ID; 7075 } 7076 } 7077 7078 /* The completion rings are about to be freed. After that the 7079 * IRQ doorbell will not work anymore. So we need to disable 7080 * IRQ here. 
7081 */ 7082 bnxt_disable_int_sync(bp); 7083 7084 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7085 type = RING_FREE_REQ_RING_TYPE_NQ; 7086 else 7087 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 7088 for (i = 0; i < bp->cp_nr_rings; i++) { 7089 struct bnxt_napi *bnapi = bp->bnapi[i]; 7090 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7091 struct bnxt_ring_struct *ring; 7092 int j; 7093 7094 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { 7095 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 7096 7097 ring = &cpr2->cp_ring_struct; 7098 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7099 continue; 7100 hwrm_ring_free_send_msg(bp, ring, 7101 RING_FREE_REQ_RING_TYPE_L2_CMPL, 7102 INVALID_HW_RING_ID); 7103 ring->fw_ring_id = INVALID_HW_RING_ID; 7104 } 7105 ring = &cpr->cp_ring_struct; 7106 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7107 hwrm_ring_free_send_msg(bp, ring, type, 7108 INVALID_HW_RING_ID); 7109 ring->fw_ring_id = INVALID_HW_RING_ID; 7110 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 7111 } 7112 } 7113 } 7114 7115 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7116 bool shared); 7117 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7118 bool shared); 7119 7120 static int bnxt_hwrm_get_rings(struct bnxt *bp) 7121 { 7122 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7123 struct hwrm_func_qcfg_output *resp; 7124 struct hwrm_func_qcfg_input *req; 7125 int rc; 7126 7127 if (bp->hwrm_spec_code < 0x10601) 7128 return 0; 7129 7130 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7131 if (rc) 7132 return rc; 7133 7134 req->fid = cpu_to_le16(0xffff); 7135 resp = hwrm_req_hold(bp, req); 7136 rc = hwrm_req_send(bp, req); 7137 if (rc) { 7138 hwrm_req_drop(bp, req); 7139 return rc; 7140 } 7141 7142 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7143 if (BNXT_NEW_RM(bp)) { 7144 u16 cp, stats; 7145 7146 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 7147 hw_resc->resv_hw_ring_grps = 7148 le32_to_cpu(resp->alloc_hw_ring_grps); 7149 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7150 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7151 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7152 stats = le16_to_cpu(resp->alloc_stat_ctx); 7153 hw_resc->resv_irqs = cp; 7154 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7155 int rx = hw_resc->resv_rx_rings; 7156 int tx = hw_resc->resv_tx_rings; 7157 7158 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7159 rx >>= 1; 7160 if (cp < (rx + tx)) { 7161 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 7162 if (rc) 7163 goto get_rings_exit; 7164 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7165 rx <<= 1; 7166 hw_resc->resv_rx_rings = rx; 7167 hw_resc->resv_tx_rings = tx; 7168 } 7169 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 7170 hw_resc->resv_hw_ring_grps = rx; 7171 } 7172 hw_resc->resv_cp_rings = cp; 7173 hw_resc->resv_stat_ctxs = stats; 7174 } 7175 get_rings_exit: 7176 hwrm_req_drop(bp, req); 7177 return rc; 7178 } 7179 7180 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 7181 { 7182 struct hwrm_func_qcfg_output *resp; 7183 struct hwrm_func_qcfg_input *req; 7184 int rc; 7185 7186 if (bp->hwrm_spec_code < 0x10601) 7187 return 0; 7188 7189 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7190 if (rc) 7191 return rc; 7192 7193 req->fid = cpu_to_le16(fid); 7194 resp = hwrm_req_hold(bp, req); 7195 rc = hwrm_req_send(bp, req); 7196 if (!rc) 7197 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7198 7199 hwrm_req_drop(bp, req); 7200 return rc; 7201 } 7202 7203 
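/*
 * Illustrative sketch, not part of the driver: the functions in this file
 * share one HWRM message pattern from bnxt_hwrm.c.  hwrm_req_init()
 * allocates and prepares the request, hwrm_req_hold() keeps the DMA
 * buffers alive so the response can be read after hwrm_req_send(), and
 * hwrm_req_drop() releases them.  The helper name below is hypothetical
 * and only annotates that sequence, using fields already read above.
 */
#if 0	/* example only, never compiled */
static int bnxt_example_query_vnics(struct bnxt *bp, u16 *num_vnics)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	/* Allocate the request buffer and point 'req' at it. */
	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	/* Fill request fields; multi-byte fields are little-endian. */
	req->fid = cpu_to_le16(0xffff);

	/* Hold the request so 'resp' remains valid after the send. */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*num_vnics = le16_to_cpu(resp->alloc_vnics);

	/* Release the buffers held above. */
	hwrm_req_drop(bp, req);
	return rc;
}
#endif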
static bool bnxt_rfs_supported(struct bnxt *bp); 7204 7205 static struct hwrm_func_cfg_input * 7206 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7207 { 7208 struct hwrm_func_cfg_input *req; 7209 u32 enables = 0; 7210 7211 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 7212 return NULL; 7213 7214 req->fid = cpu_to_le16(0xffff); 7215 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7216 req->num_tx_rings = cpu_to_le16(hwr->tx); 7217 if (BNXT_NEW_RM(bp)) { 7218 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7219 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7220 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7221 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7222 enables |= hwr->cp_p5 ? 7223 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7224 } else { 7225 enables |= hwr->cp ? 7226 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7227 enables |= hwr->grp ? 7228 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7229 } 7230 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7231 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7232 0; 7233 req->num_rx_rings = cpu_to_le16(hwr->rx); 7234 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7235 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7236 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7237 req->num_msix = cpu_to_le16(hwr->cp); 7238 } else { 7239 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7240 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7241 } 7242 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7243 req->num_vnics = cpu_to_le16(hwr->vnic); 7244 } 7245 req->enables = cpu_to_le32(enables); 7246 return req; 7247 } 7248 7249 static struct hwrm_func_vf_cfg_input * 7250 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7251 { 7252 struct hwrm_func_vf_cfg_input *req; 7253 u32 enables = 0; 7254 7255 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7256 return NULL; 7257 7258 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7259 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7260 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7261 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7262 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7263 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7264 enables |= hwr->cp_p5 ? 7265 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7266 } else { 7267 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7268 enables |= hwr->grp ? 7269 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7270 } 7271 enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7272 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7273 7274 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7275 req->num_tx_rings = cpu_to_le16(hwr->tx); 7276 req->num_rx_rings = cpu_to_le16(hwr->rx); 7277 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7278 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7279 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7280 } else { 7281 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7282 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7283 } 7284 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7285 req->num_vnics = cpu_to_le16(hwr->vnic); 7286 7287 req->enables = cpu_to_le32(enables); 7288 return req; 7289 } 7290 7291 static int 7292 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7293 { 7294 struct hwrm_func_cfg_input *req; 7295 int rc; 7296 7297 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7298 if (!req) 7299 return -ENOMEM; 7300 7301 if (!req->enables) { 7302 hwrm_req_drop(bp, req); 7303 return 0; 7304 } 7305 7306 rc = hwrm_req_send(bp, req); 7307 if (rc) 7308 return rc; 7309 7310 if (bp->hwrm_spec_code < 0x10601) 7311 bp->hw_resc.resv_tx_rings = hwr->tx; 7312 7313 return bnxt_hwrm_get_rings(bp); 7314 } 7315 7316 static int 7317 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7318 { 7319 struct hwrm_func_vf_cfg_input *req; 7320 int rc; 7321 7322 if (!BNXT_NEW_RM(bp)) { 7323 bp->hw_resc.resv_tx_rings = hwr->tx; 7324 return 0; 7325 } 7326 7327 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7328 if (!req) 7329 return -ENOMEM; 7330 7331 rc = hwrm_req_send(bp, req); 7332 if (rc) 7333 return rc; 7334 7335 return bnxt_hwrm_get_rings(bp); 7336 } 7337 7338 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7339 { 7340 if (BNXT_PF(bp)) 7341 return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7342 else 7343 return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7344 } 7345 7346 int bnxt_nq_rings_in_use(struct bnxt *bp) 7347 { 7348 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); 7349 } 7350 7351 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7352 { 7353 int cp; 7354 7355 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7356 return bnxt_nq_rings_in_use(bp); 7357 7358 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7359 return cp; 7360 } 7361 7362 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7363 { 7364 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); 7365 } 7366 7367 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7368 { 7369 if (!hwr->grp) 7370 return 0; 7371 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7372 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7373 7374 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7375 rss_ctx *= hwr->vnic; 7376 return rss_ctx; 7377 } 7378 if (BNXT_VF(bp)) 7379 return BNXT_VF_MAX_RSS_CTX; 7380 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7381 return hwr->grp + 1; 7382 return 1; 7383 } 7384 7385 /* Check if a default RSS map needs to be setup. This function is only 7386 * used on older firmware that does not require reserving RX rings. 
7387 */ 7388 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7389 { 7390 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7391 7392 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7393 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7394 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7395 if (!netif_is_rxfh_configured(bp->dev)) 7396 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7397 } 7398 } 7399 7400 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7401 { 7402 if (bp->flags & BNXT_FLAG_RFS) { 7403 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7404 return 2 + bp->num_rss_ctx; 7405 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7406 return rx_rings + 1; 7407 } 7408 return 1; 7409 } 7410 7411 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7412 { 7413 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7414 int cp = bnxt_cp_rings_in_use(bp); 7415 int nq = bnxt_nq_rings_in_use(bp); 7416 int rx = bp->rx_nr_rings, stat; 7417 int vnic, grp = rx; 7418 7419 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7420 bp->hwrm_spec_code >= 0x10601) 7421 return true; 7422 7423 /* Old firmware does not need RX ring reservations but we still 7424 * need to setup a default RSS map when needed. With new firmware 7425 * we go through RX ring reservations first and then set up the 7426 * RSS map for the successfully reserved RX rings when needed. 7427 */ 7428 if (!BNXT_NEW_RM(bp)) { 7429 bnxt_check_rss_tbl_no_rmgr(bp); 7430 return false; 7431 } 7432 7433 vnic = bnxt_get_total_vnics(bp, rx); 7434 7435 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7436 rx <<= 1; 7437 stat = bnxt_get_func_stat_ctxs(bp); 7438 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7439 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7440 (hw_resc->resv_hw_ring_grps != grp && 7441 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7442 return true; 7443 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7444 hw_resc->resv_irqs != nq) 7445 return true; 7446 return false; 7447 } 7448 7449 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7450 { 7451 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7452 7453 hwr->tx = hw_resc->resv_tx_rings; 7454 if (BNXT_NEW_RM(bp)) { 7455 hwr->rx = hw_resc->resv_rx_rings; 7456 hwr->cp = hw_resc->resv_irqs; 7457 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7458 hwr->cp_p5 = hw_resc->resv_cp_rings; 7459 hwr->grp = hw_resc->resv_hw_ring_grps; 7460 hwr->vnic = hw_resc->resv_vnics; 7461 hwr->stat = hw_resc->resv_stat_ctxs; 7462 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7463 } 7464 } 7465 7466 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7467 { 7468 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7469 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7470 } 7471 7472 static int bnxt_get_avail_msix(struct bnxt *bp, int num); 7473 7474 static int __bnxt_reserve_rings(struct bnxt *bp) 7475 { 7476 struct bnxt_hw_rings hwr = {0}; 7477 int cp = bp->cp_nr_rings; 7478 int rx_rings, rc; 7479 int ulp_msix = 0; 7480 bool sh = false; 7481 int tx_cp; 7482 7483 if (!bnxt_need_reserve_rings(bp)) 7484 return 0; 7485 7486 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 7487 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 7488 if (!ulp_msix) 7489 bnxt_set_ulp_stat_ctxs(bp, 0); 7490 7491 if (ulp_msix > bp->ulp_num_msix_want) 7492 ulp_msix = bp->ulp_num_msix_want; 7493 hwr.cp = cp + ulp_msix; 7494 } else { 7495 hwr.cp = bnxt_nq_rings_in_use(bp); 7496 } 7497 7498 hwr.tx = bp->tx_nr_rings; 7499 hwr.rx = 
bp->rx_nr_rings; 7500 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7501 sh = true; 7502 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7503 hwr.cp_p5 = hwr.rx + hwr.tx; 7504 7505 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7506 7507 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7508 hwr.rx <<= 1; 7509 hwr.grp = bp->rx_nr_rings; 7510 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7511 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7512 7513 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7514 if (rc) 7515 return rc; 7516 7517 bnxt_copy_reserved_rings(bp, &hwr); 7518 7519 rx_rings = hwr.rx; 7520 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7521 if (hwr.rx >= 2) { 7522 rx_rings = hwr.rx >> 1; 7523 } else { 7524 if (netif_running(bp->dev)) 7525 return -ENOMEM; 7526 7527 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7528 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7529 bp->dev->hw_features &= ~NETIF_F_LRO; 7530 bp->dev->features &= ~NETIF_F_LRO; 7531 bnxt_set_ring_params(bp); 7532 } 7533 } 7534 rx_rings = min_t(int, rx_rings, hwr.grp); 7535 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 7536 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 7537 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 7538 hwr.cp = min_t(int, hwr.cp, hwr.stat); 7539 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 7540 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7541 hwr.rx = rx_rings << 1; 7542 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 7543 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7544 bp->tx_nr_rings = hwr.tx; 7545 7546 /* If we cannot reserve all the RX rings, reset the RSS map only 7547 * if absolutely necessary 7548 */ 7549 if (rx_rings != bp->rx_nr_rings) { 7550 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 7551 rx_rings, bp->rx_nr_rings); 7552 if (netif_is_rxfh_configured(bp->dev) && 7553 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 7554 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 7555 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 7556 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 7557 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 7558 } 7559 } 7560 bp->rx_nr_rings = rx_rings; 7561 bp->cp_nr_rings = hwr.cp; 7562 7563 if (!bnxt_rings_ok(bp, &hwr)) 7564 return -ENOMEM; 7565 7566 if (!netif_is_rxfh_configured(bp->dev)) 7567 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7568 7569 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { 7570 int resv_msix, resv_ctx, ulp_ctxs; 7571 struct bnxt_hw_resc *hw_resc; 7572 7573 hw_resc = &bp->hw_resc; 7574 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; 7575 ulp_msix = min_t(int, resv_msix, ulp_msix); 7576 bnxt_set_ulp_msix_num(bp, ulp_msix); 7577 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; 7578 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); 7579 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); 7580 } 7581 7582 return rc; 7583 } 7584 7585 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7586 { 7587 struct hwrm_func_vf_cfg_input *req; 7588 u32 flags; 7589 7590 if (!BNXT_NEW_RM(bp)) 7591 return 0; 7592 7593 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7594 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 7595 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7596 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7597 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7598 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 7599 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 7600 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7601 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7602 7603 req->flags = cpu_to_le32(flags); 7604 return hwrm_req_send_silent(bp, req); 7605 } 7606 7607 static int 
bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7608 { 7609 struct hwrm_func_cfg_input *req; 7610 u32 flags; 7611 7612 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7613 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 7614 if (BNXT_NEW_RM(bp)) { 7615 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7616 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7617 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7618 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 7619 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7620 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 7621 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 7622 else 7623 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7624 } 7625 7626 req->flags = cpu_to_le32(flags); 7627 return hwrm_req_send_silent(bp, req); 7628 } 7629 7630 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7631 { 7632 if (bp->hwrm_spec_code < 0x10801) 7633 return 0; 7634 7635 if (BNXT_PF(bp)) 7636 return bnxt_hwrm_check_pf_rings(bp, hwr); 7637 7638 return bnxt_hwrm_check_vf_rings(bp, hwr); 7639 } 7640 7641 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 7642 { 7643 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7644 struct hwrm_ring_aggint_qcaps_output *resp; 7645 struct hwrm_ring_aggint_qcaps_input *req; 7646 int rc; 7647 7648 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 7649 coal_cap->num_cmpl_dma_aggr_max = 63; 7650 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 7651 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 7652 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 7653 coal_cap->int_lat_tmr_min_max = 65535; 7654 coal_cap->int_lat_tmr_max_max = 65535; 7655 coal_cap->num_cmpl_aggr_int_max = 65535; 7656 coal_cap->timer_units = 80; 7657 7658 if (bp->hwrm_spec_code < 0x10902) 7659 return; 7660 7661 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 7662 return; 7663 7664 resp = hwrm_req_hold(bp, req); 7665 rc = hwrm_req_send_silent(bp, req); 7666 if (!rc) { 7667 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 7668 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 7669 coal_cap->num_cmpl_dma_aggr_max = 7670 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 7671 coal_cap->num_cmpl_dma_aggr_during_int_max = 7672 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 7673 coal_cap->cmpl_aggr_dma_tmr_max = 7674 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 7675 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 7676 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 7677 coal_cap->int_lat_tmr_min_max = 7678 le16_to_cpu(resp->int_lat_tmr_min_max); 7679 coal_cap->int_lat_tmr_max_max = 7680 le16_to_cpu(resp->int_lat_tmr_max_max); 7681 coal_cap->num_cmpl_aggr_int_max = 7682 le16_to_cpu(resp->num_cmpl_aggr_int_max); 7683 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 7684 } 7685 hwrm_req_drop(bp, req); 7686 } 7687 7688 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 7689 { 7690 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7691 7692 return usec * 1000 / coal_cap->timer_units; 7693 } 7694 7695 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 7696 struct bnxt_coal *hw_coal, 7697 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7698 { 7699 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7700 u16 val, tmr, max, flags = hw_coal->flags; 7701 u32 cmpl_params = coal_cap->cmpl_params; 7702 7703 max = hw_coal->bufs_per_record * 128; 7704 if (hw_coal->budget) 7705 max = hw_coal->bufs_per_record * hw_coal->budget; 7706 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 7707 7708 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 7709 
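	/* Completion records to aggregate per interrupt, clamped above to
	 * the queried capability.
	 */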
req->num_cmpl_aggr_int = cpu_to_le16(val); 7710 7711 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 7712 req->num_cmpl_dma_aggr = cpu_to_le16(val); 7713 7714 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 7715 coal_cap->num_cmpl_dma_aggr_during_int_max); 7716 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 7717 7718 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 7719 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 7720 req->int_lat_tmr_max = cpu_to_le16(tmr); 7721 7722 /* min timer set to 1/2 of interrupt timer */ 7723 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 7724 val = tmr / 2; 7725 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 7726 req->int_lat_tmr_min = cpu_to_le16(val); 7727 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7728 } 7729 7730 /* buf timer set to 1/4 of interrupt timer */ 7731 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 7732 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 7733 7734 if (cmpl_params & 7735 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 7736 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 7737 val = clamp_t(u16, tmr, 1, 7738 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 7739 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 7740 req->enables |= 7741 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 7742 } 7743 7744 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 7745 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 7746 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 7747 req->flags = cpu_to_le16(flags); 7748 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 7749 } 7750 7751 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 7752 struct bnxt_coal *hw_coal) 7753 { 7754 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 7755 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7756 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7757 u32 nq_params = coal_cap->nq_params; 7758 u16 tmr; 7759 int rc; 7760 7761 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 7762 return 0; 7763 7764 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7765 if (rc) 7766 return rc; 7767 7768 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 7769 req->flags = 7770 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 7771 7772 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 7773 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 7774 req->int_lat_tmr_min = cpu_to_le16(tmr); 7775 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7776 return hwrm_req_send(bp, req); 7777 } 7778 7779 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 7780 { 7781 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 7782 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7783 struct bnxt_coal coal; 7784 int rc; 7785 7786 /* Tick values in micro seconds. 7787 * 1 coal_buf x bufs_per_record = 1 completion record. 
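	 * The per-ring values in cpr->rx_ring_coal override the global
	 * bp->rx_coal defaults copied below.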
7788 */ 7789 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 7790 7791 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 7792 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 7793 7794 if (!bnapi->rx_ring) 7795 return -ENODEV; 7796 7797 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7798 if (rc) 7799 return rc; 7800 7801 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 7802 7803 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 7804 7805 return hwrm_req_send(bp, req_rx); 7806 } 7807 7808 static int 7809 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7810 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7811 { 7812 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 7813 7814 req->ring_id = cpu_to_le16(ring_id); 7815 return hwrm_req_send(bp, req); 7816 } 7817 7818 static int 7819 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7820 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7821 { 7822 struct bnxt_tx_ring_info *txr; 7823 int i, rc; 7824 7825 bnxt_for_each_napi_tx(i, bnapi, txr) { 7826 u16 ring_id; 7827 7828 ring_id = bnxt_cp_ring_for_tx(bp, txr); 7829 req->ring_id = cpu_to_le16(ring_id); 7830 rc = hwrm_req_send(bp, req); 7831 if (rc) 7832 return rc; 7833 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7834 return 0; 7835 } 7836 return 0; 7837 } 7838 7839 int bnxt_hwrm_set_coal(struct bnxt *bp) 7840 { 7841 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 7842 int i, rc; 7843 7844 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7845 if (rc) 7846 return rc; 7847 7848 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7849 if (rc) { 7850 hwrm_req_drop(bp, req_rx); 7851 return rc; 7852 } 7853 7854 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 7855 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 7856 7857 hwrm_req_hold(bp, req_rx); 7858 hwrm_req_hold(bp, req_tx); 7859 for (i = 0; i < bp->cp_nr_rings; i++) { 7860 struct bnxt_napi *bnapi = bp->bnapi[i]; 7861 struct bnxt_coal *hw_coal; 7862 7863 if (!bnapi->rx_ring) 7864 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7865 else 7866 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 7867 if (rc) 7868 break; 7869 7870 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7871 continue; 7872 7873 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 7874 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7875 if (rc) 7876 break; 7877 } 7878 if (bnapi->rx_ring) 7879 hw_coal = &bp->rx_coal; 7880 else 7881 hw_coal = &bp->tx_coal; 7882 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 7883 } 7884 hwrm_req_drop(bp, req_rx); 7885 hwrm_req_drop(bp, req_tx); 7886 return rc; 7887 } 7888 7889 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 7890 { 7891 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 7892 struct hwrm_stat_ctx_free_input *req; 7893 int i; 7894 7895 if (!bp->bnapi) 7896 return; 7897 7898 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7899 return; 7900 7901 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 7902 return; 7903 if (BNXT_FW_MAJ(bp) <= 20) { 7904 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 7905 hwrm_req_drop(bp, req); 7906 return; 7907 } 7908 hwrm_req_hold(bp, req0); 7909 } 7910 hwrm_req_hold(bp, req); 7911 for (i = 0; i < bp->cp_nr_rings; i++) { 7912 struct bnxt_napi *bnapi = bp->bnapi[i]; 7913 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7914 7915 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 7916 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 7917 if (req0) { 7918 
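			/* Older firmware (major version <= 20): clear the
			 * stats with HWRM_STAT_CTX_CLR_STATS before freeing
			 * the context.
			 */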
req0->stat_ctx_id = req->stat_ctx_id; 7919 hwrm_req_send(bp, req0); 7920 } 7921 hwrm_req_send(bp, req); 7922 7923 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 7924 } 7925 } 7926 hwrm_req_drop(bp, req); 7927 if (req0) 7928 hwrm_req_drop(bp, req0); 7929 } 7930 7931 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 7932 { 7933 struct hwrm_stat_ctx_alloc_output *resp; 7934 struct hwrm_stat_ctx_alloc_input *req; 7935 int rc, i; 7936 7937 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7938 return 0; 7939 7940 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 7941 if (rc) 7942 return rc; 7943 7944 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 7945 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 7946 7947 resp = hwrm_req_hold(bp, req); 7948 for (i = 0; i < bp->cp_nr_rings; i++) { 7949 struct bnxt_napi *bnapi = bp->bnapi[i]; 7950 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7951 7952 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 7953 7954 rc = hwrm_req_send(bp, req); 7955 if (rc) 7956 break; 7957 7958 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 7959 7960 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 7961 } 7962 hwrm_req_drop(bp, req); 7963 return rc; 7964 } 7965 7966 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 7967 { 7968 struct hwrm_func_qcfg_output *resp; 7969 struct hwrm_func_qcfg_input *req; 7970 u16 flags; 7971 int rc; 7972 7973 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7974 if (rc) 7975 return rc; 7976 7977 req->fid = cpu_to_le16(0xffff); 7978 resp = hwrm_req_hold(bp, req); 7979 rc = hwrm_req_send(bp, req); 7980 if (rc) 7981 goto func_qcfg_exit; 7982 7983 #ifdef CONFIG_BNXT_SRIOV 7984 if (BNXT_VF(bp)) { 7985 struct bnxt_vf_info *vf = &bp->vf; 7986 7987 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 7988 } else { 7989 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 7990 } 7991 #endif 7992 flags = le16_to_cpu(resp->flags); 7993 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 7994 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 7995 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 7996 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 7997 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 7998 } 7999 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 8000 bp->flags |= BNXT_FLAG_MULTI_HOST; 8001 8002 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 8003 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 8004 8005 switch (resp->port_partition_type) { 8006 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 8007 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 8008 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 8009 bp->port_partition_type = resp->port_partition_type; 8010 break; 8011 } 8012 if (bp->hwrm_spec_code < 0x10707 || 8013 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 8014 bp->br_mode = BRIDGE_MODE_VEB; 8015 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 8016 bp->br_mode = BRIDGE_MODE_VEPA; 8017 else 8018 bp->br_mode = BRIDGE_MODE_UNDEF; 8019 8020 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 8021 if (!bp->max_mtu) 8022 bp->max_mtu = BNXT_MAX_MTU; 8023 8024 if (bp->db_size) 8025 goto func_qcfg_exit; 8026 8027 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 8028 if (BNXT_CHIP_P5(bp)) { 8029 if (BNXT_PF(bp)) 8030 bp->db_offset = DB_PF_OFFSET_P5; 8031 else 8032 bp->db_offset = DB_VF_OFFSET_P5; 8033 } 8034 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 8035 1024); 8036 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 8037 bp->db_size <= 
bp->db_offset) 8038 bp->db_size = pci_resource_len(bp->pdev, 2); 8039 8040 func_qcfg_exit: 8041 hwrm_req_drop(bp, req); 8042 return rc; 8043 } 8044 8045 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 8046 u8 init_val, u8 init_offset, 8047 bool init_mask_set) 8048 { 8049 ctxm->init_value = init_val; 8050 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 8051 if (init_mask_set) 8052 ctxm->init_offset = init_offset * 4; 8053 else 8054 ctxm->init_value = 0; 8055 } 8056 8057 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 8058 { 8059 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8060 u16 type; 8061 8062 for (type = 0; type < ctx_max; type++) { 8063 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8064 int n = 1; 8065 8066 if (!ctxm->max_entries) 8067 continue; 8068 8069 if (ctxm->instance_bmap) 8070 n = hweight32(ctxm->instance_bmap); 8071 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 8072 if (!ctxm->pg_info) 8073 return -ENOMEM; 8074 } 8075 return 0; 8076 } 8077 8078 #define BNXT_CTX_INIT_VALID(flags) \ 8079 (!!((flags) & \ 8080 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 8081 8082 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 8083 { 8084 struct hwrm_func_backing_store_qcaps_v2_output *resp; 8085 struct hwrm_func_backing_store_qcaps_v2_input *req; 8086 struct bnxt_ctx_mem_info *ctx; 8087 u16 type; 8088 int rc; 8089 8090 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 8091 if (rc) 8092 return rc; 8093 8094 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8095 if (!ctx) 8096 return -ENOMEM; 8097 bp->ctx = ctx; 8098 8099 resp = hwrm_req_hold(bp, req); 8100 8101 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 8102 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8103 u8 init_val, init_off, i; 8104 __le32 *p; 8105 u32 flags; 8106 8107 req->type = cpu_to_le16(type); 8108 rc = hwrm_req_send(bp, req); 8109 if (rc) 8110 goto ctx_done; 8111 flags = le32_to_cpu(resp->flags); 8112 type = le16_to_cpu(resp->next_valid_type); 8113 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) 8114 continue; 8115 8116 ctxm->type = le16_to_cpu(resp->type); 8117 ctxm->entry_size = le16_to_cpu(resp->entry_size); 8118 ctxm->flags = flags; 8119 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 8120 ctxm->entry_multiple = resp->entry_multiple; 8121 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); 8122 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 8123 init_val = resp->ctx_init_value; 8124 init_off = resp->ctx_init_offset; 8125 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 8126 BNXT_CTX_INIT_VALID(flags)); 8127 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 8128 BNXT_MAX_SPLIT_ENTRY); 8129 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 8130 i++, p++) 8131 ctxm->split[i] = le32_to_cpu(*p); 8132 } 8133 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 8134 8135 ctx_done: 8136 hwrm_req_drop(bp, req); 8137 return rc; 8138 } 8139 8140 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 8141 { 8142 struct hwrm_func_backing_store_qcaps_output *resp; 8143 struct hwrm_func_backing_store_qcaps_input *req; 8144 int rc; 8145 8146 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 8147 return 0; 8148 8149 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8150 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 8151 8152 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 8153 if (rc) 8154 return rc; 8155 8156 resp = hwrm_req_hold(bp, 
req); 8157 rc = hwrm_req_send_silent(bp, req); 8158 if (!rc) { 8159 struct bnxt_ctx_mem_type *ctxm; 8160 struct bnxt_ctx_mem_info *ctx; 8161 u8 init_val, init_idx = 0; 8162 u16 init_mask; 8163 8164 ctx = bp->ctx; 8165 if (!ctx) { 8166 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8167 if (!ctx) { 8168 rc = -ENOMEM; 8169 goto ctx_err; 8170 } 8171 bp->ctx = ctx; 8172 } 8173 init_val = resp->ctx_kind_initializer; 8174 init_mask = le16_to_cpu(resp->ctx_init_mask); 8175 8176 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8177 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 8178 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 8179 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 8180 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 8181 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 8182 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 8183 (init_mask & (1 << init_idx++)) != 0); 8184 8185 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8186 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 8187 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 8188 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 8189 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 8190 (init_mask & (1 << init_idx++)) != 0); 8191 8192 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8193 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 8194 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 8195 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 8196 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 8197 (init_mask & (1 << init_idx++)) != 0); 8198 8199 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8200 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 8201 ctxm->max_entries = ctxm->vnic_entries + 8202 le16_to_cpu(resp->vnic_max_ring_table_entries); 8203 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 8204 bnxt_init_ctx_initializer(ctxm, init_val, 8205 resp->vnic_init_offset, 8206 (init_mask & (1 << init_idx++)) != 0); 8207 8208 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8209 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 8210 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 8211 bnxt_init_ctx_initializer(ctxm, init_val, 8212 resp->stat_init_offset, 8213 (init_mask & (1 << init_idx++)) != 0); 8214 8215 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8216 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 8217 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 8218 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 8219 ctxm->entry_multiple = resp->tqm_entries_multiple; 8220 if (!ctxm->entry_multiple) 8221 ctxm->entry_multiple = 1; 8222 8223 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 8224 8225 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8226 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 8227 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 8228 ctxm->mrav_num_entries_units = 8229 le16_to_cpu(resp->mrav_num_entries_units); 8230 bnxt_init_ctx_initializer(ctxm, init_val, 8231 resp->mrav_init_offset, 8232 (init_mask & (1 << init_idx++)) != 0); 8233 8234 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8235 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 8236 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 8237 8238 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 8239 if (!ctx->tqm_fp_rings_count) 8240 ctx->tqm_fp_rings_count = bp->max_q; 8241 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 8242 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 8243 
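	/* The fast-path TQM type reuses the slow-path TQM parameters, with
	 * one instance bit per fast-path TQM ring.
	 */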
8244 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8245 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 8246 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 8247 8248 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 8249 } else { 8250 rc = 0; 8251 } 8252 ctx_err: 8253 hwrm_req_drop(bp, req); 8254 return rc; 8255 } 8256 8257 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 8258 __le64 *pg_dir) 8259 { 8260 if (!rmem->nr_pages) 8261 return; 8262 8263 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8264 if (rmem->depth >= 1) { 8265 if (rmem->depth == 2) 8266 *pg_attr |= 2; 8267 else 8268 *pg_attr |= 1; 8269 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8270 } else { 8271 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8272 } 8273 } 8274 8275 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8276 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8277 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8278 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8279 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8280 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8281 8282 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8283 { 8284 struct hwrm_func_backing_store_cfg_input *req; 8285 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8286 struct bnxt_ctx_pg_info *ctx_pg; 8287 struct bnxt_ctx_mem_type *ctxm; 8288 void **__req = (void **)&req; 8289 u32 req_len = sizeof(*req); 8290 __le32 *num_entries; 8291 __le64 *pg_dir; 8292 u32 flags = 0; 8293 u8 *pg_attr; 8294 u32 ena; 8295 int rc; 8296 int i; 8297 8298 if (!ctx) 8299 return 0; 8300 8301 if (req_len > bp->hwrm_max_ext_req_len) 8302 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8303 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8304 if (rc) 8305 return rc; 8306 8307 req->enables = cpu_to_le32(enables); 8308 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8309 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8310 ctx_pg = ctxm->pg_info; 8311 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8312 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8313 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8314 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8315 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8316 &req->qpc_pg_size_qpc_lvl, 8317 &req->qpc_page_dir); 8318 8319 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8320 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8321 } 8322 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8323 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8324 ctx_pg = ctxm->pg_info; 8325 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8326 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 8327 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8328 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8329 &req->srq_pg_size_srq_lvl, 8330 &req->srq_page_dir); 8331 } 8332 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8333 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8334 ctx_pg = ctxm->pg_info; 8335 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8336 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8337 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8338 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8339 &req->cq_pg_size_cq_lvl, 8340 &req->cq_page_dir); 8341 } 8342 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8343 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8344 ctx_pg = ctxm->pg_info; 8345 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8346 req->vnic_num_ring_table_entries = 8347 cpu_to_le16(ctxm->max_entries - 
ctxm->vnic_entries); 8348 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8349 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8350 &req->vnic_pg_size_vnic_lvl, 8351 &req->vnic_page_dir); 8352 } 8353 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8354 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8355 ctx_pg = ctxm->pg_info; 8356 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8357 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8358 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8359 &req->stat_pg_size_stat_lvl, 8360 &req->stat_page_dir); 8361 } 8362 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8363 u32 units; 8364 8365 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8366 ctx_pg = ctxm->pg_info; 8367 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8368 units = ctxm->mrav_num_entries_units; 8369 if (units) { 8370 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8371 u32 entries; 8372 8373 num_mr = ctx_pg->entries - num_ah; 8374 entries = ((num_mr / units) << 16) | (num_ah / units); 8375 req->mrav_num_entries = cpu_to_le32(entries); 8376 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8377 } 8378 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8379 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8380 &req->mrav_pg_size_mrav_lvl, 8381 &req->mrav_page_dir); 8382 } 8383 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8384 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8385 ctx_pg = ctxm->pg_info; 8386 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8387 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8388 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8389 &req->tim_pg_size_tim_lvl, 8390 &req->tim_page_dir); 8391 } 8392 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8393 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8394 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8395 pg_dir = &req->tqm_sp_page_dir, 8396 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8397 ctx_pg = ctxm->pg_info; 8398 i < BNXT_MAX_TQM_RINGS; 8399 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8400 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8401 if (!(enables & ena)) 8402 continue; 8403 8404 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8405 *num_entries = cpu_to_le32(ctx_pg->entries); 8406 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8407 } 8408 req->flags = cpu_to_le32(flags); 8409 return hwrm_req_send(bp, req); 8410 } 8411 8412 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8413 struct bnxt_ctx_pg_info *ctx_pg) 8414 { 8415 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8416 8417 rmem->page_size = BNXT_PAGE_SIZE; 8418 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8419 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8420 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 8421 if (rmem->depth >= 1) 8422 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8423 return bnxt_alloc_ring(bp, rmem); 8424 } 8425 8426 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8427 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8428 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8429 { 8430 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8431 int rc; 8432 8433 if (!mem_size) 8434 return -EINVAL; 8435 8436 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8437 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8438 ctx_pg->nr_pages = 0; 8439 return -EINVAL; 8440 } 8441 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8442 int nr_tbls, i; 8443 8444 rmem->depth = 2; 8445 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8446 GFP_KERNEL); 8447 if (!ctx_pg->ctx_pg_tbl) 8448 return -ENOMEM; 8449 nr_tbls 
= DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8450 rmem->nr_pages = nr_tbls; 8451 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8452 if (rc) 8453 return rc; 8454 for (i = 0; i < nr_tbls; i++) { 8455 struct bnxt_ctx_pg_info *pg_tbl; 8456 8457 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8458 if (!pg_tbl) 8459 return -ENOMEM; 8460 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8461 rmem = &pg_tbl->ring_mem; 8462 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8463 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8464 rmem->depth = 1; 8465 rmem->nr_pages = MAX_CTX_PAGES; 8466 rmem->ctx_mem = ctxm; 8467 if (i == (nr_tbls - 1)) { 8468 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8469 8470 if (rem) 8471 rmem->nr_pages = rem; 8472 } 8473 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8474 if (rc) 8475 break; 8476 } 8477 } else { 8478 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8479 if (rmem->nr_pages > 1 || depth) 8480 rmem->depth = 1; 8481 rmem->ctx_mem = ctxm; 8482 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8483 } 8484 return rc; 8485 } 8486 8487 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 8488 struct bnxt_ctx_pg_info *ctx_pg) 8489 { 8490 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8491 8492 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 8493 ctx_pg->ctx_pg_tbl) { 8494 int i, nr_tbls = rmem->nr_pages; 8495 8496 for (i = 0; i < nr_tbls; i++) { 8497 struct bnxt_ctx_pg_info *pg_tbl; 8498 struct bnxt_ring_mem_info *rmem2; 8499 8500 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8501 if (!pg_tbl) 8502 continue; 8503 rmem2 = &pg_tbl->ring_mem; 8504 bnxt_free_ring(bp, rmem2); 8505 ctx_pg->ctx_pg_arr[i] = NULL; 8506 kfree(pg_tbl); 8507 ctx_pg->ctx_pg_tbl[i] = NULL; 8508 } 8509 kfree(ctx_pg->ctx_pg_tbl); 8510 ctx_pg->ctx_pg_tbl = NULL; 8511 } 8512 bnxt_free_ring(bp, rmem); 8513 ctx_pg->nr_pages = 0; 8514 } 8515 8516 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 8517 struct bnxt_ctx_mem_type *ctxm, u32 entries, 8518 u8 pg_lvl) 8519 { 8520 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8521 int i, rc = 0, n = 1; 8522 u32 mem_size; 8523 8524 if (!ctxm->entry_size || !ctx_pg) 8525 return -EINVAL; 8526 if (ctxm->instance_bmap) 8527 n = hweight32(ctxm->instance_bmap); 8528 if (ctxm->entry_multiple) 8529 entries = roundup(entries, ctxm->entry_multiple); 8530 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 8531 mem_size = entries * ctxm->entry_size; 8532 for (i = 0; i < n && !rc; i++) { 8533 ctx_pg[i].entries = entries; 8534 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 8535 ctxm->init_value ? 
ctxm : NULL); 8536 } 8537 return rc; 8538 } 8539 8540 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 8541 struct bnxt_ctx_mem_type *ctxm, 8542 bool last) 8543 { 8544 struct hwrm_func_backing_store_cfg_v2_input *req; 8545 u32 instance_bmap = ctxm->instance_bmap; 8546 int i, j, rc = 0, n = 1; 8547 __le32 *p; 8548 8549 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 8550 return 0; 8551 8552 if (instance_bmap) 8553 n = hweight32(ctxm->instance_bmap); 8554 else 8555 instance_bmap = 1; 8556 8557 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 8558 if (rc) 8559 return rc; 8560 hwrm_req_hold(bp, req); 8561 req->type = cpu_to_le16(ctxm->type); 8562 req->entry_size = cpu_to_le16(ctxm->entry_size); 8563 req->subtype_valid_cnt = ctxm->split_entry_cnt; 8564 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 8565 p[i] = cpu_to_le32(ctxm->split[i]); 8566 for (i = 0, j = 0; j < n && !rc; i++) { 8567 struct bnxt_ctx_pg_info *ctx_pg; 8568 8569 if (!(instance_bmap & (1 << i))) 8570 continue; 8571 req->instance = cpu_to_le16(i); 8572 ctx_pg = &ctxm->pg_info[j++]; 8573 if (!ctx_pg->entries) 8574 continue; 8575 req->num_entries = cpu_to_le32(ctx_pg->entries); 8576 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8577 &req->page_size_pbl_level, 8578 &req->page_dir); 8579 if (last && j == n) 8580 req->flags = 8581 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 8582 rc = hwrm_req_send(bp, req); 8583 } 8584 hwrm_req_drop(bp, req); 8585 return rc; 8586 } 8587 8588 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 8589 { 8590 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8591 struct bnxt_ctx_mem_type *ctxm; 8592 u16 last_type; 8593 int rc = 0; 8594 u16 type; 8595 8596 if (!ena) 8597 return 0; 8598 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 8599 last_type = BNXT_CTX_MAX - 1; 8600 else 8601 last_type = BNXT_CTX_L2_MAX - 1; 8602 ctx->ctx_arr[last_type].last = 1; 8603 8604 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 8605 ctxm = &ctx->ctx_arr[type]; 8606 8607 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 8608 if (rc) 8609 return rc; 8610 } 8611 return 0; 8612 } 8613 8614 void bnxt_free_ctx_mem(struct bnxt *bp) 8615 { 8616 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8617 u16 type; 8618 8619 if (!ctx) 8620 return; 8621 8622 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { 8623 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8624 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8625 int i, n = 1; 8626 8627 if (!ctx_pg) 8628 continue; 8629 if (ctxm->instance_bmap) 8630 n = hweight32(ctxm->instance_bmap); 8631 for (i = 0; i < n; i++) 8632 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 8633 8634 kfree(ctx_pg); 8635 ctxm->pg_info = NULL; 8636 } 8637 8638 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 8639 kfree(ctx); 8640 bp->ctx = NULL; 8641 } 8642 8643 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 8644 { 8645 struct bnxt_ctx_mem_type *ctxm; 8646 struct bnxt_ctx_mem_info *ctx; 8647 u32 l2_qps, qp1_qps, max_qps; 8648 u32 ena, entries_sp, entries; 8649 u32 srqs, max_srqs, min; 8650 u32 num_mr, num_ah; 8651 u32 extra_srqs = 0; 8652 u32 extra_qps = 0; 8653 u32 fast_qpmd_qps; 8654 u8 pg_lvl = 1; 8655 int i, rc; 8656 8657 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 8658 if (rc) { 8659 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 8660 rc); 8661 return rc; 8662 } 8663 ctx = bp->ctx; 8664 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 8665 return 0; 8666 8667 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8668 
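	/* Cache the L2/QP1 QP and SRQ counts reported by firmware; extra
	 * entries are added below only when RoCE is supported and this is
	 * not a kdump kernel.
	 */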
l2_qps = ctxm->qp_l2_entries; 8669 qp1_qps = ctxm->qp_qp1_entries; 8670 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; 8671 max_qps = ctxm->max_entries; 8672 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8673 srqs = ctxm->srq_l2_entries; 8674 max_srqs = ctxm->max_entries; 8675 ena = 0; 8676 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 8677 pg_lvl = 2; 8678 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); 8679 /* allocate extra qps if fw supports RoCE fast qp destroy feature */ 8680 extra_qps += fast_qpmd_qps; 8681 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 8682 if (fast_qpmd_qps) 8683 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 8684 } 8685 8686 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8687 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 8688 pg_lvl); 8689 if (rc) 8690 return rc; 8691 8692 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8693 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 8694 if (rc) 8695 return rc; 8696 8697 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8698 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 8699 extra_qps * 2, pg_lvl); 8700 if (rc) 8701 return rc; 8702 8703 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8704 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8705 if (rc) 8706 return rc; 8707 8708 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8709 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8710 if (rc) 8711 return rc; 8712 8713 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 8714 goto skip_rdma; 8715 8716 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8717 /* 128K extra is needed to accommodate static AH context 8718 * allocation by f/w. 8719 */ 8720 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 8721 num_ah = min_t(u32, num_mr, 1024 * 128); 8722 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 8723 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 8724 ctxm->mrav_av_entries = num_ah; 8725 8726 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 8727 if (rc) 8728 return rc; 8729 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 8730 8731 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8732 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 8733 if (rc) 8734 return rc; 8735 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 8736 8737 skip_rdma: 8738 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8739 min = ctxm->min_entries; 8740 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 8741 2 * (extra_qps + qp1_qps) + min; 8742 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 8743 if (rc) 8744 return rc; 8745 8746 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8747 entries = l2_qps + 2 * (extra_qps + qp1_qps); 8748 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 8749 if (rc) 8750 return rc; 8751 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 8752 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 8753 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 8754 8755 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8756 rc = bnxt_backing_store_cfg_v2(bp, ena); 8757 else 8758 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 8759 if (rc) { 8760 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 8761 rc); 8762 return rc; 8763 } 8764 ctx->flags |= BNXT_CTX_FLAG_INITED; 8765 return 0; 8766 } 8767 8768 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 8769 { 8770 struct hwrm_func_resource_qcaps_output *resp; 8771 struct hwrm_func_resource_qcaps_input *req; 8772 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8773 int rc; 8774 8775 rc = hwrm_req_init(bp, req, 
HWRM_FUNC_RESOURCE_QCAPS); 8776 if (rc) 8777 return rc; 8778 8779 req->fid = cpu_to_le16(0xffff); 8780 resp = hwrm_req_hold(bp, req); 8781 rc = hwrm_req_send_silent(bp, req); 8782 if (rc) 8783 goto hwrm_func_resc_qcaps_exit; 8784 8785 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 8786 if (!all) 8787 goto hwrm_func_resc_qcaps_exit; 8788 8789 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 8790 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8791 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 8792 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8793 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 8794 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8795 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 8796 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8797 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 8798 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 8799 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 8800 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8801 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 8802 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8803 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 8804 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8805 8806 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8807 u16 max_msix = le16_to_cpu(resp->max_msix); 8808 8809 hw_resc->max_nqs = max_msix; 8810 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 8811 } 8812 8813 if (BNXT_PF(bp)) { 8814 struct bnxt_pf_info *pf = &bp->pf; 8815 8816 pf->vf_resv_strategy = 8817 le16_to_cpu(resp->vf_reservation_strategy); 8818 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 8819 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 8820 } 8821 hwrm_func_resc_qcaps_exit: 8822 hwrm_req_drop(bp, req); 8823 return rc; 8824 } 8825 8826 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 8827 { 8828 struct hwrm_port_mac_ptp_qcfg_output *resp; 8829 struct hwrm_port_mac_ptp_qcfg_input *req; 8830 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 8831 bool phc_cfg; 8832 u8 flags; 8833 int rc; 8834 8835 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { 8836 rc = -ENODEV; 8837 goto no_ptp; 8838 } 8839 8840 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 8841 if (rc) 8842 goto no_ptp; 8843 8844 req->port_id = cpu_to_le16(bp->pf.port_id); 8845 resp = hwrm_req_hold(bp, req); 8846 rc = hwrm_req_send(bp, req); 8847 if (rc) 8848 goto exit; 8849 8850 flags = resp->flags; 8851 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 8852 rc = -ENODEV; 8853 goto exit; 8854 } 8855 if (!ptp) { 8856 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 8857 if (!ptp) { 8858 rc = -ENOMEM; 8859 goto exit; 8860 } 8861 ptp->bp = bp; 8862 bp->ptp_cfg = ptp; 8863 } 8864 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { 8865 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); 8866 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 8867 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8868 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 8869 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 8870 } else { 8871 rc = -ENODEV; 8872 goto exit; 8873 } 8874 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 8875 rc = bnxt_ptp_init(bp, phc_cfg); 8876 if (rc) 8877 netdev_warn(bp->dev, "PTP initialization failed.\n"); 8878 exit: 8879 hwrm_req_drop(bp, req); 8880 if 
(!rc) 8881 return 0; 8882 8883 no_ptp: 8884 bnxt_ptp_clear(bp); 8885 kfree(ptp); 8886 bp->ptp_cfg = NULL; 8887 return rc; 8888 } 8889 8890 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 8891 { 8892 struct hwrm_func_qcaps_output *resp; 8893 struct hwrm_func_qcaps_input *req; 8894 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8895 u32 flags, flags_ext, flags_ext2; 8896 int rc; 8897 8898 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 8899 if (rc) 8900 return rc; 8901 8902 req->fid = cpu_to_le16(0xffff); 8903 resp = hwrm_req_hold(bp, req); 8904 rc = hwrm_req_send(bp, req); 8905 if (rc) 8906 goto hwrm_func_qcaps_exit; 8907 8908 flags = le32_to_cpu(resp->flags); 8909 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 8910 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 8911 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 8912 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 8913 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 8914 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 8915 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 8916 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 8917 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 8918 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 8919 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 8920 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 8921 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 8922 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 8923 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 8924 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 8925 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 8926 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 8927 8928 flags_ext = le32_to_cpu(resp->flags_ext); 8929 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 8930 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 8931 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 8932 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 8933 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 8934 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 8935 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 8936 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 8937 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 8938 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 8939 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 8940 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 8941 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 8942 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 8943 8944 flags_ext2 = le32_to_cpu(resp->flags_ext2); 8945 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 8946 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 8947 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 8948 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 8949 8950 bp->tx_push_thresh = 0; 8951 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 8952 BNXT_FW_MAJ(bp) > 217) 8953 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 8954 8955 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8956 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8957 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8958 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8959 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 8960 if (!hw_resc->max_hw_ring_grps) 8961 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 8962 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8963 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8964 
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8965 8966 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); 8967 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); 8968 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 8969 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 8970 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 8971 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 8972 8973 if (BNXT_PF(bp)) { 8974 struct bnxt_pf_info *pf = &bp->pf; 8975 8976 pf->fw_fid = le16_to_cpu(resp->fid); 8977 pf->port_id = le16_to_cpu(resp->port_id); 8978 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 8979 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 8980 pf->max_vfs = le16_to_cpu(resp->max_vfs); 8981 bp->flags &= ~BNXT_FLAG_WOL_CAP; 8982 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 8983 bp->flags |= BNXT_FLAG_WOL_CAP; 8984 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 8985 bp->fw_cap |= BNXT_FW_CAP_PTP; 8986 } else { 8987 bnxt_ptp_clear(bp); 8988 kfree(bp->ptp_cfg); 8989 bp->ptp_cfg = NULL; 8990 } 8991 } else { 8992 #ifdef CONFIG_BNXT_SRIOV 8993 struct bnxt_vf_info *vf = &bp->vf; 8994 8995 vf->fw_fid = le16_to_cpu(resp->fid); 8996 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 8997 #endif 8998 } 8999 9000 hwrm_func_qcaps_exit: 9001 hwrm_req_drop(bp, req); 9002 return rc; 9003 } 9004 9005 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 9006 { 9007 struct hwrm_dbg_qcaps_output *resp; 9008 struct hwrm_dbg_qcaps_input *req; 9009 int rc; 9010 9011 bp->fw_dbg_cap = 0; 9012 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 9013 return; 9014 9015 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 9016 if (rc) 9017 return; 9018 9019 req->fid = cpu_to_le16(0xffff); 9020 resp = hwrm_req_hold(bp, req); 9021 rc = hwrm_req_send(bp, req); 9022 if (rc) 9023 goto hwrm_dbg_qcaps_exit; 9024 9025 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 9026 9027 hwrm_dbg_qcaps_exit: 9028 hwrm_req_drop(bp, req); 9029 } 9030 9031 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 9032 9033 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 9034 { 9035 int rc; 9036 9037 rc = __bnxt_hwrm_func_qcaps(bp); 9038 if (rc) 9039 return rc; 9040 9041 bnxt_hwrm_dbg_qcaps(bp); 9042 9043 rc = bnxt_hwrm_queue_qportcfg(bp); 9044 if (rc) { 9045 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 9046 return rc; 9047 } 9048 if (bp->hwrm_spec_code >= 0x10803) { 9049 rc = bnxt_alloc_ctx_mem(bp); 9050 if (rc) 9051 return rc; 9052 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9053 if (!rc) 9054 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 9055 } 9056 return 0; 9057 } 9058 9059 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 9060 { 9061 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 9062 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 9063 u32 flags; 9064 int rc; 9065 9066 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 9067 return 0; 9068 9069 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 9070 if (rc) 9071 return rc; 9072 9073 resp = hwrm_req_hold(bp, req); 9074 rc = hwrm_req_send(bp, req); 9075 if (rc) 9076 goto hwrm_cfa_adv_qcaps_exit; 9077 9078 flags = le32_to_cpu(resp->flags); 9079 if (flags & 9080 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 9081 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 9082 9083 if (flags & 9084 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) 9085 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; 9086 9087 if (flags & 9088 
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) 9089 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; 9090 9091 hwrm_cfa_adv_qcaps_exit: 9092 hwrm_req_drop(bp, req); 9093 return rc; 9094 } 9095 9096 static int __bnxt_alloc_fw_health(struct bnxt *bp) 9097 { 9098 if (bp->fw_health) 9099 return 0; 9100 9101 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 9102 if (!bp->fw_health) 9103 return -ENOMEM; 9104 9105 mutex_init(&bp->fw_health->lock); 9106 return 0; 9107 } 9108 9109 static int bnxt_alloc_fw_health(struct bnxt *bp) 9110 { 9111 int rc; 9112 9113 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 9114 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9115 return 0; 9116 9117 rc = __bnxt_alloc_fw_health(bp); 9118 if (rc) { 9119 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 9120 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9121 return rc; 9122 } 9123 9124 return 0; 9125 } 9126 9127 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 9128 { 9129 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 9130 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 9131 BNXT_FW_HEALTH_WIN_MAP_OFF); 9132 } 9133 9134 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 9135 { 9136 struct bnxt_fw_health *fw_health = bp->fw_health; 9137 u32 reg_type; 9138 9139 if (!fw_health) 9140 return; 9141 9142 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 9143 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9144 fw_health->status_reliable = false; 9145 9146 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 9147 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9148 fw_health->resets_reliable = false; 9149 } 9150 9151 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 9152 { 9153 void __iomem *hs; 9154 u32 status_loc; 9155 u32 reg_type; 9156 u32 sig; 9157 9158 if (bp->fw_health) 9159 bp->fw_health->status_reliable = false; 9160 9161 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 9162 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 9163 9164 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 9165 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 9166 if (!bp->chip_num) { 9167 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 9168 bp->chip_num = readl(bp->bar0 + 9169 BNXT_FW_HEALTH_WIN_BASE + 9170 BNXT_GRC_REG_CHIP_NUM); 9171 } 9172 if (!BNXT_CHIP_P5_PLUS(bp)) 9173 return; 9174 9175 status_loc = BNXT_GRC_REG_STATUS_P5 | 9176 BNXT_FW_HEALTH_REG_TYPE_BAR0; 9177 } else { 9178 status_loc = readl(hs + offsetof(struct hcomm_status, 9179 fw_status_loc)); 9180 } 9181 9182 if (__bnxt_alloc_fw_health(bp)) { 9183 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 9184 return; 9185 } 9186 9187 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 9188 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 9189 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 9190 __bnxt_map_fw_health_reg(bp, status_loc); 9191 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 9192 BNXT_FW_HEALTH_WIN_OFF(status_loc); 9193 } 9194 9195 bp->fw_health->status_reliable = true; 9196 } 9197 9198 static int bnxt_map_fw_health_regs(struct bnxt *bp) 9199 { 9200 struct bnxt_fw_health *fw_health = bp->fw_health; 9201 u32 reg_base = 0xffffffff; 9202 int i; 9203 9204 bp->fw_health->status_reliable = false; 9205 bp->fw_health->resets_reliable = false; 9206 /* Only pre-map the monitoring GRC registers using window 3 */ 9207 for (i = 0; i < 4; i++) { 9208 u32 reg = fw_health->regs[i]; 9209 9210 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 
9211 continue; 9212 if (reg_base == 0xffffffff) 9213 reg_base = reg & BNXT_GRC_BASE_MASK; 9214 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 9215 return -ERANGE; 9216 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 9217 } 9218 bp->fw_health->status_reliable = true; 9219 bp->fw_health->resets_reliable = true; 9220 if (reg_base == 0xffffffff) 9221 return 0; 9222 9223 __bnxt_map_fw_health_reg(bp, reg_base); 9224 return 0; 9225 } 9226 9227 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 9228 { 9229 if (!bp->fw_health) 9230 return; 9231 9232 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 9233 bp->fw_health->status_reliable = true; 9234 bp->fw_health->resets_reliable = true; 9235 } else { 9236 bnxt_try_map_fw_health_reg(bp); 9237 } 9238 } 9239 9240 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 9241 { 9242 struct bnxt_fw_health *fw_health = bp->fw_health; 9243 struct hwrm_error_recovery_qcfg_output *resp; 9244 struct hwrm_error_recovery_qcfg_input *req; 9245 int rc, i; 9246 9247 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9248 return 0; 9249 9250 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 9251 if (rc) 9252 return rc; 9253 9254 resp = hwrm_req_hold(bp, req); 9255 rc = hwrm_req_send(bp, req); 9256 if (rc) 9257 goto err_recovery_out; 9258 fw_health->flags = le32_to_cpu(resp->flags); 9259 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 9260 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 9261 rc = -EINVAL; 9262 goto err_recovery_out; 9263 } 9264 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 9265 fw_health->master_func_wait_dsecs = 9266 le32_to_cpu(resp->master_func_wait_period); 9267 fw_health->normal_func_wait_dsecs = 9268 le32_to_cpu(resp->normal_func_wait_period); 9269 fw_health->post_reset_wait_dsecs = 9270 le32_to_cpu(resp->master_func_wait_period_after_reset); 9271 fw_health->post_reset_max_wait_dsecs = 9272 le32_to_cpu(resp->max_bailout_time_after_reset); 9273 fw_health->regs[BNXT_FW_HEALTH_REG] = 9274 le32_to_cpu(resp->fw_health_status_reg); 9275 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 9276 le32_to_cpu(resp->fw_heartbeat_reg); 9277 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 9278 le32_to_cpu(resp->fw_reset_cnt_reg); 9279 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 9280 le32_to_cpu(resp->reset_inprogress_reg); 9281 fw_health->fw_reset_inprog_reg_mask = 9282 le32_to_cpu(resp->reset_inprogress_reg_mask); 9283 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 9284 if (fw_health->fw_reset_seq_cnt >= 16) { 9285 rc = -EINVAL; 9286 goto err_recovery_out; 9287 } 9288 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 9289 fw_health->fw_reset_seq_regs[i] = 9290 le32_to_cpu(resp->reset_reg[i]); 9291 fw_health->fw_reset_seq_vals[i] = 9292 le32_to_cpu(resp->reset_reg_val[i]); 9293 fw_health->fw_reset_seq_delay_msec[i] = 9294 resp->delay_after_reset[i]; 9295 } 9296 err_recovery_out: 9297 hwrm_req_drop(bp, req); 9298 if (!rc) 9299 rc = bnxt_map_fw_health_regs(bp); 9300 if (rc) 9301 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9302 return rc; 9303 } 9304 9305 static int bnxt_hwrm_func_reset(struct bnxt *bp) 9306 { 9307 struct hwrm_func_reset_input *req; 9308 int rc; 9309 9310 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 9311 if (rc) 9312 return rc; 9313 9314 req->enables = 0; 9315 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 9316 return hwrm_req_send(bp, req); 9317 } 9318 9319 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 9320 { 9321 struct hwrm_nvm_get_dev_info_output nvm_info; 9322 9323 if (!bnxt_hwrm_nvm_get_dev_info(bp, 
&nvm_info)) 9324 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 9325 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 9326 nvm_info.nvm_cfg_ver_upd); 9327 } 9328 9329 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 9330 { 9331 struct hwrm_queue_qportcfg_output *resp; 9332 struct hwrm_queue_qportcfg_input *req; 9333 u8 i, j, *qptr; 9334 bool no_rdma; 9335 int rc = 0; 9336 9337 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 9338 if (rc) 9339 return rc; 9340 9341 resp = hwrm_req_hold(bp, req); 9342 rc = hwrm_req_send(bp, req); 9343 if (rc) 9344 goto qportcfg_exit; 9345 9346 if (!resp->max_configurable_queues) { 9347 rc = -EINVAL; 9348 goto qportcfg_exit; 9349 } 9350 bp->max_tc = resp->max_configurable_queues; 9351 bp->max_lltc = resp->max_configurable_lossless_queues; 9352 if (bp->max_tc > BNXT_MAX_QUEUE) 9353 bp->max_tc = BNXT_MAX_QUEUE; 9354 9355 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 9356 qptr = &resp->queue_id0; 9357 for (i = 0, j = 0; i < bp->max_tc; i++) { 9358 bp->q_info[j].queue_id = *qptr; 9359 bp->q_ids[i] = *qptr++; 9360 bp->q_info[j].queue_profile = *qptr++; 9361 bp->tc_to_qidx[j] = j; 9362 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 9363 (no_rdma && BNXT_PF(bp))) 9364 j++; 9365 } 9366 bp->max_q = bp->max_tc; 9367 bp->max_tc = max_t(u8, j, 1); 9368 9369 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 9370 bp->max_tc = 1; 9371 9372 if (bp->max_lltc > bp->max_tc) 9373 bp->max_lltc = bp->max_tc; 9374 9375 qportcfg_exit: 9376 hwrm_req_drop(bp, req); 9377 return rc; 9378 } 9379 9380 static int bnxt_hwrm_poll(struct bnxt *bp) 9381 { 9382 struct hwrm_ver_get_input *req; 9383 int rc; 9384 9385 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9386 if (rc) 9387 return rc; 9388 9389 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9390 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9391 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9392 9393 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 9394 rc = hwrm_req_send(bp, req); 9395 return rc; 9396 } 9397 9398 static int bnxt_hwrm_ver_get(struct bnxt *bp) 9399 { 9400 struct hwrm_ver_get_output *resp; 9401 struct hwrm_ver_get_input *req; 9402 u16 fw_maj, fw_min, fw_bld, fw_rsv; 9403 u32 dev_caps_cfg, hwrm_ver; 9404 int rc, len; 9405 9406 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9407 if (rc) 9408 return rc; 9409 9410 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 9411 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 9412 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9413 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9414 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9415 9416 resp = hwrm_req_hold(bp, req); 9417 rc = hwrm_req_send(bp, req); 9418 if (rc) 9419 goto hwrm_ver_get_exit; 9420 9421 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 9422 9423 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 9424 resp->hwrm_intf_min_8b << 8 | 9425 resp->hwrm_intf_upd_8b; 9426 if (resp->hwrm_intf_maj_8b < 1) { 9427 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 9428 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9429 resp->hwrm_intf_upd_8b); 9430 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 9431 } 9432 9433 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 9434 HWRM_VERSION_UPDATE; 9435 9436 if (bp->hwrm_spec_code > hwrm_ver) 9437 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9438 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 9439 HWRM_VERSION_UPDATE); 9440 else 9441 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9442 
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9443 resp->hwrm_intf_upd_8b); 9444 9445 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 9446 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 9447 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 9448 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 9449 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 9450 len = FW_VER_STR_LEN; 9451 } else { 9452 fw_maj = resp->hwrm_fw_maj_8b; 9453 fw_min = resp->hwrm_fw_min_8b; 9454 fw_bld = resp->hwrm_fw_bld_8b; 9455 fw_rsv = resp->hwrm_fw_rsvd_8b; 9456 len = BC_HWRM_STR_LEN; 9457 } 9458 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 9459 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 9460 fw_rsv); 9461 9462 if (strlen(resp->active_pkg_name)) { 9463 int fw_ver_len = strlen(bp->fw_ver_str); 9464 9465 snprintf(bp->fw_ver_str + fw_ver_len, 9466 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 9467 resp->active_pkg_name); 9468 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 9469 } 9470 9471 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 9472 if (!bp->hwrm_cmd_timeout) 9473 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 9474 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 9475 if (!bp->hwrm_cmd_max_timeout) 9476 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 9477 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 9478 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 9479 bp->hwrm_cmd_max_timeout / 1000); 9480 9481 if (resp->hwrm_intf_maj_8b >= 1) { 9482 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 9483 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 9484 } 9485 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 9486 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 9487 9488 bp->chip_num = le16_to_cpu(resp->chip_num); 9489 bp->chip_rev = resp->chip_rev; 9490 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 9491 !resp->chip_metal) 9492 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 9493 9494 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 9495 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 9496 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 9497 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 9498 9499 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 9500 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 9501 9502 if (dev_caps_cfg & 9503 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 9504 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 9505 9506 if (dev_caps_cfg & 9507 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 9508 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 9509 9510 if (dev_caps_cfg & 9511 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 9512 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 9513 9514 hwrm_ver_get_exit: 9515 hwrm_req_drop(bp, req); 9516 return rc; 9517 } 9518 9519 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 9520 { 9521 struct hwrm_fw_set_time_input *req; 9522 struct tm tm; 9523 time64_t now = ktime_get_real_seconds(); 9524 int rc; 9525 9526 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 9527 bp->hwrm_spec_code < 0x10400) 9528 return -EOPNOTSUPP; 9529 9530 time64_to_tm(now, 0, &tm); 9531 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 9532 if (rc) 9533 return rc; 9534 9535 req->year = cpu_to_le16(1900 + tm.tm_year); 9536 req->month = 1 + tm.tm_mon; 9537 req->day = tm.tm_mday; 9538 req->hour = tm.tm_hour; 9539 req->minute = tm.tm_min; 9540 req->second = tm.tm_sec; 9541 return hwrm_req_send(bp, req); 9542 } 9543 9544 static void 
bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 9545 { 9546 u64 sw_tmp; 9547 9548 hw &= mask; 9549 sw_tmp = (*sw & ~mask) | hw; 9550 if (hw < (*sw & mask)) 9551 sw_tmp += mask + 1; 9552 WRITE_ONCE(*sw, sw_tmp); 9553 } 9554 9555 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 9556 int count, bool ignore_zero) 9557 { 9558 int i; 9559 9560 for (i = 0; i < count; i++) { 9561 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 9562 9563 if (ignore_zero && !hw) 9564 continue; 9565 9566 if (masks[i] == -1ULL) 9567 sw_stats[i] = hw; 9568 else 9569 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 9570 } 9571 } 9572 9573 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 9574 { 9575 if (!stats->hw_stats) 9576 return; 9577 9578 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9579 stats->hw_masks, stats->len / 8, false); 9580 } 9581 9582 static void bnxt_accumulate_all_stats(struct bnxt *bp) 9583 { 9584 struct bnxt_stats_mem *ring0_stats; 9585 bool ignore_zero = false; 9586 int i; 9587 9588 /* Chip bug. Counter intermittently becomes 0. */ 9589 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9590 ignore_zero = true; 9591 9592 for (i = 0; i < bp->cp_nr_rings; i++) { 9593 struct bnxt_napi *bnapi = bp->bnapi[i]; 9594 struct bnxt_cp_ring_info *cpr; 9595 struct bnxt_stats_mem *stats; 9596 9597 cpr = &bnapi->cp_ring; 9598 stats = &cpr->stats; 9599 if (!i) 9600 ring0_stats = stats; 9601 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9602 ring0_stats->hw_masks, 9603 ring0_stats->len / 8, ignore_zero); 9604 } 9605 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9606 struct bnxt_stats_mem *stats = &bp->port_stats; 9607 __le64 *hw_stats = stats->hw_stats; 9608 u64 *sw_stats = stats->sw_stats; 9609 u64 *masks = stats->hw_masks; 9610 int cnt; 9611 9612 cnt = sizeof(struct rx_port_stats) / 8; 9613 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9614 9615 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9616 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9617 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9618 cnt = sizeof(struct tx_port_stats) / 8; 9619 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9620 } 9621 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 9622 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 9623 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 9624 } 9625 } 9626 9627 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 9628 { 9629 struct hwrm_port_qstats_input *req; 9630 struct bnxt_pf_info *pf = &bp->pf; 9631 int rc; 9632 9633 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 9634 return 0; 9635 9636 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9637 return -EOPNOTSUPP; 9638 9639 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 9640 if (rc) 9641 return rc; 9642 9643 req->flags = flags; 9644 req->port_id = cpu_to_le16(pf->port_id); 9645 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 9646 BNXT_TX_PORT_STATS_BYTE_OFFSET); 9647 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 9648 return hwrm_req_send(bp, req); 9649 } 9650 9651 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 9652 { 9653 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 9654 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 9655 struct hwrm_port_qstats_ext_output *resp_qs; 9656 struct hwrm_port_qstats_ext_input *req_qs; 9657 struct bnxt_pf_info *pf = &bp->pf; 9658 u32 tx_stat_size; 9659 int rc; 9660 9661 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 9662 return 0; 9663 9664 if (flags && !(bp->fw_cap & 
BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9665 return -EOPNOTSUPP; 9666 9667 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 9668 if (rc) 9669 return rc; 9670 9671 req_qs->flags = flags; 9672 req_qs->port_id = cpu_to_le16(pf->port_id); 9673 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 9674 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 9675 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 9676 sizeof(struct tx_port_stats_ext) : 0; 9677 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 9678 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 9679 resp_qs = hwrm_req_hold(bp, req_qs); 9680 rc = hwrm_req_send(bp, req_qs); 9681 if (!rc) { 9682 bp->fw_rx_stats_ext_size = 9683 le16_to_cpu(resp_qs->rx_stat_size) / 8; 9684 if (BNXT_FW_MAJ(bp) < 220 && 9685 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 9686 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 9687 9688 bp->fw_tx_stats_ext_size = tx_stat_size ? 9689 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 9690 } else { 9691 bp->fw_rx_stats_ext_size = 0; 9692 bp->fw_tx_stats_ext_size = 0; 9693 } 9694 hwrm_req_drop(bp, req_qs); 9695 9696 if (flags) 9697 return rc; 9698 9699 if (bp->fw_tx_stats_ext_size <= 9700 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 9701 bp->pri2cos_valid = 0; 9702 return rc; 9703 } 9704 9705 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 9706 if (rc) 9707 return rc; 9708 9709 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 9710 9711 resp_qc = hwrm_req_hold(bp, req_qc); 9712 rc = hwrm_req_send(bp, req_qc); 9713 if (!rc) { 9714 u8 *pri2cos; 9715 int i, j; 9716 9717 pri2cos = &resp_qc->pri0_cos_queue_id; 9718 for (i = 0; i < 8; i++) { 9719 u8 queue_id = pri2cos[i]; 9720 u8 queue_idx; 9721 9722 /* Per port queue IDs start from 0, 10, 20, etc */ 9723 queue_idx = queue_id % 10; 9724 if (queue_idx > BNXT_MAX_QUEUE) { 9725 bp->pri2cos_valid = false; 9726 hwrm_req_drop(bp, req_qc); 9727 return rc; 9728 } 9729 for (j = 0; j < bp->max_q; j++) { 9730 if (bp->q_ids[j] == queue_id) 9731 bp->pri2cos_idx[i] = queue_idx; 9732 } 9733 } 9734 bp->pri2cos_valid = true; 9735 } 9736 hwrm_req_drop(bp, req_qc); 9737 9738 return rc; 9739 } 9740 9741 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 9742 { 9743 bnxt_hwrm_tunnel_dst_port_free(bp, 9744 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 9745 bnxt_hwrm_tunnel_dst_port_free(bp, 9746 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 9747 } 9748 9749 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 9750 { 9751 int rc, i; 9752 u32 tpa_flags = 0; 9753 9754 if (set_tpa) 9755 tpa_flags = bp->flags & BNXT_FLAG_TPA; 9756 else if (BNXT_NO_FW_ACCESS(bp)) 9757 return 0; 9758 for (i = 0; i < bp->nr_vnics; i++) { 9759 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); 9760 if (rc) { 9761 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 9762 i, rc); 9763 return rc; 9764 } 9765 } 9766 return 0; 9767 } 9768 9769 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 9770 { 9771 int i; 9772 9773 for (i = 0; i < bp->nr_vnics; i++) 9774 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); 9775 } 9776 9777 static void bnxt_clear_vnic(struct bnxt *bp) 9778 { 9779 if (!bp->vnic_info) 9780 return; 9781 9782 bnxt_hwrm_clear_vnic_filter(bp); 9783 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 9784 /* clear all RSS setting before free vnic ctx */ 9785 bnxt_hwrm_clear_vnic_rss(bp); 9786 bnxt_hwrm_vnic_ctx_free(bp); 9787 } 9788 /* before free 
the vnic, undo the vnic tpa settings */ 9789 if (bp->flags & BNXT_FLAG_TPA) 9790 bnxt_set_tpa(bp, false); 9791 bnxt_hwrm_vnic_free(bp); 9792 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9793 bnxt_hwrm_vnic_ctx_free(bp); 9794 } 9795 9796 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 9797 bool irq_re_init) 9798 { 9799 bnxt_clear_vnic(bp); 9800 bnxt_hwrm_ring_free(bp, close_path); 9801 bnxt_hwrm_ring_grp_free(bp); 9802 if (irq_re_init) { 9803 bnxt_hwrm_stat_ctx_free(bp); 9804 bnxt_hwrm_free_tunnel_ports(bp); 9805 } 9806 } 9807 9808 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 9809 { 9810 struct hwrm_func_cfg_input *req; 9811 u8 evb_mode; 9812 int rc; 9813 9814 if (br_mode == BRIDGE_MODE_VEB) 9815 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 9816 else if (br_mode == BRIDGE_MODE_VEPA) 9817 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 9818 else 9819 return -EINVAL; 9820 9821 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9822 if (rc) 9823 return rc; 9824 9825 req->fid = cpu_to_le16(0xffff); 9826 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 9827 req->evb_mode = evb_mode; 9828 return hwrm_req_send(bp, req); 9829 } 9830 9831 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 9832 { 9833 struct hwrm_func_cfg_input *req; 9834 int rc; 9835 9836 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 9837 return 0; 9838 9839 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9840 if (rc) 9841 return rc; 9842 9843 req->fid = cpu_to_le16(0xffff); 9844 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 9845 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 9846 if (size == 128) 9847 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 9848 9849 return hwrm_req_send(bp, req); 9850 } 9851 9852 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 9853 { 9854 int rc; 9855 9856 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 9857 goto skip_rss_ctx; 9858 9859 /* allocate context for vnic */ 9860 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 9861 if (rc) { 9862 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9863 vnic->vnic_id, rc); 9864 goto vnic_setup_err; 9865 } 9866 bp->rsscos_nr_ctxs++; 9867 9868 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9869 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); 9870 if (rc) { 9871 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 9872 vnic->vnic_id, rc); 9873 goto vnic_setup_err; 9874 } 9875 bp->rsscos_nr_ctxs++; 9876 } 9877 9878 skip_rss_ctx: 9879 /* configure default vnic, ring grp */ 9880 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 9881 if (rc) { 9882 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9883 vnic->vnic_id, rc); 9884 goto vnic_setup_err; 9885 } 9886 9887 /* Enable RSS hashing on vnic */ 9888 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); 9889 if (rc) { 9890 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 9891 vnic->vnic_id, rc); 9892 goto vnic_setup_err; 9893 } 9894 9895 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9896 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 9897 if (rc) { 9898 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9899 vnic->vnic_id, rc); 9900 } 9901 } 9902 9903 vnic_setup_err: 9904 return rc; 9905 } 9906 9907 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 9908 { 9909 int rc; 9910 9911 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 9912 if (rc) { 9913 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 9914 vnic->vnic_id, rc); 9915 return rc; 9916 } 9917 rc = bnxt_hwrm_vnic_cfg(bp, 
vnic); 9918 if (rc) 9919 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9920 vnic->vnic_id, rc); 9921 return rc; 9922 } 9923 9924 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 9925 { 9926 int rc, i, nr_ctxs; 9927 9928 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 9929 for (i = 0; i < nr_ctxs; i++) { 9930 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); 9931 if (rc) { 9932 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 9933 vnic->vnic_id, i, rc); 9934 break; 9935 } 9936 bp->rsscos_nr_ctxs++; 9937 } 9938 if (i < nr_ctxs) 9939 return -ENOMEM; 9940 9941 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); 9942 if (rc) 9943 return rc; 9944 9945 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9946 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 9947 if (rc) { 9948 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9949 vnic->vnic_id, rc); 9950 } 9951 } 9952 return rc; 9953 } 9954 9955 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 9956 { 9957 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9958 return __bnxt_setup_vnic_p5(bp, vnic); 9959 else 9960 return __bnxt_setup_vnic(bp, vnic); 9961 } 9962 9963 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, 9964 struct bnxt_vnic_info *vnic, 9965 u16 start_rx_ring_idx, int rx_rings) 9966 { 9967 int rc; 9968 9969 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); 9970 if (rc) { 9971 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9972 vnic->vnic_id, rc); 9973 return rc; 9974 } 9975 return bnxt_setup_vnic(bp, vnic); 9976 } 9977 9978 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 9979 { 9980 struct bnxt_vnic_info *vnic; 9981 int i, rc = 0; 9982 9983 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 9984 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 9985 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); 9986 } 9987 9988 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9989 return 0; 9990 9991 for (i = 0; i < bp->rx_nr_rings; i++) { 9992 u16 vnic_id = i + 1; 9993 u16 ring_id = i; 9994 9995 if (vnic_id >= bp->nr_vnics) 9996 break; 9997 9998 vnic = &bp->vnic_info[vnic_id]; 9999 vnic->flags |= BNXT_VNIC_RFS_FLAG; 10000 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 10001 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 10002 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) 10003 break; 10004 } 10005 return rc; 10006 } 10007 10008 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, 10009 bool all) 10010 { 10011 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10012 struct bnxt_filter_base *usr_fltr, *tmp; 10013 struct bnxt_ntuple_filter *ntp_fltr; 10014 int i; 10015 10016 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10017 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10018 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10019 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10020 } 10021 if (!all) 10022 return; 10023 10024 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 10025 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && 10026 usr_fltr->fw_vnic_id == rss_ctx->index) { 10027 ntp_fltr = container_of(usr_fltr, 10028 struct bnxt_ntuple_filter, 10029 base); 10030 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); 10031 bnxt_del_ntp_filter(bp, ntp_fltr); 10032 bnxt_del_one_usr_fltr(bp, usr_fltr); 10033 } 10034 } 10035 10036 if (vnic->rss_table) 10037 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, 10038 vnic->rss_table, 10039 vnic->rss_table_dma_addr); 10040 kfree(rss_ctx->rss_indir_tbl); 10041 list_del(&rss_ctx->list); 10042 bp->num_rss_ctx--; 
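/* Return the context ID to rss_ctx_bmap before freeing the context itself. */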
10043 clear_bit(rss_ctx->index, bp->rss_ctx_bmap); 10044 kfree(rss_ctx); 10045 } 10046 10047 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) 10048 { 10049 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); 10050 struct bnxt_rss_ctx *rss_ctx, *tmp; 10051 10052 list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { 10053 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10054 10055 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || 10056 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || 10057 __bnxt_setup_vnic_p5(bp, vnic)) { 10058 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", 10059 rss_ctx->index); 10060 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 10061 } 10062 } 10063 } 10064 10065 struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) 10066 { 10067 struct bnxt_rss_ctx *rss_ctx = NULL; 10068 10069 rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); 10070 if (rss_ctx) { 10071 rss_ctx->vnic.rss_ctx = rss_ctx; 10072 list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); 10073 bp->num_rss_ctx++; 10074 } 10075 return rss_ctx; 10076 } 10077 10078 void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) 10079 { 10080 struct bnxt_rss_ctx *rss_ctx, *tmp; 10081 10082 list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) 10083 bnxt_del_one_rss_ctx(bp, rss_ctx, all); 10084 10085 if (all) 10086 bitmap_free(bp->rss_ctx_bmap); 10087 } 10088 10089 static void bnxt_init_multi_rss_ctx(struct bnxt *bp) 10090 { 10091 bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); 10092 if (bp->rss_ctx_bmap) { 10093 /* burn index 0 since we cannot have context 0 */ 10094 __set_bit(0, bp->rss_ctx_bmap); 10095 INIT_LIST_HEAD(&bp->rss_ctx_list); 10096 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; 10097 } 10098 } 10099 10100 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 10101 static bool bnxt_promisc_ok(struct bnxt *bp) 10102 { 10103 #ifdef CONFIG_BNXT_SRIOV 10104 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 10105 return false; 10106 #endif 10107 return true; 10108 } 10109 10110 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 10111 { 10112 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; 10113 unsigned int rc = 0; 10114 10115 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); 10116 if (rc) { 10117 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10118 rc); 10119 return rc; 10120 } 10121 10122 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10123 if (rc) { 10124 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10125 rc); 10126 return rc; 10127 } 10128 return rc; 10129 } 10130 10131 static int bnxt_cfg_rx_mode(struct bnxt *); 10132 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 10133 10134 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 10135 { 10136 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 10137 int rc = 0; 10138 unsigned int rx_nr_rings = bp->rx_nr_rings; 10139 10140 if (irq_re_init) { 10141 rc = bnxt_hwrm_stat_ctx_alloc(bp); 10142 if (rc) { 10143 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 10144 rc); 10145 goto err_out; 10146 } 10147 } 10148 10149 rc = bnxt_hwrm_ring_alloc(bp); 10150 if (rc) { 10151 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 10152 goto err_out; 10153 } 10154 10155 rc = bnxt_hwrm_ring_grp_alloc(bp); 10156 if (rc) { 10157 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 10158 goto err_out; 10159 } 10160 10161 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10162 rx_nr_rings--; 10163 10164 /* default 
vnic 0 */ 10165 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); 10166 if (rc) { 10167 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 10168 goto err_out; 10169 } 10170 10171 if (BNXT_VF(bp)) 10172 bnxt_hwrm_func_qcfg(bp); 10173 10174 rc = bnxt_setup_vnic(bp, vnic); 10175 if (rc) 10176 goto err_out; 10177 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 10178 bnxt_hwrm_update_rss_hash_cfg(bp); 10179 10180 if (bp->flags & BNXT_FLAG_RFS) { 10181 rc = bnxt_alloc_rfs_vnics(bp); 10182 if (rc) 10183 goto err_out; 10184 } 10185 10186 if (bp->flags & BNXT_FLAG_TPA) { 10187 rc = bnxt_set_tpa(bp, true); 10188 if (rc) 10189 goto err_out; 10190 } 10191 10192 if (BNXT_VF(bp)) 10193 bnxt_update_vf_mac(bp); 10194 10195 /* Filter for default vnic 0 */ 10196 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 10197 if (rc) { 10198 if (BNXT_VF(bp) && rc == -ENODEV) 10199 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 10200 else 10201 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 10202 goto err_out; 10203 } 10204 vnic->uc_filter_count = 1; 10205 10206 vnic->rx_mask = 0; 10207 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 10208 goto skip_rx_mask; 10209 10210 if (bp->dev->flags & IFF_BROADCAST) 10211 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10212 10213 if (bp->dev->flags & IFF_PROMISC) 10214 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10215 10216 if (bp->dev->flags & IFF_ALLMULTI) { 10217 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10218 vnic->mc_list_count = 0; 10219 } else if (bp->dev->flags & IFF_MULTICAST) { 10220 u32 mask = 0; 10221 10222 bnxt_mc_list_updated(bp, &mask); 10223 vnic->rx_mask |= mask; 10224 } 10225 10226 rc = bnxt_cfg_rx_mode(bp); 10227 if (rc) 10228 goto err_out; 10229 10230 skip_rx_mask: 10231 rc = bnxt_hwrm_set_coal(bp); 10232 if (rc) 10233 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 10234 rc); 10235 10236 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10237 rc = bnxt_setup_nitroa0_vnic(bp); 10238 if (rc) 10239 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 10240 rc); 10241 } 10242 10243 if (BNXT_VF(bp)) { 10244 bnxt_hwrm_func_qcfg(bp); 10245 netdev_update_features(bp->dev); 10246 } 10247 10248 return 0; 10249 10250 err_out: 10251 bnxt_hwrm_resource_free(bp, 0, true); 10252 10253 return rc; 10254 } 10255 10256 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 10257 { 10258 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 10259 return 0; 10260 } 10261 10262 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 10263 { 10264 bnxt_init_cp_rings(bp); 10265 bnxt_init_rx_rings(bp); 10266 bnxt_init_tx_rings(bp); 10267 bnxt_init_ring_grps(bp, irq_re_init); 10268 bnxt_init_vnics(bp); 10269 10270 return bnxt_init_chip(bp, irq_re_init); 10271 } 10272 10273 static int bnxt_set_real_num_queues(struct bnxt *bp) 10274 { 10275 int rc; 10276 struct net_device *dev = bp->dev; 10277 10278 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 10279 bp->tx_nr_rings_xdp); 10280 if (rc) 10281 return rc; 10282 10283 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 10284 if (rc) 10285 return rc; 10286 10287 #ifdef CONFIG_RFS_ACCEL 10288 if (bp->flags & BNXT_FLAG_RFS) 10289 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 10290 #endif 10291 10292 return rc; 10293 } 10294 10295 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10296 bool shared) 10297 { 10298 int _rx = *rx, _tx = *tx; 10299 10300 if (shared) { 10301 
*rx = min_t(int, _rx, max); 10302 *tx = min_t(int, _tx, max); 10303 } else { 10304 if (max < 2) 10305 return -ENOMEM; 10306 10307 while (_rx + _tx > max) { 10308 if (_rx > _tx && _rx > 1) 10309 _rx--; 10310 else if (_tx > 1) 10311 _tx--; 10312 } 10313 *rx = _rx; 10314 *tx = _tx; 10315 } 10316 return 0; 10317 } 10318 10319 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 10320 { 10321 return (tx - tx_xdp) / tx_sets + tx_xdp; 10322 } 10323 10324 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 10325 { 10326 int tcs = bp->num_tc; 10327 10328 if (!tcs) 10329 tcs = 1; 10330 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 10331 } 10332 10333 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 10334 { 10335 int tcs = bp->num_tc; 10336 10337 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 10338 bp->tx_nr_rings_xdp; 10339 } 10340 10341 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10342 bool sh) 10343 { 10344 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 10345 10346 if (tx_cp != *tx) { 10347 int tx_saved = tx_cp, rc; 10348 10349 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 10350 if (rc) 10351 return rc; 10352 if (tx_cp != tx_saved) 10353 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 10354 return 0; 10355 } 10356 return __bnxt_trim_rings(bp, rx, tx, max, sh); 10357 } 10358 10359 static void bnxt_setup_msix(struct bnxt *bp) 10360 { 10361 const int len = sizeof(bp->irq_tbl[0].name); 10362 struct net_device *dev = bp->dev; 10363 int tcs, i; 10364 10365 tcs = bp->num_tc; 10366 if (tcs) { 10367 int i, off, count; 10368 10369 for (i = 0; i < tcs; i++) { 10370 count = bp->tx_nr_rings_per_tc; 10371 off = BNXT_TC_TO_RING_BASE(bp, i); 10372 netdev_set_tc_queue(dev, i, count, off); 10373 } 10374 } 10375 10376 for (i = 0; i < bp->cp_nr_rings; i++) { 10377 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10378 char *attr; 10379 10380 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 10381 attr = "TxRx"; 10382 else if (i < bp->rx_nr_rings) 10383 attr = "rx"; 10384 else 10385 attr = "tx"; 10386 10387 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 10388 attr, i); 10389 bp->irq_tbl[map_idx].handler = bnxt_msix; 10390 } 10391 } 10392 10393 static void bnxt_setup_inta(struct bnxt *bp) 10394 { 10395 const int len = sizeof(bp->irq_tbl[0].name); 10396 10397 if (bp->num_tc) { 10398 netdev_reset_tc(bp->dev); 10399 bp->num_tc = 0; 10400 } 10401 10402 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 10403 0); 10404 bp->irq_tbl[0].handler = bnxt_inta; 10405 } 10406 10407 static int bnxt_init_int_mode(struct bnxt *bp); 10408 10409 static int bnxt_setup_int_mode(struct bnxt *bp) 10410 { 10411 int rc; 10412 10413 if (!bp->irq_tbl) { 10414 rc = bnxt_init_int_mode(bp); 10415 if (rc || !bp->irq_tbl) 10416 return rc ?: -ENODEV; 10417 } 10418 10419 if (bp->flags & BNXT_FLAG_USING_MSIX) 10420 bnxt_setup_msix(bp); 10421 else 10422 bnxt_setup_inta(bp); 10423 10424 rc = bnxt_set_real_num_queues(bp); 10425 return rc; 10426 } 10427 10428 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 10429 { 10430 return bp->hw_resc.max_rsscos_ctxs; 10431 } 10432 10433 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 10434 { 10435 return bp->hw_resc.max_vnics; 10436 } 10437 10438 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 10439 { 10440 return bp->hw_resc.max_stat_ctxs; 10441 } 10442 10443 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 10444 { 10445 return bp->hw_resc.max_cp_rings; 10446 } 10447 10448 static unsigned int 
bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 10449 { 10450 unsigned int cp = bp->hw_resc.max_cp_rings; 10451 10452 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 10453 cp -= bnxt_get_ulp_msix_num(bp); 10454 10455 return cp; 10456 } 10457 10458 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 10459 { 10460 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10461 10462 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10463 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 10464 10465 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 10466 } 10467 10468 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 10469 { 10470 bp->hw_resc.max_irqs = max_irqs; 10471 } 10472 10473 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 10474 { 10475 unsigned int cp; 10476 10477 cp = bnxt_get_max_func_cp_rings_for_en(bp); 10478 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10479 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 10480 else 10481 return cp - bp->cp_nr_rings; 10482 } 10483 10484 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 10485 { 10486 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 10487 } 10488 10489 static int bnxt_get_avail_msix(struct bnxt *bp, int num) 10490 { 10491 int max_irq = bnxt_get_max_func_irqs(bp); 10492 int total_req = bp->cp_nr_rings + num; 10493 10494 if (max_irq < total_req) { 10495 num = max_irq - bp->cp_nr_rings; 10496 if (num <= 0) 10497 return 0; 10498 } 10499 return num; 10500 } 10501 10502 static int bnxt_get_num_msix(struct bnxt *bp) 10503 { 10504 if (!BNXT_NEW_RM(bp)) 10505 return bnxt_get_max_func_irqs(bp); 10506 10507 return bnxt_nq_rings_in_use(bp); 10508 } 10509 10510 static int bnxt_init_msix(struct bnxt *bp) 10511 { 10512 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; 10513 struct msix_entry *msix_ent; 10514 10515 total_vecs = bnxt_get_num_msix(bp); 10516 max = bnxt_get_max_func_irqs(bp); 10517 if (total_vecs > max) 10518 total_vecs = max; 10519 10520 if (!total_vecs) 10521 return 0; 10522 10523 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 10524 if (!msix_ent) 10525 return -ENOMEM; 10526 10527 for (i = 0; i < total_vecs; i++) { 10528 msix_ent[i].entry = i; 10529 msix_ent[i].vector = 0; 10530 } 10531 10532 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 10533 min = 2; 10534 10535 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 10536 ulp_msix = bnxt_get_ulp_msix_num(bp); 10537 if (total_vecs < 0 || total_vecs < ulp_msix) { 10538 rc = -ENODEV; 10539 goto msix_setup_exit; 10540 } 10541 10542 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 10543 if (bp->irq_tbl) { 10544 for (i = 0; i < total_vecs; i++) 10545 bp->irq_tbl[i].vector = msix_ent[i].vector; 10546 10547 bp->total_irqs = total_vecs; 10548 /* Trim rings based upon num of vectors allocated */ 10549 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 10550 total_vecs - ulp_msix, min == 1); 10551 if (rc) 10552 goto msix_setup_exit; 10553 10554 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 10555 bp->cp_nr_rings = (min == 1) ? 
10556 max_t(int, tx_cp, bp->rx_nr_rings) : 10557 tx_cp + bp->rx_nr_rings; 10558 10559 } else { 10560 rc = -ENOMEM; 10561 goto msix_setup_exit; 10562 } 10563 bp->flags |= BNXT_FLAG_USING_MSIX; 10564 kfree(msix_ent); 10565 return 0; 10566 10567 msix_setup_exit: 10568 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 10569 kfree(bp->irq_tbl); 10570 bp->irq_tbl = NULL; 10571 pci_disable_msix(bp->pdev); 10572 kfree(msix_ent); 10573 return rc; 10574 } 10575 10576 static int bnxt_init_inta(struct bnxt *bp) 10577 { 10578 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); 10579 if (!bp->irq_tbl) 10580 return -ENOMEM; 10581 10582 bp->total_irqs = 1; 10583 bp->rx_nr_rings = 1; 10584 bp->tx_nr_rings = 1; 10585 bp->cp_nr_rings = 1; 10586 bp->flags |= BNXT_FLAG_SHARED_RINGS; 10587 bp->irq_tbl[0].vector = bp->pdev->irq; 10588 return 0; 10589 } 10590 10591 static int bnxt_init_int_mode(struct bnxt *bp) 10592 { 10593 int rc = -ENODEV; 10594 10595 if (bp->flags & BNXT_FLAG_MSIX_CAP) 10596 rc = bnxt_init_msix(bp); 10597 10598 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 10599 /* fallback to INTA */ 10600 rc = bnxt_init_inta(bp); 10601 } 10602 return rc; 10603 } 10604 10605 static void bnxt_clear_int_mode(struct bnxt *bp) 10606 { 10607 if (bp->flags & BNXT_FLAG_USING_MSIX) 10608 pci_disable_msix(bp->pdev); 10609 10610 kfree(bp->irq_tbl); 10611 bp->irq_tbl = NULL; 10612 bp->flags &= ~BNXT_FLAG_USING_MSIX; 10613 } 10614 10615 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 10616 { 10617 bool irq_cleared = false; 10618 int tcs = bp->num_tc; 10619 int irqs_required; 10620 int rc; 10621 10622 if (!bnxt_need_reserve_rings(bp)) 10623 return 0; 10624 10625 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 10626 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 10627 10628 if (ulp_msix > bp->ulp_num_msix_want) 10629 ulp_msix = bp->ulp_num_msix_want; 10630 irqs_required = ulp_msix + bp->cp_nr_rings; 10631 } else { 10632 irqs_required = bnxt_get_num_msix(bp); 10633 } 10634 10635 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { 10636 bnxt_ulp_irq_stop(bp); 10637 bnxt_clear_int_mode(bp); 10638 irq_cleared = true; 10639 } 10640 rc = __bnxt_reserve_rings(bp); 10641 if (irq_cleared) { 10642 if (!rc) 10643 rc = bnxt_init_int_mode(bp); 10644 bnxt_ulp_irq_restart(bp, rc); 10645 } 10646 if (rc) { 10647 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 10648 return rc; 10649 } 10650 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 10651 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 10652 netdev_err(bp->dev, "tx ring reservation failure\n"); 10653 netdev_reset_tc(bp->dev); 10654 bp->num_tc = 0; 10655 if (bp->tx_nr_rings_xdp) 10656 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 10657 else 10658 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 10659 return -ENOMEM; 10660 } 10661 return 0; 10662 } 10663 10664 static void bnxt_free_irq(struct bnxt *bp) 10665 { 10666 struct bnxt_irq *irq; 10667 int i; 10668 10669 #ifdef CONFIG_RFS_ACCEL 10670 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 10671 bp->dev->rx_cpu_rmap = NULL; 10672 #endif 10673 if (!bp->irq_tbl || !bp->bnapi) 10674 return; 10675 10676 for (i = 0; i < bp->cp_nr_rings; i++) { 10677 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10678 10679 irq = &bp->irq_tbl[map_idx]; 10680 if (irq->requested) { 10681 if (irq->have_cpumask) { 10682 irq_set_affinity_hint(irq->vector, NULL); 10683 free_cpumask_var(irq->cpu_mask); 10684 irq->have_cpumask = 0; 10685 } 10686 free_irq(irq->vector, bp->bnapi[i]); 10687 } 
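/* Clear the flag unconditionally so the entry is clean for the next bnxt_request_irq(). */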
10688 10689 irq->requested = 0; 10690 } 10691 } 10692 10693 static int bnxt_request_irq(struct bnxt *bp) 10694 { 10695 int i, j, rc = 0; 10696 unsigned long flags = 0; 10697 #ifdef CONFIG_RFS_ACCEL 10698 struct cpu_rmap *rmap; 10699 #endif 10700 10701 rc = bnxt_setup_int_mode(bp); 10702 if (rc) { 10703 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 10704 rc); 10705 return rc; 10706 } 10707 #ifdef CONFIG_RFS_ACCEL 10708 rmap = bp->dev->rx_cpu_rmap; 10709 #endif 10710 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 10711 flags = IRQF_SHARED; 10712 10713 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 10714 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10715 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 10716 10717 #ifdef CONFIG_RFS_ACCEL 10718 if (rmap && bp->bnapi[i]->rx_ring) { 10719 rc = irq_cpu_rmap_add(rmap, irq->vector); 10720 if (rc) 10721 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 10722 j); 10723 j++; 10724 } 10725 #endif 10726 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 10727 bp->bnapi[i]); 10728 if (rc) 10729 break; 10730 10731 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); 10732 irq->requested = 1; 10733 10734 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 10735 int numa_node = dev_to_node(&bp->pdev->dev); 10736 10737 irq->have_cpumask = 1; 10738 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 10739 irq->cpu_mask); 10740 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 10741 if (rc) { 10742 netdev_warn(bp->dev, 10743 "Set affinity failed, IRQ = %d\n", 10744 irq->vector); 10745 break; 10746 } 10747 } 10748 } 10749 return rc; 10750 } 10751 10752 static void bnxt_del_napi(struct bnxt *bp) 10753 { 10754 int i; 10755 10756 if (!bp->bnapi) 10757 return; 10758 10759 for (i = 0; i < bp->rx_nr_rings; i++) 10760 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 10761 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 10762 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 10763 10764 for (i = 0; i < bp->cp_nr_rings; i++) { 10765 struct bnxt_napi *bnapi = bp->bnapi[i]; 10766 10767 __netif_napi_del(&bnapi->napi); 10768 } 10769 /* Since we called __netif_napi_del(), we need 10770 * to respect an RCU grace period before freeing napi structures. 
10771 */ 10772 synchronize_net(); 10773 } 10774 10775 static void bnxt_init_napi(struct bnxt *bp) 10776 { 10777 int i; 10778 unsigned int cp_nr_rings = bp->cp_nr_rings; 10779 struct bnxt_napi *bnapi; 10780 10781 if (bp->flags & BNXT_FLAG_USING_MSIX) { 10782 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 10783 10784 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10785 poll_fn = bnxt_poll_p5; 10786 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10787 cp_nr_rings--; 10788 for (i = 0; i < cp_nr_rings; i++) { 10789 bnapi = bp->bnapi[i]; 10790 netif_napi_add(bp->dev, &bnapi->napi, poll_fn); 10791 } 10792 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10793 bnapi = bp->bnapi[cp_nr_rings]; 10794 netif_napi_add(bp->dev, &bnapi->napi, 10795 bnxt_poll_nitroa0); 10796 } 10797 } else { 10798 bnapi = bp->bnapi[0]; 10799 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); 10800 } 10801 } 10802 10803 static void bnxt_disable_napi(struct bnxt *bp) 10804 { 10805 int i; 10806 10807 if (!bp->bnapi || 10808 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 10809 return; 10810 10811 for (i = 0; i < bp->cp_nr_rings; i++) { 10812 struct bnxt_napi *bnapi = bp->bnapi[i]; 10813 struct bnxt_cp_ring_info *cpr; 10814 10815 cpr = &bnapi->cp_ring; 10816 if (bnapi->tx_fault) 10817 cpr->sw_stats->tx.tx_resets++; 10818 if (bnapi->in_reset) 10819 cpr->sw_stats->rx.rx_resets++; 10820 napi_disable(&bnapi->napi); 10821 if (bnapi->rx_ring) 10822 cancel_work_sync(&cpr->dim.work); 10823 } 10824 } 10825 10826 static void bnxt_enable_napi(struct bnxt *bp) 10827 { 10828 int i; 10829 10830 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 10831 for (i = 0; i < bp->cp_nr_rings; i++) { 10832 struct bnxt_napi *bnapi = bp->bnapi[i]; 10833 struct bnxt_cp_ring_info *cpr; 10834 10835 bnapi->tx_fault = 0; 10836 10837 cpr = &bnapi->cp_ring; 10838 bnapi->in_reset = false; 10839 10840 if (bnapi->rx_ring) { 10841 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 10842 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 10843 } 10844 napi_enable(&bnapi->napi); 10845 } 10846 } 10847 10848 void bnxt_tx_disable(struct bnxt *bp) 10849 { 10850 int i; 10851 struct bnxt_tx_ring_info *txr; 10852 10853 if (bp->tx_ring) { 10854 for (i = 0; i < bp->tx_nr_rings; i++) { 10855 txr = &bp->tx_ring[i]; 10856 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 10857 } 10858 } 10859 /* Make sure napi polls see @dev_state change */ 10860 synchronize_net(); 10861 /* Drop carrier first to prevent TX timeout */ 10862 netif_carrier_off(bp->dev); 10863 /* Stop all TX queues */ 10864 netif_tx_disable(bp->dev); 10865 } 10866 10867 void bnxt_tx_enable(struct bnxt *bp) 10868 { 10869 int i; 10870 struct bnxt_tx_ring_info *txr; 10871 10872 for (i = 0; i < bp->tx_nr_rings; i++) { 10873 txr = &bp->tx_ring[i]; 10874 WRITE_ONCE(txr->dev_state, 0); 10875 } 10876 /* Make sure napi polls see @dev_state change */ 10877 synchronize_net(); 10878 netif_tx_wake_all_queues(bp->dev); 10879 if (BNXT_LINK_IS_UP(bp)) 10880 netif_carrier_on(bp->dev); 10881 } 10882 10883 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 10884 { 10885 u8 active_fec = link_info->active_fec_sig_mode & 10886 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 10887 10888 switch (active_fec) { 10889 default: 10890 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 10891 return "None"; 10892 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 10893 return "Clause 74 BaseR"; 10894 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 10895 return "Clause 91 RS(528,514)"; 10896 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 10897 
return "Clause 91 RS544_1XN"; 10898 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 10899 return "Clause 91 RS(544,514)"; 10900 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 10901 return "Clause 91 RS272_1XN"; 10902 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 10903 return "Clause 91 RS(272,257)"; 10904 } 10905 } 10906 10907 void bnxt_report_link(struct bnxt *bp) 10908 { 10909 if (BNXT_LINK_IS_UP(bp)) { 10910 const char *signal = ""; 10911 const char *flow_ctrl; 10912 const char *duplex; 10913 u32 speed; 10914 u16 fec; 10915 10916 netif_carrier_on(bp->dev); 10917 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 10918 if (speed == SPEED_UNKNOWN) { 10919 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 10920 return; 10921 } 10922 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 10923 duplex = "full"; 10924 else 10925 duplex = "half"; 10926 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 10927 flow_ctrl = "ON - receive & transmit"; 10928 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 10929 flow_ctrl = "ON - transmit"; 10930 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 10931 flow_ctrl = "ON - receive"; 10932 else 10933 flow_ctrl = "none"; 10934 if (bp->link_info.phy_qcfg_resp.option_flags & 10935 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 10936 u8 sig_mode = bp->link_info.active_fec_sig_mode & 10937 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 10938 switch (sig_mode) { 10939 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 10940 signal = "(NRZ) "; 10941 break; 10942 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 10943 signal = "(PAM4 56Gbps) "; 10944 break; 10945 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 10946 signal = "(PAM4 112Gbps) "; 10947 break; 10948 default: 10949 break; 10950 } 10951 } 10952 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 10953 speed, signal, duplex, flow_ctrl); 10954 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 10955 netdev_info(bp->dev, "EEE is %s\n", 10956 bp->eee.eee_active ? "active" : 10957 "not active"); 10958 fec = bp->link_info.fec_cfg; 10959 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 10960 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 10961 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 10962 bnxt_report_fec(&bp->link_info)); 10963 } else { 10964 netif_carrier_off(bp->dev); 10965 netdev_err(bp->dev, "NIC Link is Down\n"); 10966 } 10967 } 10968 10969 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 10970 { 10971 if (!resp->supported_speeds_auto_mode && 10972 !resp->supported_speeds_force_mode && 10973 !resp->supported_pam4_speeds_auto_mode && 10974 !resp->supported_pam4_speeds_force_mode && 10975 !resp->supported_speeds2_auto_mode && 10976 !resp->supported_speeds2_force_mode) 10977 return true; 10978 return false; 10979 } 10980 10981 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 10982 { 10983 struct bnxt_link_info *link_info = &bp->link_info; 10984 struct hwrm_port_phy_qcaps_output *resp; 10985 struct hwrm_port_phy_qcaps_input *req; 10986 int rc = 0; 10987 10988 if (bp->hwrm_spec_code < 0x10201) 10989 return 0; 10990 10991 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 10992 if (rc) 10993 return rc; 10994 10995 resp = hwrm_req_hold(bp, req); 10996 rc = hwrm_req_send(bp, req); 10997 if (rc) 10998 goto hwrm_phy_qcaps_exit; 10999 11000 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 11001 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 11002 struct ethtool_keee *eee = &bp->eee; 11003 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 11004 11005 _bnxt_fw_to_linkmode(eee->supported, fw_speeds); 11006 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 11007 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 11008 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 11009 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 11010 } 11011 11012 if (bp->hwrm_spec_code >= 0x10a01) { 11013 if (bnxt_phy_qcaps_no_speed(resp)) { 11014 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 11015 netdev_warn(bp->dev, "Ethernet link disabled\n"); 11016 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 11017 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 11018 netdev_info(bp->dev, "Ethernet link enabled\n"); 11019 /* Phy re-enabled, reprobe the speeds */ 11020 link_info->support_auto_speeds = 0; 11021 link_info->support_pam4_auto_speeds = 0; 11022 link_info->support_auto_speeds2 = 0; 11023 } 11024 } 11025 if (resp->supported_speeds_auto_mode) 11026 link_info->support_auto_speeds = 11027 le16_to_cpu(resp->supported_speeds_auto_mode); 11028 if (resp->supported_pam4_speeds_auto_mode) 11029 link_info->support_pam4_auto_speeds = 11030 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 11031 if (resp->supported_speeds2_auto_mode) 11032 link_info->support_auto_speeds2 = 11033 le16_to_cpu(resp->supported_speeds2_auto_mode); 11034 11035 bp->port_count = resp->port_cnt; 11036 11037 hwrm_phy_qcaps_exit: 11038 hwrm_req_drop(bp, req); 11039 return rc; 11040 } 11041 11042 static bool bnxt_support_dropped(u16 advertising, u16 supported) 11043 { 11044 u16 diff = advertising ^ supported; 11045 11046 return ((supported | diff) != supported); 11047 } 11048 11049 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) 11050 { 11051 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 11052 11053 /* Check if any advertised speeds are no longer supported. The caller 11054 * holds the link_lock mutex, so we can modify link_info settings. 
11055 */ 11056 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11057 if (bnxt_support_dropped(link_info->advertising, 11058 link_info->support_auto_speeds2)) { 11059 link_info->advertising = link_info->support_auto_speeds2; 11060 return true; 11061 } 11062 return false; 11063 } 11064 if (bnxt_support_dropped(link_info->advertising, 11065 link_info->support_auto_speeds)) { 11066 link_info->advertising = link_info->support_auto_speeds; 11067 return true; 11068 } 11069 if (bnxt_support_dropped(link_info->advertising_pam4, 11070 link_info->support_pam4_auto_speeds)) { 11071 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 11072 return true; 11073 } 11074 return false; 11075 } 11076 11077 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 11078 { 11079 struct bnxt_link_info *link_info = &bp->link_info; 11080 struct hwrm_port_phy_qcfg_output *resp; 11081 struct hwrm_port_phy_qcfg_input *req; 11082 u8 link_state = link_info->link_state; 11083 bool support_changed; 11084 int rc; 11085 11086 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 11087 if (rc) 11088 return rc; 11089 11090 resp = hwrm_req_hold(bp, req); 11091 rc = hwrm_req_send(bp, req); 11092 if (rc) { 11093 hwrm_req_drop(bp, req); 11094 if (BNXT_VF(bp) && rc == -ENODEV) { 11095 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 11096 rc = 0; 11097 } 11098 return rc; 11099 } 11100 11101 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 11102 link_info->phy_link_status = resp->link; 11103 link_info->duplex = resp->duplex_cfg; 11104 if (bp->hwrm_spec_code >= 0x10800) 11105 link_info->duplex = resp->duplex_state; 11106 link_info->pause = resp->pause; 11107 link_info->auto_mode = resp->auto_mode; 11108 link_info->auto_pause_setting = resp->auto_pause; 11109 link_info->lp_pause = resp->link_partner_adv_pause; 11110 link_info->force_pause_setting = resp->force_pause; 11111 link_info->duplex_setting = resp->duplex_cfg; 11112 if (link_info->phy_link_status == BNXT_LINK_LINK) { 11113 link_info->link_speed = le16_to_cpu(resp->link_speed); 11114 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 11115 link_info->active_lanes = resp->active_lanes; 11116 } else { 11117 link_info->link_speed = 0; 11118 link_info->active_lanes = 0; 11119 } 11120 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 11121 link_info->force_pam4_link_speed = 11122 le16_to_cpu(resp->force_pam4_link_speed); 11123 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 11124 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 11125 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 11126 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 11127 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 11128 link_info->auto_pam4_link_speeds = 11129 le16_to_cpu(resp->auto_pam4_link_speed_mask); 11130 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 11131 link_info->lp_auto_link_speeds = 11132 le16_to_cpu(resp->link_partner_adv_speeds); 11133 link_info->lp_auto_pam4_link_speeds = 11134 resp->link_partner_pam4_adv_speeds; 11135 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 11136 link_info->phy_ver[0] = resp->phy_maj; 11137 link_info->phy_ver[1] = resp->phy_min; 11138 link_info->phy_ver[2] = resp->phy_bld; 11139 link_info->media_type = resp->media_type; 11140 link_info->phy_type = resp->phy_type; 11141 link_info->transceiver = resp->xcvr_pkg_type; 11142 link_info->phy_addr = resp->eee_config_phy_addr & 11143 
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 11144 link_info->module_status = resp->module_status; 11145 11146 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 11147 struct ethtool_keee *eee = &bp->eee; 11148 u16 fw_speeds; 11149 11150 eee->eee_active = 0; 11151 if (resp->eee_config_phy_addr & 11152 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 11153 eee->eee_active = 1; 11154 fw_speeds = le16_to_cpu( 11155 resp->link_partner_adv_eee_link_speed_mask); 11156 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); 11157 } 11158 11159 /* Pull initial EEE config */ 11160 if (!chng_link_state) { 11161 if (resp->eee_config_phy_addr & 11162 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 11163 eee->eee_enabled = 1; 11164 11165 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 11166 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); 11167 11168 if (resp->eee_config_phy_addr & 11169 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 11170 __le32 tmr; 11171 11172 eee->tx_lpi_enabled = 1; 11173 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 11174 eee->tx_lpi_timer = le32_to_cpu(tmr) & 11175 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 11176 } 11177 } 11178 } 11179 11180 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 11181 if (bp->hwrm_spec_code >= 0x10504) { 11182 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 11183 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 11184 } 11185 /* TODO: need to add more logic to report VF link */ 11186 if (chng_link_state) { 11187 if (link_info->phy_link_status == BNXT_LINK_LINK) 11188 link_info->link_state = BNXT_LINK_STATE_UP; 11189 else 11190 link_info->link_state = BNXT_LINK_STATE_DOWN; 11191 if (link_state != link_info->link_state) 11192 bnxt_report_link(bp); 11193 } else { 11194 /* always link down if not require to update link state */ 11195 link_info->link_state = BNXT_LINK_STATE_DOWN; 11196 } 11197 hwrm_req_drop(bp, req); 11198 11199 if (!BNXT_PHY_CFG_ABLE(bp)) 11200 return 0; 11201 11202 support_changed = bnxt_support_speed_dropped(link_info); 11203 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 11204 bnxt_hwrm_set_link_setting(bp, true, false); 11205 return 0; 11206 } 11207 11208 static void bnxt_get_port_module_status(struct bnxt *bp) 11209 { 11210 struct bnxt_link_info *link_info = &bp->link_info; 11211 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 11212 u8 module_status; 11213 11214 if (bnxt_update_link(bp, true)) 11215 return; 11216 11217 module_status = link_info->module_status; 11218 switch (module_status) { 11219 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 11220 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 11221 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 11222 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 11223 bp->pf.port_id); 11224 if (bp->hwrm_spec_code >= 0x10201) { 11225 netdev_warn(bp->dev, "Module part number %s\n", 11226 resp->phy_vendor_partnumber); 11227 } 11228 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 11229 netdev_warn(bp->dev, "TX is disabled\n"); 11230 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 11231 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 11232 } 11233 } 11234 11235 static void 11236 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 11237 { 11238 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 11239 if (bp->hwrm_spec_code >= 0x10201) 11240 req->auto_pause = 11241 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 11242 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 
11243 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 11244 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 11245 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 11246 req->enables |= 11247 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 11248 } else { 11249 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 11250 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 11251 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 11252 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 11253 req->enables |= 11254 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 11255 if (bp->hwrm_spec_code >= 0x10201) { 11256 req->auto_pause = req->force_pause; 11257 req->enables |= cpu_to_le32( 11258 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 11259 } 11260 } 11261 } 11262 11263 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 11264 { 11265 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 11266 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 11267 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11268 req->enables |= 11269 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 11270 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 11271 } else if (bp->link_info.advertising) { 11272 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 11273 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 11274 } 11275 if (bp->link_info.advertising_pam4) { 11276 req->enables |= 11277 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 11278 req->auto_link_pam4_speed_mask = 11279 cpu_to_le16(bp->link_info.advertising_pam4); 11280 } 11281 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 11282 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 11283 } else { 11284 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 11285 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11286 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 11287 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 11288 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 11289 (u32)bp->link_info.req_link_speed); 11290 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 11291 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 11292 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 11293 } else { 11294 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 11295 } 11296 } 11297 11298 /* tell chimp that the setting takes effect immediately */ 11299 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 11300 } 11301 11302 int bnxt_hwrm_set_pause(struct bnxt *bp) 11303 { 11304 struct hwrm_port_phy_cfg_input *req; 11305 int rc; 11306 11307 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11308 if (rc) 11309 return rc; 11310 11311 bnxt_hwrm_set_pause_common(bp, req); 11312 11313 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 11314 bp->link_info.force_link_chng) 11315 bnxt_hwrm_set_link_common(bp, req); 11316 11317 rc = hwrm_req_send(bp, req); 11318 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 11319 /* since changing of pause setting doesn't trigger any link 11320 * change event, the driver needs to update the current pause 11321 * result upon successfully return of the phy_cfg command 11322 */ 11323 bp->link_info.pause = 11324 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 11325 bp->link_info.auto_pause_setting = 0; 11326 if 
(!bp->link_info.force_link_chng) 11327 bnxt_report_link(bp); 11328 } 11329 bp->link_info.force_link_chng = false; 11330 return rc; 11331 } 11332 11333 static void bnxt_hwrm_set_eee(struct bnxt *bp, 11334 struct hwrm_port_phy_cfg_input *req) 11335 { 11336 struct ethtool_keee *eee = &bp->eee; 11337 11338 if (eee->eee_enabled) { 11339 u16 eee_speeds; 11340 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 11341 11342 if (eee->tx_lpi_enabled) 11343 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 11344 else 11345 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 11346 11347 req->flags |= cpu_to_le32(flags); 11348 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 11349 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 11350 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 11351 } else { 11352 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 11353 } 11354 } 11355 11356 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 11357 { 11358 struct hwrm_port_phy_cfg_input *req; 11359 int rc; 11360 11361 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11362 if (rc) 11363 return rc; 11364 11365 if (set_pause) 11366 bnxt_hwrm_set_pause_common(bp, req); 11367 11368 bnxt_hwrm_set_link_common(bp, req); 11369 11370 if (set_eee) 11371 bnxt_hwrm_set_eee(bp, req); 11372 return hwrm_req_send(bp, req); 11373 } 11374 11375 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 11376 { 11377 struct hwrm_port_phy_cfg_input *req; 11378 int rc; 11379 11380 if (!BNXT_SINGLE_PF(bp)) 11381 return 0; 11382 11383 if (pci_num_vf(bp->pdev) && 11384 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 11385 return 0; 11386 11387 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11388 if (rc) 11389 return rc; 11390 11391 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 11392 rc = hwrm_req_send(bp, req); 11393 if (!rc) { 11394 mutex_lock(&bp->link_lock); 11395 /* Device is not obliged to link down in certain scenarios, even 11396 * when forced. Setting the state unknown is consistent with 11397 * driver startup and will force the link state to be reported 11398 * during a subsequent open based on PORT_PHY_QCFG. 
11399 */ 11400 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 11401 mutex_unlock(&bp->link_lock); 11402 } 11403 return rc; 11404 } 11405 11406 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 11407 { 11408 #ifdef CONFIG_TEE_BNXT_FW 11409 int rc = tee_bnxt_fw_load(); 11410 11411 if (rc) 11412 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 11413 11414 return rc; 11415 #else 11416 netdev_err(bp->dev, "OP-TEE not supported\n"); 11417 return -ENODEV; 11418 #endif 11419 } 11420 11421 static int bnxt_try_recover_fw(struct bnxt *bp) 11422 { 11423 if (bp->fw_health && bp->fw_health->status_reliable) { 11424 int retry = 0, rc; 11425 u32 sts; 11426 11427 do { 11428 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11429 rc = bnxt_hwrm_poll(bp); 11430 if (!BNXT_FW_IS_BOOTING(sts) && 11431 !BNXT_FW_IS_RECOVERING(sts)) 11432 break; 11433 retry++; 11434 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 11435 11436 if (!BNXT_FW_IS_HEALTHY(sts)) { 11437 netdev_err(bp->dev, 11438 "Firmware not responding, status: 0x%x\n", 11439 sts); 11440 rc = -ENODEV; 11441 } 11442 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 11443 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 11444 return bnxt_fw_reset_via_optee(bp); 11445 } 11446 return rc; 11447 } 11448 11449 return -ENODEV; 11450 } 11451 11452 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 11453 { 11454 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11455 11456 if (!BNXT_NEW_RM(bp)) 11457 return; /* no resource reservations required */ 11458 11459 hw_resc->resv_cp_rings = 0; 11460 hw_resc->resv_stat_ctxs = 0; 11461 hw_resc->resv_irqs = 0; 11462 hw_resc->resv_tx_rings = 0; 11463 hw_resc->resv_rx_rings = 0; 11464 hw_resc->resv_hw_ring_grps = 0; 11465 hw_resc->resv_vnics = 0; 11466 hw_resc->resv_rsscos_ctxs = 0; 11467 if (!fw_reset) { 11468 bp->tx_nr_rings = 0; 11469 bp->rx_nr_rings = 0; 11470 } 11471 } 11472 11473 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 11474 { 11475 int rc; 11476 11477 if (!BNXT_NEW_RM(bp)) 11478 return 0; /* no resource reservations required */ 11479 11480 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 11481 if (rc) 11482 netdev_err(bp->dev, "resc_qcaps failed\n"); 11483 11484 bnxt_clear_reservations(bp, fw_reset); 11485 11486 return rc; 11487 } 11488 11489 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 11490 { 11491 struct hwrm_func_drv_if_change_output *resp; 11492 struct hwrm_func_drv_if_change_input *req; 11493 bool fw_reset = !bp->irq_tbl; 11494 bool resc_reinit = false; 11495 int rc, retry = 0; 11496 u32 flags = 0; 11497 11498 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 11499 return 0; 11500 11501 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 11502 if (rc) 11503 return rc; 11504 11505 if (up) 11506 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 11507 resp = hwrm_req_hold(bp, req); 11508 11509 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 11510 while (retry < BNXT_FW_IF_RETRY) { 11511 rc = hwrm_req_send(bp, req); 11512 if (rc != -EAGAIN) 11513 break; 11514 11515 msleep(50); 11516 retry++; 11517 } 11518 11519 if (rc == -EAGAIN) { 11520 hwrm_req_drop(bp, req); 11521 return rc; 11522 } else if (!rc) { 11523 flags = le32_to_cpu(resp->flags); 11524 } else if (up) { 11525 rc = bnxt_try_recover_fw(bp); 11526 fw_reset = true; 11527 } 11528 hwrm_req_drop(bp, req); 11529 if (rc) 11530 return rc; 11531 11532 if (!up) { 11533 bnxt_inv_fw_health_reg(bp); 11534 return 0; 11535 } 11536 11537 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 11538 
resc_reinit = true; 11539 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 11540 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 11541 fw_reset = true; 11542 else 11543 bnxt_remap_fw_health_regs(bp); 11544 11545 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 11546 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 11547 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11548 return -ENODEV; 11549 } 11550 if (resc_reinit || fw_reset) { 11551 if (fw_reset) { 11552 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11553 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11554 bnxt_ulp_irq_stop(bp); 11555 bnxt_free_ctx_mem(bp); 11556 bnxt_dcb_free(bp); 11557 rc = bnxt_fw_init_one(bp); 11558 if (rc) { 11559 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11560 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11561 return rc; 11562 } 11563 bnxt_clear_int_mode(bp); 11564 rc = bnxt_init_int_mode(bp); 11565 if (rc) { 11566 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11567 netdev_err(bp->dev, "init int mode failed\n"); 11568 return rc; 11569 } 11570 } 11571 rc = bnxt_cancel_reservations(bp, fw_reset); 11572 } 11573 return rc; 11574 } 11575 11576 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 11577 { 11578 struct hwrm_port_led_qcaps_output *resp; 11579 struct hwrm_port_led_qcaps_input *req; 11580 struct bnxt_pf_info *pf = &bp->pf; 11581 int rc; 11582 11583 bp->num_leds = 0; 11584 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 11585 return 0; 11586 11587 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 11588 if (rc) 11589 return rc; 11590 11591 req->port_id = cpu_to_le16(pf->port_id); 11592 resp = hwrm_req_hold(bp, req); 11593 rc = hwrm_req_send(bp, req); 11594 if (rc) { 11595 hwrm_req_drop(bp, req); 11596 return rc; 11597 } 11598 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 11599 int i; 11600 11601 bp->num_leds = resp->num_leds; 11602 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 11603 bp->num_leds); 11604 for (i = 0; i < bp->num_leds; i++) { 11605 struct bnxt_led_info *led = &bp->leds[i]; 11606 __le16 caps = led->led_state_caps; 11607 11608 if (!led->led_group_id || 11609 !BNXT_LED_ALT_BLINK_CAP(caps)) { 11610 bp->num_leds = 0; 11611 break; 11612 } 11613 } 11614 } 11615 hwrm_req_drop(bp, req); 11616 return 0; 11617 } 11618 11619 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 11620 { 11621 struct hwrm_wol_filter_alloc_output *resp; 11622 struct hwrm_wol_filter_alloc_input *req; 11623 int rc; 11624 11625 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 11626 if (rc) 11627 return rc; 11628 11629 req->port_id = cpu_to_le16(bp->pf.port_id); 11630 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 11631 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 11632 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 11633 11634 resp = hwrm_req_hold(bp, req); 11635 rc = hwrm_req_send(bp, req); 11636 if (!rc) 11637 bp->wol_filter_id = resp->wol_filter_id; 11638 hwrm_req_drop(bp, req); 11639 return rc; 11640 } 11641 11642 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 11643 { 11644 struct hwrm_wol_filter_free_input *req; 11645 int rc; 11646 11647 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 11648 if (rc) 11649 return rc; 11650 11651 req->port_id = cpu_to_le16(bp->pf.port_id); 11652 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 11653 req->wol_filter_id = bp->wol_filter_id; 11654 11655 return hwrm_req_send(bp, req); 11656 } 11657 11658 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 
11659 { 11660 struct hwrm_wol_filter_qcfg_output *resp; 11661 struct hwrm_wol_filter_qcfg_input *req; 11662 u16 next_handle = 0; 11663 int rc; 11664 11665 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 11666 if (rc) 11667 return rc; 11668 11669 req->port_id = cpu_to_le16(bp->pf.port_id); 11670 req->handle = cpu_to_le16(handle); 11671 resp = hwrm_req_hold(bp, req); 11672 rc = hwrm_req_send(bp, req); 11673 if (!rc) { 11674 next_handle = le16_to_cpu(resp->next_handle); 11675 if (next_handle != 0) { 11676 if (resp->wol_type == 11677 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 11678 bp->wol = 1; 11679 bp->wol_filter_id = resp->wol_filter_id; 11680 } 11681 } 11682 } 11683 hwrm_req_drop(bp, req); 11684 return next_handle; 11685 } 11686 11687 static void bnxt_get_wol_settings(struct bnxt *bp) 11688 { 11689 u16 handle = 0; 11690 11691 bp->wol = 0; 11692 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 11693 return; 11694 11695 do { 11696 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 11697 } while (handle && handle != 0xffff); 11698 } 11699 11700 static bool bnxt_eee_config_ok(struct bnxt *bp) 11701 { 11702 struct ethtool_keee *eee = &bp->eee; 11703 struct bnxt_link_info *link_info = &bp->link_info; 11704 11705 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 11706 return true; 11707 11708 if (eee->eee_enabled) { 11709 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 11710 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 11711 11712 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 11713 11714 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11715 eee->eee_enabled = 0; 11716 return false; 11717 } 11718 if (linkmode_andnot(tmp, eee->advertised, advertising)) { 11719 linkmode_and(eee->advertised, advertising, 11720 eee->supported); 11721 return false; 11722 } 11723 } 11724 return true; 11725 } 11726 11727 static int bnxt_update_phy_setting(struct bnxt *bp) 11728 { 11729 int rc; 11730 bool update_link = false; 11731 bool update_pause = false; 11732 bool update_eee = false; 11733 struct bnxt_link_info *link_info = &bp->link_info; 11734 11735 rc = bnxt_update_link(bp, true); 11736 if (rc) { 11737 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 11738 rc); 11739 return rc; 11740 } 11741 if (!BNXT_SINGLE_PF(bp)) 11742 return 0; 11743 11744 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11745 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 11746 link_info->req_flow_ctrl) 11747 update_pause = true; 11748 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11749 link_info->force_pause_setting != link_info->req_flow_ctrl) 11750 update_pause = true; 11751 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11752 if (BNXT_AUTO_MODE(link_info->auto_mode)) 11753 update_link = true; 11754 if (bnxt_force_speed_updated(link_info)) 11755 update_link = true; 11756 if (link_info->req_duplex != link_info->duplex_setting) 11757 update_link = true; 11758 } else { 11759 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 11760 update_link = true; 11761 if (bnxt_auto_speed_updated(link_info)) 11762 update_link = true; 11763 } 11764 11765 /* The last close may have shutdown the link, so need to call 11766 * PHY_CFG to bring it back up. 
11767 */ 11768 if (!BNXT_LINK_IS_UP(bp)) 11769 update_link = true; 11770 11771 if (!bnxt_eee_config_ok(bp)) 11772 update_eee = true; 11773 11774 if (update_link) 11775 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 11776 else if (update_pause) 11777 rc = bnxt_hwrm_set_pause(bp); 11778 if (rc) { 11779 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 11780 rc); 11781 return rc; 11782 } 11783 11784 return rc; 11785 } 11786 11787 /* Common routine to pre-map certain register block to different GRC window. 11788 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 11789 * in PF and 3 windows in VF that can be customized to map in different 11790 * register blocks. 11791 */ 11792 static void bnxt_preset_reg_win(struct bnxt *bp) 11793 { 11794 if (BNXT_PF(bp)) { 11795 /* CAG registers map to GRC window #4 */ 11796 writel(BNXT_CAG_REG_BASE, 11797 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 11798 } 11799 } 11800 11801 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 11802 11803 static int bnxt_reinit_after_abort(struct bnxt *bp) 11804 { 11805 int rc; 11806 11807 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11808 return -EBUSY; 11809 11810 if (bp->dev->reg_state == NETREG_UNREGISTERED) 11811 return -ENODEV; 11812 11813 rc = bnxt_fw_init_one(bp); 11814 if (!rc) { 11815 bnxt_clear_int_mode(bp); 11816 rc = bnxt_init_int_mode(bp); 11817 if (!rc) { 11818 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11819 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11820 } 11821 } 11822 return rc; 11823 } 11824 11825 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 11826 { 11827 struct bnxt_ntuple_filter *ntp_fltr; 11828 struct bnxt_l2_filter *l2_fltr; 11829 11830 if (list_empty(&fltr->list)) 11831 return; 11832 11833 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 11834 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 11835 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 11836 atomic_inc(&l2_fltr->refcnt); 11837 ntp_fltr->l2_fltr = l2_fltr; 11838 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { 11839 bnxt_del_ntp_filter(bp, ntp_fltr); 11840 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", 11841 fltr->sw_id); 11842 } 11843 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { 11844 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); 11845 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { 11846 bnxt_del_l2_filter(bp, l2_fltr); 11847 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", 11848 fltr->sw_id); 11849 } 11850 } 11851 } 11852 11853 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) 11854 { 11855 struct bnxt_filter_base *usr_fltr, *tmp; 11856 11857 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) 11858 bnxt_cfg_one_usr_fltr(bp, usr_fltr); 11859 } 11860 11861 static int bnxt_set_xps_mapping(struct bnxt *bp) 11862 { 11863 int numa_node = dev_to_node(&bp->pdev->dev); 11864 unsigned int q_idx, map_idx, cpu, i; 11865 const struct cpumask *cpu_mask_ptr; 11866 int nr_cpus = num_online_cpus(); 11867 cpumask_t *q_map; 11868 int rc = 0; 11869 11870 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); 11871 if (!q_map) 11872 return -ENOMEM; 11873 11874 /* Create CPU mask for all TX queues across MQPRIO traffic classes. 11875 * Each TC has the same number of TX queues. The nth TX queue for each 11876 * TC will have the same CPU mask. 
11877 */ 11878 for (i = 0; i < nr_cpus; i++) { 11879 map_idx = i % bp->tx_nr_rings_per_tc; 11880 cpu = cpumask_local_spread(i, numa_node); 11881 cpu_mask_ptr = get_cpu_mask(cpu); 11882 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); 11883 } 11884 11885 /* Register CPU mask for each TX queue except the ones marked for XDP */ 11886 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { 11887 map_idx = q_idx % bp->tx_nr_rings_per_tc; 11888 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); 11889 if (rc) { 11890 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", 11891 q_idx); 11892 break; 11893 } 11894 } 11895 11896 kfree(q_map); 11897 11898 return rc; 11899 } 11900 11901 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11902 { 11903 int rc = 0; 11904 11905 bnxt_preset_reg_win(bp); 11906 netif_carrier_off(bp->dev); 11907 if (irq_re_init) { 11908 /* Reserve rings now if none were reserved at driver probe. */ 11909 rc = bnxt_init_dflt_ring_mode(bp); 11910 if (rc) { 11911 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 11912 return rc; 11913 } 11914 } 11915 rc = bnxt_reserve_rings(bp, irq_re_init); 11916 if (rc) 11917 return rc; 11918 if ((bp->flags & BNXT_FLAG_RFS) && 11919 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 11920 /* disable RFS if falling back to INTA */ 11921 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 11922 bp->flags &= ~BNXT_FLAG_RFS; 11923 } 11924 11925 rc = bnxt_alloc_mem(bp, irq_re_init); 11926 if (rc) { 11927 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11928 goto open_err_free_mem; 11929 } 11930 11931 if (irq_re_init) { 11932 bnxt_init_napi(bp); 11933 rc = bnxt_request_irq(bp); 11934 if (rc) { 11935 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 11936 goto open_err_irq; 11937 } 11938 } 11939 11940 rc = bnxt_init_nic(bp, irq_re_init); 11941 if (rc) { 11942 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11943 goto open_err_irq; 11944 } 11945 11946 bnxt_enable_napi(bp); 11947 bnxt_debug_dev_init(bp); 11948 11949 if (link_re_init) { 11950 mutex_lock(&bp->link_lock); 11951 rc = bnxt_update_phy_setting(bp); 11952 mutex_unlock(&bp->link_lock); 11953 if (rc) { 11954 netdev_warn(bp->dev, "failed to update phy settings\n"); 11955 if (BNXT_SINGLE_PF(bp)) { 11956 bp->link_info.phy_retry = true; 11957 bp->link_info.phy_retry_expires = 11958 jiffies + 5 * HZ; 11959 } 11960 } 11961 } 11962 11963 if (irq_re_init) { 11964 udp_tunnel_nic_reset_ntf(bp->dev); 11965 rc = bnxt_set_xps_mapping(bp); 11966 if (rc) 11967 netdev_warn(bp->dev, "failed to set xps mapping\n"); 11968 } 11969 11970 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 11971 if (!static_key_enabled(&bnxt_xdp_locking_key)) 11972 static_branch_enable(&bnxt_xdp_locking_key); 11973 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 11974 static_branch_disable(&bnxt_xdp_locking_key); 11975 } 11976 set_bit(BNXT_STATE_OPEN, &bp->state); 11977 bnxt_enable_int(bp); 11978 /* Enable TX queues */ 11979 bnxt_tx_enable(bp); 11980 mod_timer(&bp->timer, jiffies + bp->current_interval); 11981 /* Poll link status and check for SFP+ module status */ 11982 mutex_lock(&bp->link_lock); 11983 bnxt_get_port_module_status(bp); 11984 mutex_unlock(&bp->link_lock); 11985 11986 /* VF-reps may need to be re-opened after the PF is re-opened */ 11987 if (BNXT_PF(bp)) 11988 bnxt_vf_reps_open(bp); 11989 if (bp->ptp_cfg) 11990 atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); 11991 bnxt_ptp_init_rtc(bp, true); 11992 bnxt_ptp_cfg_tstamp_filters(bp); 11993 if 
(BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 11994 bnxt_hwrm_realloc_rss_ctx_vnic(bp); 11995 bnxt_cfg_usr_fltrs(bp); 11996 return 0; 11997 11998 open_err_irq: 11999 bnxt_del_napi(bp); 12000 12001 open_err_free_mem: 12002 bnxt_free_skbs(bp); 12003 bnxt_free_irq(bp); 12004 bnxt_free_mem(bp, true); 12005 return rc; 12006 } 12007 12008 /* rtnl_lock held */ 12009 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12010 { 12011 int rc = 0; 12012 12013 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 12014 rc = -EIO; 12015 if (!rc) 12016 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 12017 if (rc) { 12018 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 12019 dev_close(bp->dev); 12020 } 12021 return rc; 12022 } 12023 12024 /* rtnl_lock held, open the NIC half way by allocating all resources, but 12025 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 12026 * self tests. 12027 */ 12028 int bnxt_half_open_nic(struct bnxt *bp) 12029 { 12030 int rc = 0; 12031 12032 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12033 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 12034 rc = -ENODEV; 12035 goto half_open_err; 12036 } 12037 12038 rc = bnxt_alloc_mem(bp, true); 12039 if (rc) { 12040 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12041 goto half_open_err; 12042 } 12043 bnxt_init_napi(bp); 12044 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12045 rc = bnxt_init_nic(bp, true); 12046 if (rc) { 12047 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12048 bnxt_del_napi(bp); 12049 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12050 goto half_open_err; 12051 } 12052 return 0; 12053 12054 half_open_err: 12055 bnxt_free_skbs(bp); 12056 bnxt_free_mem(bp, true); 12057 dev_close(bp->dev); 12058 return rc; 12059 } 12060 12061 /* rtnl_lock held, this call can only be made after a previous successful 12062 * call to bnxt_half_open_nic(). 
12063 */ 12064 void bnxt_half_close_nic(struct bnxt *bp) 12065 { 12066 bnxt_hwrm_resource_free(bp, false, true); 12067 bnxt_del_napi(bp); 12068 bnxt_free_skbs(bp); 12069 bnxt_free_mem(bp, true); 12070 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12071 } 12072 12073 void bnxt_reenable_sriov(struct bnxt *bp) 12074 { 12075 if (BNXT_PF(bp)) { 12076 struct bnxt_pf_info *pf = &bp->pf; 12077 int n = pf->active_vfs; 12078 12079 if (n) 12080 bnxt_cfg_hw_sriov(bp, &n, true); 12081 } 12082 } 12083 12084 static int bnxt_open(struct net_device *dev) 12085 { 12086 struct bnxt *bp = netdev_priv(dev); 12087 int rc; 12088 12089 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12090 rc = bnxt_reinit_after_abort(bp); 12091 if (rc) { 12092 if (rc == -EBUSY) 12093 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 12094 else 12095 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 12096 return -ENODEV; 12097 } 12098 } 12099 12100 rc = bnxt_hwrm_if_change(bp, true); 12101 if (rc) 12102 return rc; 12103 12104 rc = __bnxt_open_nic(bp, true, true); 12105 if (rc) { 12106 bnxt_hwrm_if_change(bp, false); 12107 } else { 12108 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 12109 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12110 bnxt_queue_sp_work(bp, 12111 BNXT_RESTART_ULP_SP_EVENT); 12112 } 12113 } 12114 12115 return rc; 12116 } 12117 12118 static bool bnxt_drv_busy(struct bnxt *bp) 12119 { 12120 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 12121 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 12122 } 12123 12124 static void bnxt_get_ring_stats(struct bnxt *bp, 12125 struct rtnl_link_stats64 *stats); 12126 12127 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 12128 bool link_re_init) 12129 { 12130 /* Close the VF-reps before closing PF */ 12131 if (BNXT_PF(bp)) 12132 bnxt_vf_reps_close(bp); 12133 12134 /* Change device state to avoid TX queue wake up's */ 12135 bnxt_tx_disable(bp); 12136 12137 clear_bit(BNXT_STATE_OPEN, &bp->state); 12138 smp_mb__after_atomic(); 12139 while (bnxt_drv_busy(bp)) 12140 msleep(20); 12141 12142 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12143 bnxt_clear_rss_ctxs(bp, false); 12144 /* Flush rings and disable interrupts */ 12145 bnxt_shutdown_nic(bp, irq_re_init); 12146 12147 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 12148 12149 bnxt_debug_dev_exit(bp); 12150 bnxt_disable_napi(bp); 12151 del_timer_sync(&bp->timer); 12152 bnxt_free_skbs(bp); 12153 12154 /* Save ring stats before shutdown */ 12155 if (bp->bnapi && irq_re_init) { 12156 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 12157 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); 12158 } 12159 if (irq_re_init) { 12160 bnxt_free_irq(bp); 12161 bnxt_del_napi(bp); 12162 } 12163 bnxt_free_mem(bp, irq_re_init); 12164 } 12165 12166 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12167 { 12168 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12169 /* If we get here, it means firmware reset is in progress 12170 * while we are trying to close. We can safely proceed with 12171 * the close because we are holding rtnl_lock(). Some firmware 12172 * messages may fail as we proceed to close. We set the 12173 * ABORT_ERR flag here so that the FW reset thread will later 12174 * abort when it gets the rtnl_lock() and sees the flag. 
12175 */ 12176 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 12177 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12178 } 12179 12180 #ifdef CONFIG_BNXT_SRIOV 12181 if (bp->sriov_cfg) { 12182 int rc; 12183 12184 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 12185 !bp->sriov_cfg, 12186 BNXT_SRIOV_CFG_WAIT_TMO); 12187 if (!rc) 12188 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); 12189 else if (rc < 0) 12190 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); 12191 } 12192 #endif 12193 __bnxt_close_nic(bp, irq_re_init, link_re_init); 12194 } 12195 12196 static int bnxt_close(struct net_device *dev) 12197 { 12198 struct bnxt *bp = netdev_priv(dev); 12199 12200 bnxt_close_nic(bp, true, true); 12201 bnxt_hwrm_shutdown_link(bp); 12202 bnxt_hwrm_if_change(bp, false); 12203 return 0; 12204 } 12205 12206 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 12207 u16 *val) 12208 { 12209 struct hwrm_port_phy_mdio_read_output *resp; 12210 struct hwrm_port_phy_mdio_read_input *req; 12211 int rc; 12212 12213 if (bp->hwrm_spec_code < 0x10a00) 12214 return -EOPNOTSUPP; 12215 12216 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 12217 if (rc) 12218 return rc; 12219 12220 req->port_id = cpu_to_le16(bp->pf.port_id); 12221 req->phy_addr = phy_addr; 12222 req->reg_addr = cpu_to_le16(reg & 0x1f); 12223 if (mdio_phy_id_is_c45(phy_addr)) { 12224 req->cl45_mdio = 1; 12225 req->phy_addr = mdio_phy_id_prtad(phy_addr); 12226 req->dev_addr = mdio_phy_id_devad(phy_addr); 12227 req->reg_addr = cpu_to_le16(reg); 12228 } 12229 12230 resp = hwrm_req_hold(bp, req); 12231 rc = hwrm_req_send(bp, req); 12232 if (!rc) 12233 *val = le16_to_cpu(resp->reg_data); 12234 hwrm_req_drop(bp, req); 12235 return rc; 12236 } 12237 12238 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 12239 u16 val) 12240 { 12241 struct hwrm_port_phy_mdio_write_input *req; 12242 int rc; 12243 12244 if (bp->hwrm_spec_code < 0x10a00) 12245 return -EOPNOTSUPP; 12246 12247 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 12248 if (rc) 12249 return rc; 12250 12251 req->port_id = cpu_to_le16(bp->pf.port_id); 12252 req->phy_addr = phy_addr; 12253 req->reg_addr = cpu_to_le16(reg & 0x1f); 12254 if (mdio_phy_id_is_c45(phy_addr)) { 12255 req->cl45_mdio = 1; 12256 req->phy_addr = mdio_phy_id_prtad(phy_addr); 12257 req->dev_addr = mdio_phy_id_devad(phy_addr); 12258 req->reg_addr = cpu_to_le16(reg); 12259 } 12260 req->reg_data = cpu_to_le16(val); 12261 12262 return hwrm_req_send(bp, req); 12263 } 12264 12265 /* rtnl_lock held */ 12266 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12267 { 12268 struct mii_ioctl_data *mdio = if_mii(ifr); 12269 struct bnxt *bp = netdev_priv(dev); 12270 int rc; 12271 12272 switch (cmd) { 12273 case SIOCGMIIPHY: 12274 mdio->phy_id = bp->link_info.phy_addr; 12275 12276 fallthrough; 12277 case SIOCGMIIREG: { 12278 u16 mii_regval = 0; 12279 12280 if (!netif_running(dev)) 12281 return -EAGAIN; 12282 12283 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 12284 &mii_regval); 12285 mdio->val_out = mii_regval; 12286 return rc; 12287 } 12288 12289 case SIOCSMIIREG: 12290 if (!netif_running(dev)) 12291 return -EAGAIN; 12292 12293 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 12294 mdio->val_in); 12295 12296 case SIOCSHWTSTAMP: 12297 return bnxt_hwtstamp_set(dev, ifr); 12298 12299 case 
SIOCGHWTSTAMP: 12300 return bnxt_hwtstamp_get(dev, ifr); 12301 12302 default: 12303 /* do nothing */ 12304 break; 12305 } 12306 return -EOPNOTSUPP; 12307 } 12308 12309 static void bnxt_get_ring_stats(struct bnxt *bp, 12310 struct rtnl_link_stats64 *stats) 12311 { 12312 int i; 12313 12314 for (i = 0; i < bp->cp_nr_rings; i++) { 12315 struct bnxt_napi *bnapi = bp->bnapi[i]; 12316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 12317 u64 *sw = cpr->stats.sw_stats; 12318 12319 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 12320 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 12321 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 12322 12323 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 12324 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 12325 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 12326 12327 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 12328 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 12329 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 12330 12331 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 12332 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 12333 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 12334 12335 stats->rx_missed_errors += 12336 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 12337 12338 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 12339 12340 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 12341 12342 stats->rx_dropped += 12343 cpr->sw_stats->rx.rx_netpoll_discards + 12344 cpr->sw_stats->rx.rx_oom_discards; 12345 } 12346 } 12347 12348 static void bnxt_add_prev_stats(struct bnxt *bp, 12349 struct rtnl_link_stats64 *stats) 12350 { 12351 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 12352 12353 stats->rx_packets += prev_stats->rx_packets; 12354 stats->tx_packets += prev_stats->tx_packets; 12355 stats->rx_bytes += prev_stats->rx_bytes; 12356 stats->tx_bytes += prev_stats->tx_bytes; 12357 stats->rx_missed_errors += prev_stats->rx_missed_errors; 12358 stats->multicast += prev_stats->multicast; 12359 stats->rx_dropped += prev_stats->rx_dropped; 12360 stats->tx_dropped += prev_stats->tx_dropped; 12361 } 12362 12363 static void 12364 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 12365 { 12366 struct bnxt *bp = netdev_priv(dev); 12367 12368 set_bit(BNXT_STATE_READ_STATS, &bp->state); 12369 /* Make sure bnxt_close_nic() sees that we are reading stats before 12370 * we check the BNXT_STATE_OPEN flag. 
12371 */ 12372 smp_mb__after_atomic(); 12373 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12374 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 12375 *stats = bp->net_stats_prev; 12376 return; 12377 } 12378 12379 bnxt_get_ring_stats(bp, stats); 12380 bnxt_add_prev_stats(bp, stats); 12381 12382 if (bp->flags & BNXT_FLAG_PORT_STATS) { 12383 u64 *rx = bp->port_stats.sw_stats; 12384 u64 *tx = bp->port_stats.sw_stats + 12385 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 12386 12387 stats->rx_crc_errors = 12388 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 12389 stats->rx_frame_errors = 12390 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 12391 stats->rx_length_errors = 12392 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 12393 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 12394 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 12395 stats->rx_errors = 12396 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 12397 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 12398 stats->collisions = 12399 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 12400 stats->tx_fifo_errors = 12401 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 12402 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 12403 } 12404 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 12405 } 12406 12407 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 12408 struct bnxt_total_ring_err_stats *stats, 12409 struct bnxt_cp_ring_info *cpr) 12410 { 12411 struct bnxt_sw_stats *sw_stats = cpr->sw_stats; 12412 u64 *hw_stats = cpr->stats.sw_stats; 12413 12414 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 12415 stats->rx_total_resets += sw_stats->rx.rx_resets; 12416 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 12417 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 12418 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 12419 stats->rx_total_ring_discards += 12420 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 12421 stats->tx_total_resets += sw_stats->tx.tx_resets; 12422 stats->tx_total_ring_discards += 12423 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 12424 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 12425 } 12426 12427 void bnxt_get_ring_err_stats(struct bnxt *bp, 12428 struct bnxt_total_ring_err_stats *stats) 12429 { 12430 int i; 12431 12432 for (i = 0; i < bp->cp_nr_rings; i++) 12433 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 12434 } 12435 12436 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 12437 { 12438 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12439 struct net_device *dev = bp->dev; 12440 struct netdev_hw_addr *ha; 12441 u8 *haddr; 12442 int mc_count = 0; 12443 bool update = false; 12444 int off = 0; 12445 12446 netdev_for_each_mc_addr(ha, dev) { 12447 if (mc_count >= BNXT_MAX_MC_ADDRS) { 12448 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12449 vnic->mc_list_count = 0; 12450 return false; 12451 } 12452 haddr = ha->addr; 12453 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 12454 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 12455 update = true; 12456 } 12457 off += ETH_ALEN; 12458 mc_count++; 12459 } 12460 if (mc_count) 12461 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 12462 12463 if (mc_count != vnic->mc_list_count) { 12464 vnic->mc_list_count = mc_count; 12465 update = true; 12466 } 12467 return update; 12468 } 12469 12470 static bool bnxt_uc_list_updated(struct bnxt *bp) 12471 { 12472 struct net_device *dev = bp->dev; 12473 struct bnxt_vnic_info *vnic = 
&bp->vnic_info[BNXT_VNIC_DEFAULT]; 12474 struct netdev_hw_addr *ha; 12475 int off = 0; 12476 12477 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 12478 return true; 12479 12480 netdev_for_each_uc_addr(ha, dev) { 12481 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 12482 return true; 12483 12484 off += ETH_ALEN; 12485 } 12486 return false; 12487 } 12488 12489 static void bnxt_set_rx_mode(struct net_device *dev) 12490 { 12491 struct bnxt *bp = netdev_priv(dev); 12492 struct bnxt_vnic_info *vnic; 12493 bool mc_update = false; 12494 bool uc_update; 12495 u32 mask; 12496 12497 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 12498 return; 12499 12500 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12501 mask = vnic->rx_mask; 12502 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 12503 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 12504 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 12505 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 12506 12507 if (dev->flags & IFF_PROMISC) 12508 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12509 12510 uc_update = bnxt_uc_list_updated(bp); 12511 12512 if (dev->flags & IFF_BROADCAST) 12513 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 12514 if (dev->flags & IFF_ALLMULTI) { 12515 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12516 vnic->mc_list_count = 0; 12517 } else if (dev->flags & IFF_MULTICAST) { 12518 mc_update = bnxt_mc_list_updated(bp, &mask); 12519 } 12520 12521 if (mask != vnic->rx_mask || uc_update || mc_update) { 12522 vnic->rx_mask = mask; 12523 12524 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12525 } 12526 } 12527 12528 static int bnxt_cfg_rx_mode(struct bnxt *bp) 12529 { 12530 struct net_device *dev = bp->dev; 12531 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12532 struct netdev_hw_addr *ha; 12533 int i, off = 0, rc; 12534 bool uc_update; 12535 12536 netif_addr_lock_bh(dev); 12537 uc_update = bnxt_uc_list_updated(bp); 12538 netif_addr_unlock_bh(dev); 12539 12540 if (!uc_update) 12541 goto skip_uc; 12542 12543 for (i = 1; i < vnic->uc_filter_count; i++) { 12544 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 12545 12546 bnxt_hwrm_l2_filter_free(bp, fltr); 12547 bnxt_del_l2_filter(bp, fltr); 12548 } 12549 12550 vnic->uc_filter_count = 1; 12551 12552 netif_addr_lock_bh(dev); 12553 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 12554 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12555 } else { 12556 netdev_for_each_uc_addr(ha, dev) { 12557 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 12558 off += ETH_ALEN; 12559 vnic->uc_filter_count++; 12560 } 12561 } 12562 netif_addr_unlock_bh(dev); 12563 12564 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 12565 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 12566 if (rc) { 12567 if (BNXT_VF(bp) && rc == -ENODEV) { 12568 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12569 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 12570 else 12571 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); 12572 rc = 0; 12573 } else { 12574 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 12575 } 12576 vnic->uc_filter_count = i; 12577 return rc; 12578 } 12579 } 12580 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12581 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 12582 12583 skip_uc: 12584 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 12585 !bnxt_promisc_ok(bp)) 12586 vnic->rx_mask &= 
~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12587 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 12588 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 12589 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 12590 rc); 12591 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 12592 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12593 vnic->mc_list_count = 0; 12594 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 12595 } 12596 if (rc) 12597 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 12598 rc); 12599 12600 return rc; 12601 } 12602 12603 static bool bnxt_can_reserve_rings(struct bnxt *bp) 12604 { 12605 #ifdef CONFIG_BNXT_SRIOV 12606 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 12607 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12608 12609 /* No minimum rings were provisioned by the PF. Don't 12610 * reserve rings by default when device is down. 12611 */ 12612 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 12613 return true; 12614 12615 if (!netif_running(bp->dev)) 12616 return false; 12617 } 12618 #endif 12619 return true; 12620 } 12621 12622 /* If the chip and firmware supports RFS */ 12623 static bool bnxt_rfs_supported(struct bnxt *bp) 12624 { 12625 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 12626 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 12627 return true; 12628 return false; 12629 } 12630 /* 212 firmware is broken for aRFS */ 12631 if (BNXT_FW_MAJ(bp) == 212) 12632 return false; 12633 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 12634 return true; 12635 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 12636 return true; 12637 return false; 12638 } 12639 12640 /* If runtime conditions support RFS */ 12641 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) 12642 { 12643 struct bnxt_hw_rings hwr = {0}; 12644 int max_vnics, max_rss_ctxs; 12645 12646 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12647 !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 12648 return bnxt_rfs_supported(bp); 12649 12650 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 12651 return false; 12652 12653 hwr.grp = bp->rx_nr_rings; 12654 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); 12655 if (new_rss_ctx) 12656 hwr.vnic++; 12657 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 12658 max_vnics = bnxt_get_max_func_vnics(bp); 12659 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 12660 12661 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { 12662 if (bp->rx_nr_rings > 1) 12663 netdev_warn(bp->dev, 12664 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 12665 min(max_rss_ctxs - 1, max_vnics - 1)); 12666 return false; 12667 } 12668 12669 if (!BNXT_NEW_RM(bp)) 12670 return true; 12671 12672 if (hwr.vnic == bp->hw_resc.resv_vnics && 12673 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12674 return true; 12675 12676 bnxt_hwrm_reserve_rings(bp, &hwr); 12677 if (hwr.vnic <= bp->hw_resc.resv_vnics && 12678 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12679 return true; 12680 12681 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 12682 hwr.vnic = 1; 12683 hwr.rss_ctx = 0; 12684 bnxt_hwrm_reserve_rings(bp, &hwr); 12685 return false; 12686 } 12687 12688 static netdev_features_t bnxt_fix_features(struct net_device *dev, 12689 netdev_features_t features) 12690 { 12691 struct bnxt *bp = netdev_priv(dev); 12692 netdev_features_t vlan_features; 12693 12694 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) 12695 features &= ~NETIF_F_NTUPLE; 
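/* LRO and hardware GRO depend on RX aggregation rings, so both are
 * cleared below when aggregation rings are disabled or an XDP program
 * is attached.  Hardware GRO also requires software GRO and, when
 * enabled, takes precedence over LRO.
 */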
12696 12697 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 12698 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12699 12700 if (!(features & NETIF_F_GRO)) 12701 features &= ~NETIF_F_GRO_HW; 12702 12703 if (features & NETIF_F_GRO_HW) 12704 features &= ~NETIF_F_LRO; 12705 12706 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 12707 * turned on or off together. 12708 */ 12709 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 12710 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 12711 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12712 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12713 else if (vlan_features) 12714 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 12715 } 12716 #ifdef CONFIG_BNXT_SRIOV 12717 if (BNXT_VF(bp) && bp->vf.vlan) 12718 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12719 #endif 12720 return features; 12721 } 12722 12723 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 12724 bool link_re_init, u32 flags, bool update_tpa) 12725 { 12726 bnxt_close_nic(bp, irq_re_init, link_re_init); 12727 bp->flags = flags; 12728 if (update_tpa) 12729 bnxt_set_ring_params(bp); 12730 return bnxt_open_nic(bp, irq_re_init, link_re_init); 12731 } 12732 12733 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 12734 { 12735 bool update_tpa = false, update_ntuple = false; 12736 struct bnxt *bp = netdev_priv(dev); 12737 u32 flags = bp->flags; 12738 u32 changes; 12739 int rc = 0; 12740 bool re_init = false; 12741 12742 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 12743 if (features & NETIF_F_GRO_HW) 12744 flags |= BNXT_FLAG_GRO; 12745 else if (features & NETIF_F_LRO) 12746 flags |= BNXT_FLAG_LRO; 12747 12748 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 12749 flags &= ~BNXT_FLAG_TPA; 12750 12751 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12752 flags |= BNXT_FLAG_STRIP_VLAN; 12753 12754 if (features & NETIF_F_NTUPLE) 12755 flags |= BNXT_FLAG_RFS; 12756 else 12757 bnxt_clear_usr_fltrs(bp, true); 12758 12759 changes = flags ^ bp->flags; 12760 if (changes & BNXT_FLAG_TPA) { 12761 update_tpa = true; 12762 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 12763 (flags & BNXT_FLAG_TPA) == 0 || 12764 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12765 re_init = true; 12766 } 12767 12768 if (changes & ~BNXT_FLAG_TPA) 12769 re_init = true; 12770 12771 if (changes & BNXT_FLAG_RFS) 12772 update_ntuple = true; 12773 12774 if (flags != bp->flags) { 12775 u32 old_flags = bp->flags; 12776 12777 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12778 bp->flags = flags; 12779 if (update_tpa) 12780 bnxt_set_ring_params(bp); 12781 return rc; 12782 } 12783 12784 if (update_ntuple) 12785 return bnxt_reinit_features(bp, true, false, flags, update_tpa); 12786 12787 if (re_init) 12788 return bnxt_reinit_features(bp, false, false, flags, update_tpa); 12789 12790 if (update_tpa) { 12791 bp->flags = flags; 12792 rc = bnxt_set_tpa(bp, 12793 (flags & BNXT_FLAG_TPA) ? 12794 true : false); 12795 if (rc) 12796 bp->flags = old_flags; 12797 } 12798 } 12799 return rc; 12800 } 12801 12802 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 12803 u8 **nextp) 12804 { 12805 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 12806 struct hop_jumbo_hdr *jhdr; 12807 int hdr_count = 0; 12808 u8 *nexthdr; 12809 int start; 12810 12811 /* Check that there are at most 2 IPv6 extension headers, no 12812 * fragment header, and each is <= 64 bytes.
12813 */ 12814 start = nw_off + sizeof(*ip6h); 12815 nexthdr = &ip6h->nexthdr; 12816 while (ipv6_ext_hdr(*nexthdr)) { 12817 struct ipv6_opt_hdr *hp; 12818 int hdrlen; 12819 12820 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 12821 *nexthdr == NEXTHDR_FRAGMENT) 12822 return false; 12823 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 12824 skb_headlen(skb), NULL); 12825 if (!hp) 12826 return false; 12827 if (*nexthdr == NEXTHDR_AUTH) 12828 hdrlen = ipv6_authlen(hp); 12829 else 12830 hdrlen = ipv6_optlen(hp); 12831 12832 if (hdrlen > 64) 12833 return false; 12834 12835 /* The ext header may be a hop-by-hop header inserted for 12836 * big TCP purposes. This will be removed before sending 12837 * from NIC, so do not count it. 12838 */ 12839 if (*nexthdr == NEXTHDR_HOP) { 12840 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 12841 goto increment_hdr; 12842 12843 jhdr = (struct hop_jumbo_hdr *)hp; 12844 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 12845 jhdr->nexthdr != IPPROTO_TCP) 12846 goto increment_hdr; 12847 12848 goto next_hdr; 12849 } 12850 increment_hdr: 12851 hdr_count++; 12852 next_hdr: 12853 nexthdr = &hp->nexthdr; 12854 start += hdrlen; 12855 } 12856 if (nextp) { 12857 /* Caller will check inner protocol */ 12858 if (skb->encapsulation) { 12859 *nextp = nexthdr; 12860 return true; 12861 } 12862 *nextp = NULL; 12863 } 12864 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 12865 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 12866 } 12867 12868 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ 12869 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 12870 { 12871 struct udphdr *uh = udp_hdr(skb); 12872 __be16 udp_port = uh->dest; 12873 12874 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 12875 udp_port != bp->vxlan_gpe_port) 12876 return false; 12877 if (skb->inner_protocol == htons(ETH_P_TEB)) { 12878 struct ethhdr *eh = inner_eth_hdr(skb); 12879 12880 switch (eh->h_proto) { 12881 case htons(ETH_P_IP): 12882 return true; 12883 case htons(ETH_P_IPV6): 12884 return bnxt_exthdr_check(bp, skb, 12885 skb_inner_network_offset(skb), 12886 NULL); 12887 } 12888 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 12889 return true; 12890 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 12891 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 12892 NULL); 12893 } 12894 return false; 12895 } 12896 12897 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 12898 { 12899 switch (l4_proto) { 12900 case IPPROTO_UDP: 12901 return bnxt_udp_tunl_check(bp, skb); 12902 case IPPROTO_IPIP: 12903 return true; 12904 case IPPROTO_GRE: { 12905 switch (skb->inner_protocol) { 12906 default: 12907 return false; 12908 case htons(ETH_P_IP): 12909 return true; 12910 case htons(ETH_P_IPV6): 12911 fallthrough; 12912 } 12913 } 12914 case IPPROTO_IPV6: 12915 /* Check ext headers of inner ipv6 */ 12916 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 12917 NULL); 12918 } 12919 return false; 12920 } 12921 12922 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 12923 struct net_device *dev, 12924 netdev_features_t features) 12925 { 12926 struct bnxt *bp = netdev_priv(dev); 12927 u8 *l4_proto; 12928 12929 features = vlan_features_check(skb, features); 12930 switch (vlan_get_protocol(skb)) { 12931 case htons(ETH_P_IP): 12932 if (!skb->encapsulation) 12933 return features; 12934 l4_proto = &ip_hdr(skb)->protocol; 12935 if 
(bnxt_tunl_check(bp, skb, *l4_proto)) 12936 return features; 12937 break; 12938 case htons(ETH_P_IPV6): 12939 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 12940 &l4_proto)) 12941 break; 12942 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 12943 return features; 12944 break; 12945 } 12946 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 12947 } 12948 12949 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 12950 u32 *reg_buf) 12951 { 12952 struct hwrm_dbg_read_direct_output *resp; 12953 struct hwrm_dbg_read_direct_input *req; 12954 __le32 *dbg_reg_buf; 12955 dma_addr_t mapping; 12956 int rc, i; 12957 12958 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 12959 if (rc) 12960 return rc; 12961 12962 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 12963 &mapping); 12964 if (!dbg_reg_buf) { 12965 rc = -ENOMEM; 12966 goto dbg_rd_reg_exit; 12967 } 12968 12969 req->host_dest_addr = cpu_to_le64(mapping); 12970 12971 resp = hwrm_req_hold(bp, req); 12972 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 12973 req->read_len32 = cpu_to_le32(num_words); 12974 12975 rc = hwrm_req_send(bp, req); 12976 if (rc || resp->error_code) { 12977 rc = -EIO; 12978 goto dbg_rd_reg_exit; 12979 } 12980 for (i = 0; i < num_words; i++) 12981 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 12982 12983 dbg_rd_reg_exit: 12984 hwrm_req_drop(bp, req); 12985 return rc; 12986 } 12987 12988 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 12989 u32 ring_id, u32 *prod, u32 *cons) 12990 { 12991 struct hwrm_dbg_ring_info_get_output *resp; 12992 struct hwrm_dbg_ring_info_get_input *req; 12993 int rc; 12994 12995 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 12996 if (rc) 12997 return rc; 12998 12999 req->ring_type = ring_type; 13000 req->fw_ring_id = cpu_to_le32(ring_id); 13001 resp = hwrm_req_hold(bp, req); 13002 rc = hwrm_req_send(bp, req); 13003 if (!rc) { 13004 *prod = le32_to_cpu(resp->producer_index); 13005 *cons = le32_to_cpu(resp->consumer_index); 13006 } 13007 hwrm_req_drop(bp, req); 13008 return rc; 13009 } 13010 13011 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 13012 { 13013 struct bnxt_tx_ring_info *txr; 13014 int i = bnapi->index, j; 13015 13016 bnxt_for_each_napi_tx(j, bnapi, txr) 13017 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 13018 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 13019 txr->tx_cons); 13020 } 13021 13022 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 13023 { 13024 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 13025 int i = bnapi->index; 13026 13027 if (!rxr) 13028 return; 13029 13030 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 13031 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 13032 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 13033 rxr->rx_sw_agg_prod); 13034 } 13035 13036 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 13037 { 13038 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13039 int i = bnapi->index; 13040 13041 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 13042 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 13043 } 13044 13045 static void bnxt_dbg_dump_states(struct bnxt *bp) 13046 { 13047 int i; 13048 struct bnxt_napi *bnapi; 13049 13050 for (i = 0; i < bp->cp_nr_rings; i++) { 13051 bnapi = bp->bnapi[i]; 13052 if (netif_msg_drv(bp)) { 13053 bnxt_dump_tx_sw_state(bnapi); 13054 bnxt_dump_rx_sw_state(bnapi); 13055 
bnxt_dump_cp_sw_state(bnapi); 13056 } 13057 } 13058 } 13059 13060 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 13061 { 13062 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 13063 struct hwrm_ring_reset_input *req; 13064 struct bnxt_napi *bnapi = rxr->bnapi; 13065 struct bnxt_cp_ring_info *cpr; 13066 u16 cp_ring_id; 13067 int rc; 13068 13069 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 13070 if (rc) 13071 return rc; 13072 13073 cpr = &bnapi->cp_ring; 13074 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 13075 req->cmpl_ring = cpu_to_le16(cp_ring_id); 13076 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 13077 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 13078 return hwrm_req_send_silent(bp, req); 13079 } 13080 13081 static void bnxt_reset_task(struct bnxt *bp, bool silent) 13082 { 13083 if (!silent) 13084 bnxt_dbg_dump_states(bp); 13085 if (netif_running(bp->dev)) { 13086 bnxt_close_nic(bp, !silent, false); 13087 bnxt_open_nic(bp, !silent, false); 13088 } 13089 } 13090 13091 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 13092 { 13093 struct bnxt *bp = netdev_priv(dev); 13094 13095 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 13096 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 13097 } 13098 13099 static void bnxt_fw_health_check(struct bnxt *bp) 13100 { 13101 struct bnxt_fw_health *fw_health = bp->fw_health; 13102 struct pci_dev *pdev = bp->pdev; 13103 u32 val; 13104 13105 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13106 return; 13107 13108 /* Make sure it is enabled before checking the tmr_counter. */ 13109 smp_rmb(); 13110 if (fw_health->tmr_counter) { 13111 fw_health->tmr_counter--; 13112 return; 13113 } 13114 13115 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13116 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 13117 fw_health->arrests++; 13118 goto fw_reset; 13119 } 13120 13121 fw_health->last_fw_heartbeat = val; 13122 13123 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13124 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 13125 fw_health->discoveries++; 13126 goto fw_reset; 13127 } 13128 13129 fw_health->tmr_counter = fw_health->tmr_multiplier; 13130 return; 13131 13132 fw_reset: 13133 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 13134 } 13135 13136 static void bnxt_timer(struct timer_list *t) 13137 { 13138 struct bnxt *bp = from_timer(bp, t, timer); 13139 struct net_device *dev = bp->dev; 13140 13141 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 13142 return; 13143 13144 if (atomic_read(&bp->intr_sem) != 0) 13145 goto bnxt_restart_timer; 13146 13147 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 13148 bnxt_fw_health_check(bp); 13149 13150 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 13151 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 13152 13153 if (bnxt_tc_flower_enabled(bp)) 13154 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 13155 13156 #ifdef CONFIG_RFS_ACCEL 13157 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 13158 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13159 #endif /*CONFIG_RFS_ACCEL*/ 13160 13161 if (bp->link_info.phy_retry) { 13162 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 13163 bp->link_info.phy_retry = false; 13164 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 13165 } else { 13166 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 13167 } 13168 } 13169 
13170 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13171 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13172 13173 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 13174 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 13175 13176 bnxt_restart_timer: 13177 mod_timer(&bp->timer, jiffies + bp->current_interval); 13178 } 13179 13180 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 13181 { 13182 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 13183 * set. If the device is being closed, bnxt_close() may be holding 13184 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 13185 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 13186 */ 13187 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13188 rtnl_lock(); 13189 } 13190 13191 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 13192 { 13193 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13194 rtnl_unlock(); 13195 } 13196 13197 /* Only called from bnxt_sp_task() */ 13198 static void bnxt_reset(struct bnxt *bp, bool silent) 13199 { 13200 bnxt_rtnl_lock_sp(bp); 13201 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 13202 bnxt_reset_task(bp, silent); 13203 bnxt_rtnl_unlock_sp(bp); 13204 } 13205 13206 /* Only called from bnxt_sp_task() */ 13207 static void bnxt_rx_ring_reset(struct bnxt *bp) 13208 { 13209 int i; 13210 13211 bnxt_rtnl_lock_sp(bp); 13212 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13213 bnxt_rtnl_unlock_sp(bp); 13214 return; 13215 } 13216 /* Disable and flush TPA before resetting the RX ring */ 13217 if (bp->flags & BNXT_FLAG_TPA) 13218 bnxt_set_tpa(bp, false); 13219 for (i = 0; i < bp->rx_nr_rings; i++) { 13220 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 13221 struct bnxt_cp_ring_info *cpr; 13222 int rc; 13223 13224 if (!rxr->bnapi->in_reset) 13225 continue; 13226 13227 rc = bnxt_hwrm_rx_ring_reset(bp, i); 13228 if (rc) { 13229 if (rc == -EINVAL || rc == -EOPNOTSUPP) 13230 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 13231 else 13232 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 13233 rc); 13234 bnxt_reset_task(bp, true); 13235 break; 13236 } 13237 bnxt_free_one_rx_ring_skbs(bp, i); 13238 rxr->rx_prod = 0; 13239 rxr->rx_agg_prod = 0; 13240 rxr->rx_sw_agg_prod = 0; 13241 rxr->rx_next_cons = 0; 13242 rxr->bnapi->in_reset = false; 13243 bnxt_alloc_one_rx_ring(bp, i); 13244 cpr = &rxr->bnapi->cp_ring; 13245 cpr->sw_stats->rx.rx_resets++; 13246 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13247 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 13248 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 13249 } 13250 if (bp->flags & BNXT_FLAG_TPA) 13251 bnxt_set_tpa(bp, true); 13252 bnxt_rtnl_unlock_sp(bp); 13253 } 13254 13255 static void bnxt_fw_fatal_close(struct bnxt *bp) 13256 { 13257 bnxt_tx_disable(bp); 13258 bnxt_disable_napi(bp); 13259 bnxt_disable_int_sync(bp); 13260 bnxt_free_irq(bp); 13261 bnxt_clear_int_mode(bp); 13262 pci_disable_device(bp->pdev); 13263 } 13264 13265 static void bnxt_fw_reset_close(struct bnxt *bp) 13266 { 13267 /* When firmware is in fatal state, quiesce device and disable 13268 * bus master to prevent any potential bad DMAs before freeing 13269 * kernel memory. 
13270 */ 13271 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 13272 u16 val = 0; 13273 13274 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 13275 if (val == 0xffff) 13276 bp->fw_reset_min_dsecs = 0; 13277 bnxt_fw_fatal_close(bp); 13278 } 13279 __bnxt_close_nic(bp, true, false); 13280 bnxt_vf_reps_free(bp); 13281 bnxt_clear_int_mode(bp); 13282 bnxt_hwrm_func_drv_unrgtr(bp); 13283 if (pci_is_enabled(bp->pdev)) 13284 pci_disable_device(bp->pdev); 13285 bnxt_free_ctx_mem(bp); 13286 } 13287 13288 static bool is_bnxt_fw_ok(struct bnxt *bp) 13289 { 13290 struct bnxt_fw_health *fw_health = bp->fw_health; 13291 bool no_heartbeat = false, has_reset = false; 13292 u32 val; 13293 13294 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13295 if (val == fw_health->last_fw_heartbeat) 13296 no_heartbeat = true; 13297 13298 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13299 if (val != fw_health->last_fw_reset_cnt) 13300 has_reset = true; 13301 13302 if (!no_heartbeat && has_reset) 13303 return true; 13304 13305 return false; 13306 } 13307 13308 /* rtnl_lock is acquired before calling this function */ 13309 static void bnxt_force_fw_reset(struct bnxt *bp) 13310 { 13311 struct bnxt_fw_health *fw_health = bp->fw_health; 13312 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 13313 u32 wait_dsecs; 13314 13315 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 13316 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13317 return; 13318 13319 if (ptp) { 13320 spin_lock_bh(&ptp->ptp_lock); 13321 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13322 spin_unlock_bh(&ptp->ptp_lock); 13323 } else { 13324 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13325 } 13326 bnxt_fw_reset_close(bp); 13327 wait_dsecs = fw_health->master_func_wait_dsecs; 13328 if (fw_health->primary) { 13329 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 13330 wait_dsecs = 0; 13331 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 13332 } else { 13333 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 13334 wait_dsecs = fw_health->normal_func_wait_dsecs; 13335 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13336 } 13337 13338 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 13339 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 13340 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 13341 } 13342 13343 void bnxt_fw_exception(struct bnxt *bp) 13344 { 13345 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 13346 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 13347 bnxt_ulp_stop(bp); 13348 bnxt_rtnl_lock_sp(bp); 13349 bnxt_force_fw_reset(bp); 13350 bnxt_rtnl_unlock_sp(bp); 13351 } 13352 13353 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 13354 * < 0 on error. 
13355 */ 13356 static int bnxt_get_registered_vfs(struct bnxt *bp) 13357 { 13358 #ifdef CONFIG_BNXT_SRIOV 13359 int rc; 13360 13361 if (!BNXT_PF(bp)) 13362 return 0; 13363 13364 rc = bnxt_hwrm_func_qcfg(bp); 13365 if (rc) { 13366 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 13367 return rc; 13368 } 13369 if (bp->pf.registered_vfs) 13370 return bp->pf.registered_vfs; 13371 if (bp->sriov_cfg) 13372 return 1; 13373 #endif 13374 return 0; 13375 } 13376 13377 void bnxt_fw_reset(struct bnxt *bp) 13378 { 13379 bnxt_ulp_stop(bp); 13380 bnxt_rtnl_lock_sp(bp); 13381 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 13382 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 13383 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 13384 int n = 0, tmo; 13385 13386 if (ptp) { 13387 spin_lock_bh(&ptp->ptp_lock); 13388 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13389 spin_unlock_bh(&ptp->ptp_lock); 13390 } else { 13391 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13392 } 13393 if (bp->pf.active_vfs && 13394 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 13395 n = bnxt_get_registered_vfs(bp); 13396 if (n < 0) { 13397 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 13398 n); 13399 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13400 dev_close(bp->dev); 13401 goto fw_reset_exit; 13402 } else if (n > 0) { 13403 u16 vf_tmo_dsecs = n * 10; 13404 13405 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 13406 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 13407 bp->fw_reset_state = 13408 BNXT_FW_RESET_STATE_POLL_VF; 13409 bnxt_queue_fw_reset_work(bp, HZ / 10); 13410 goto fw_reset_exit; 13411 } 13412 bnxt_fw_reset_close(bp); 13413 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13414 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 13415 tmo = HZ / 10; 13416 } else { 13417 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13418 tmo = bp->fw_reset_min_dsecs * HZ / 10; 13419 } 13420 bnxt_queue_fw_reset_work(bp, tmo); 13421 } 13422 fw_reset_exit: 13423 bnxt_rtnl_unlock_sp(bp); 13424 } 13425 13426 static void bnxt_chk_missed_irq(struct bnxt *bp) 13427 { 13428 int i; 13429 13430 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13431 return; 13432 13433 for (i = 0; i < bp->cp_nr_rings; i++) { 13434 struct bnxt_napi *bnapi = bp->bnapi[i]; 13435 struct bnxt_cp_ring_info *cpr; 13436 u32 fw_ring_id; 13437 int j; 13438 13439 if (!bnapi) 13440 continue; 13441 13442 cpr = &bnapi->cp_ring; 13443 for (j = 0; j < cpr->cp_ring_count; j++) { 13444 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 13445 u32 val[2]; 13446 13447 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 13448 continue; 13449 13450 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 13451 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 13452 continue; 13453 } 13454 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 13455 bnxt_dbg_hwrm_ring_info_get(bp, 13456 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 13457 fw_ring_id, &val[0], &val[1]); 13458 cpr->sw_stats->cmn.missed_irqs++; 13459 } 13460 } 13461 } 13462 13463 static void bnxt_cfg_ntp_filters(struct bnxt *); 13464 13465 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 13466 { 13467 struct bnxt_link_info *link_info = &bp->link_info; 13468 13469 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 13470 link_info->autoneg = BNXT_AUTONEG_SPEED; 13471 if (bp->hwrm_spec_code >= 0x10201) { 13472 if (link_info->auto_pause_setting & 13473 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 13474 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 13475 } else { 13476 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 13477 } 13478 
bnxt_set_auto_speed(link_info); 13479 } else { 13480 bnxt_set_force_speed(link_info); 13481 link_info->req_duplex = link_info->duplex_setting; 13482 } 13483 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 13484 link_info->req_flow_ctrl = 13485 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 13486 else 13487 link_info->req_flow_ctrl = link_info->force_pause_setting; 13488 } 13489 13490 static void bnxt_fw_echo_reply(struct bnxt *bp) 13491 { 13492 struct bnxt_fw_health *fw_health = bp->fw_health; 13493 struct hwrm_func_echo_response_input *req; 13494 int rc; 13495 13496 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 13497 if (rc) 13498 return; 13499 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 13500 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 13501 hwrm_req_send(bp, req); 13502 } 13503 13504 static void bnxt_ulp_restart(struct bnxt *bp) 13505 { 13506 bnxt_ulp_stop(bp); 13507 bnxt_ulp_start(bp, 0); 13508 } 13509 13510 static void bnxt_sp_task(struct work_struct *work) 13511 { 13512 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 13513 13514 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13515 smp_mb__after_atomic(); 13516 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13517 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13518 return; 13519 } 13520 13521 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { 13522 bnxt_ulp_restart(bp); 13523 bnxt_reenable_sriov(bp); 13524 } 13525 13526 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 13527 bnxt_cfg_rx_mode(bp); 13528 13529 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 13530 bnxt_cfg_ntp_filters(bp); 13531 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 13532 bnxt_hwrm_exec_fwd_req(bp); 13533 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 13534 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 13535 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 13536 bnxt_hwrm_port_qstats(bp, 0); 13537 bnxt_hwrm_port_qstats_ext(bp, 0); 13538 bnxt_accumulate_all_stats(bp); 13539 } 13540 13541 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 13542 int rc; 13543 13544 mutex_lock(&bp->link_lock); 13545 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 13546 &bp->sp_event)) 13547 bnxt_hwrm_phy_qcaps(bp); 13548 13549 rc = bnxt_update_link(bp, true); 13550 if (rc) 13551 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 13552 rc); 13553 13554 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 13555 &bp->sp_event)) 13556 bnxt_init_ethtool_link_settings(bp); 13557 mutex_unlock(&bp->link_lock); 13558 } 13559 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 13560 int rc; 13561 13562 mutex_lock(&bp->link_lock); 13563 rc = bnxt_update_phy_setting(bp); 13564 mutex_unlock(&bp->link_lock); 13565 if (rc) { 13566 netdev_warn(bp->dev, "update phy settings retry failed\n"); 13567 } else { 13568 bp->link_info.phy_retry = false; 13569 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 13570 } 13571 } 13572 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 13573 mutex_lock(&bp->link_lock); 13574 bnxt_get_port_module_status(bp); 13575 mutex_unlock(&bp->link_lock); 13576 } 13577 13578 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 13579 bnxt_tc_flow_stats_work(bp); 13580 13581 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 13582 bnxt_chk_missed_irq(bp); 13583 13584 if 
(test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 13585 bnxt_fw_echo_reply(bp); 13586 13587 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 13588 bnxt_hwmon_notify_event(bp); 13589 13590 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 13591 * must be the last functions to be called before exiting. 13592 */ 13593 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 13594 bnxt_reset(bp, false); 13595 13596 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 13597 bnxt_reset(bp, true); 13598 13599 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 13600 bnxt_rx_ring_reset(bp); 13601 13602 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 13603 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 13604 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 13605 bnxt_devlink_health_fw_report(bp); 13606 else 13607 bnxt_fw_reset(bp); 13608 } 13609 13610 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 13611 if (!is_bnxt_fw_ok(bp)) 13612 bnxt_devlink_health_fw_report(bp); 13613 } 13614 13615 smp_mb__before_atomic(); 13616 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13617 } 13618 13619 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 13620 int *max_cp); 13621 13622 /* Under rtnl_lock */ 13623 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 13624 int tx_xdp) 13625 { 13626 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 13627 struct bnxt_hw_rings hwr = {0}; 13628 int rx_rings = rx; 13629 13630 if (tcs) 13631 tx_sets = tcs; 13632 13633 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 13634 13635 if (max_rx < rx_rings) 13636 return -ENOMEM; 13637 13638 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13639 rx_rings <<= 1; 13640 13641 hwr.rx = rx_rings; 13642 hwr.tx = tx * tx_sets + tx_xdp; 13643 if (max_tx < hwr.tx) 13644 return -ENOMEM; 13645 13646 hwr.vnic = bnxt_get_total_vnics(bp, rx); 13647 13648 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 13649 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 13650 if (max_cp < hwr.cp) 13651 return -ENOMEM; 13652 hwr.stat = hwr.cp; 13653 if (BNXT_NEW_RM(bp)) { 13654 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); 13655 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); 13656 hwr.grp = rx; 13657 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 13658 } 13659 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 13660 hwr.cp_p5 = hwr.tx + rx; 13661 return bnxt_hwrm_check_rings(bp, &hwr); 13662 } 13663 13664 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 13665 { 13666 if (bp->bar2) { 13667 pci_iounmap(pdev, bp->bar2); 13668 bp->bar2 = NULL; 13669 } 13670 13671 if (bp->bar1) { 13672 pci_iounmap(pdev, bp->bar1); 13673 bp->bar1 = NULL; 13674 } 13675 13676 if (bp->bar0) { 13677 pci_iounmap(pdev, bp->bar0); 13678 bp->bar0 = NULL; 13679 } 13680 } 13681 13682 static void bnxt_cleanup_pci(struct bnxt *bp) 13683 { 13684 bnxt_unmap_bars(bp, bp->pdev); 13685 pci_release_regions(bp->pdev); 13686 if (pci_is_enabled(bp->pdev)) 13687 pci_disable_device(bp->pdev); 13688 } 13689 13690 static void bnxt_init_dflt_coal(struct bnxt *bp) 13691 { 13692 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 13693 struct bnxt_coal *coal; 13694 u16 flags = 0; 13695 13696 if (coal_cap->cmpl_params & 13697 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 13698 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 13699 13700 /* Tick values in micro seconds. 
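 * e.g. rx_coal.coal_ticks = 10 below arms a 10 usec coalescing timer.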
13701 * 1 coal_buf x bufs_per_record = 1 completion record. 13702 */ 13703 coal = &bp->rx_coal; 13704 coal->coal_ticks = 10; 13705 coal->coal_bufs = 30; 13706 coal->coal_ticks_irq = 1; 13707 coal->coal_bufs_irq = 2; 13708 coal->idle_thresh = 50; 13709 coal->bufs_per_record = 2; 13710 coal->budget = 64; /* NAPI budget */ 13711 coal->flags = flags; 13712 13713 coal = &bp->tx_coal; 13714 coal->coal_ticks = 28; 13715 coal->coal_bufs = 30; 13716 coal->coal_ticks_irq = 2; 13717 coal->coal_bufs_irq = 2; 13718 coal->bufs_per_record = 1; 13719 coal->flags = flags; 13720 13721 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 13722 } 13723 13724 /* FW that pre-reserves 1 VNIC per function */ 13725 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 13726 { 13727 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 13728 13729 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13730 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 13731 return true; 13732 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13733 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 13734 return true; 13735 return false; 13736 } 13737 13738 static int bnxt_fw_init_one_p1(struct bnxt *bp) 13739 { 13740 int rc; 13741 13742 bp->fw_cap = 0; 13743 rc = bnxt_hwrm_ver_get(bp); 13744 /* FW may be unresponsive after FLR. FLR must complete within 100 msec 13745 * so wait before continuing with recovery. 13746 */ 13747 if (rc) 13748 msleep(100); 13749 bnxt_try_map_fw_health_reg(bp); 13750 if (rc) { 13751 rc = bnxt_try_recover_fw(bp); 13752 if (rc) 13753 return rc; 13754 rc = bnxt_hwrm_ver_get(bp); 13755 if (rc) 13756 return rc; 13757 } 13758 13759 bnxt_nvm_cfg_ver_get(bp); 13760 13761 rc = bnxt_hwrm_func_reset(bp); 13762 if (rc) 13763 return -ENODEV; 13764 13765 bnxt_hwrm_fw_set_time(bp); 13766 return 0; 13767 } 13768 13769 static int bnxt_fw_init_one_p2(struct bnxt *bp) 13770 { 13771 int rc; 13772 13773 /* Get the MAX capabilities for this function */ 13774 rc = bnxt_hwrm_func_qcaps(bp); 13775 if (rc) { 13776 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 13777 rc); 13778 return -ENODEV; 13779 } 13780 13781 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 13782 if (rc) 13783 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 13784 rc); 13785 13786 if (bnxt_alloc_fw_health(bp)) { 13787 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 13788 } else { 13789 rc = bnxt_hwrm_error_recovery_qcfg(bp); 13790 if (rc) 13791 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 13792 rc); 13793 } 13794 13795 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 13796 if (rc) 13797 return -ENODEV; 13798 13799 if (bnxt_fw_pre_resv_vnics(bp)) 13800 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 13801 13802 bnxt_hwrm_func_qcfg(bp); 13803 bnxt_hwrm_vnic_qcaps(bp); 13804 bnxt_hwrm_port_led_qcaps(bp); 13805 bnxt_ethtool_init(bp); 13806 if (bp->fw_cap & BNXT_FW_CAP_PTP) 13807 __bnxt_hwrm_ptp_qcfg(bp); 13808 bnxt_dcb_init(bp); 13809 bnxt_hwmon_init(bp); 13810 return 0; 13811 } 13812 13813 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 13814 { 13815 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 13816 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 13817 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 13818 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 13819 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 13820 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 13821 bp->rss_hash_delta = bp->rss_hash_cfg; 13822 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 13823 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 13824 
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 13825 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 13826 } 13827 } 13828 13829 static void bnxt_set_dflt_rfs(struct bnxt *bp) 13830 { 13831 struct net_device *dev = bp->dev; 13832 13833 dev->hw_features &= ~NETIF_F_NTUPLE; 13834 dev->features &= ~NETIF_F_NTUPLE; 13835 bp->flags &= ~BNXT_FLAG_RFS; 13836 if (bnxt_rfs_supported(bp)) { 13837 dev->hw_features |= NETIF_F_NTUPLE; 13838 if (bnxt_rfs_capable(bp, false)) { 13839 bp->flags |= BNXT_FLAG_RFS; 13840 dev->features |= NETIF_F_NTUPLE; 13841 } 13842 } 13843 } 13844 13845 static void bnxt_fw_init_one_p3(struct bnxt *bp) 13846 { 13847 struct pci_dev *pdev = bp->pdev; 13848 13849 bnxt_set_dflt_rss_hash_type(bp); 13850 bnxt_set_dflt_rfs(bp); 13851 13852 bnxt_get_wol_settings(bp); 13853 if (bp->flags & BNXT_FLAG_WOL_CAP) 13854 device_set_wakeup_enable(&pdev->dev, bp->wol); 13855 else 13856 device_set_wakeup_capable(&pdev->dev, false); 13857 13858 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 13859 bnxt_hwrm_coal_params_qcaps(bp); 13860 } 13861 13862 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 13863 13864 int bnxt_fw_init_one(struct bnxt *bp) 13865 { 13866 int rc; 13867 13868 rc = bnxt_fw_init_one_p1(bp); 13869 if (rc) { 13870 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 13871 return rc; 13872 } 13873 rc = bnxt_fw_init_one_p2(bp); 13874 if (rc) { 13875 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 13876 return rc; 13877 } 13878 rc = bnxt_probe_phy(bp, false); 13879 if (rc) 13880 return rc; 13881 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 13882 if (rc) 13883 return rc; 13884 13885 bnxt_fw_init_one_p3(bp); 13886 return 0; 13887 } 13888 13889 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 13890 { 13891 struct bnxt_fw_health *fw_health = bp->fw_health; 13892 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 13893 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 13894 u32 reg_type, reg_off, delay_msecs; 13895 13896 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 13897 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 13898 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 13899 switch (reg_type) { 13900 case BNXT_FW_HEALTH_REG_TYPE_CFG: 13901 pci_write_config_dword(bp->pdev, reg_off, val); 13902 break; 13903 case BNXT_FW_HEALTH_REG_TYPE_GRC: 13904 writel(reg_off & BNXT_GRC_BASE_MASK, 13905 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 13906 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 13907 fallthrough; 13908 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 13909 writel(val, bp->bar0 + reg_off); 13910 break; 13911 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 13912 writel(val, bp->bar1 + reg_off); 13913 break; 13914 } 13915 if (delay_msecs) { 13916 pci_read_config_dword(bp->pdev, 0, &val); 13917 msleep(delay_msecs); 13918 } 13919 } 13920 13921 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 13922 { 13923 struct hwrm_func_qcfg_output *resp; 13924 struct hwrm_func_qcfg_input *req; 13925 bool result = true; /* firmware will enforce if unknown */ 13926 13927 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 13928 return result; 13929 13930 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 13931 return result; 13932 13933 req->fid = cpu_to_le16(0xffff); 13934 resp = hwrm_req_hold(bp, req); 13935 if (!hwrm_req_send(bp, req)) 13936 result = !!(le16_to_cpu(resp->flags) & 13937 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 13938 hwrm_req_drop(bp, req); 13939 return result; 13940 } 13941 13942 static void bnxt_reset_all(struct bnxt *bp) 13943 { 13944 struct bnxt_fw_health *fw_health = 
bp->fw_health; 13945 int i, rc; 13946 13947 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13948 bnxt_fw_reset_via_optee(bp); 13949 bp->fw_reset_timestamp = jiffies; 13950 return; 13951 } 13952 13953 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 13954 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 13955 bnxt_fw_reset_writel(bp, i); 13956 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 13957 struct hwrm_fw_reset_input *req; 13958 13959 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 13960 if (!rc) { 13961 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 13962 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 13963 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 13964 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 13965 rc = hwrm_req_send(bp, req); 13966 } 13967 if (rc != -ENODEV) 13968 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 13969 } 13970 bp->fw_reset_timestamp = jiffies; 13971 } 13972 13973 static bool bnxt_fw_reset_timeout(struct bnxt *bp) 13974 { 13975 return time_after(jiffies, bp->fw_reset_timestamp + 13976 (bp->fw_reset_max_dsecs * HZ / 10)); 13977 } 13978 13979 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 13980 { 13981 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13982 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 13983 bnxt_dl_health_fw_status_update(bp, false); 13984 bp->fw_reset_state = 0; 13985 dev_close(bp->dev); 13986 } 13987 13988 static void bnxt_fw_reset_task(struct work_struct *work) 13989 { 13990 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 13991 int rc = 0; 13992 13993 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 13994 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 13995 return; 13996 } 13997 13998 switch (bp->fw_reset_state) { 13999 case BNXT_FW_RESET_STATE_POLL_VF: { 14000 int n = bnxt_get_registered_vfs(bp); 14001 int tmo; 14002 14003 if (n < 0) { 14004 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 14005 n, jiffies_to_msecs(jiffies - 14006 bp->fw_reset_timestamp)); 14007 goto fw_reset_abort; 14008 } else if (n > 0) { 14009 if (bnxt_fw_reset_timeout(bp)) { 14010 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14011 bp->fw_reset_state = 0; 14012 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 14013 n); 14014 goto ulp_start; 14015 } 14016 bnxt_queue_fw_reset_work(bp, HZ / 10); 14017 return; 14018 } 14019 bp->fw_reset_timestamp = jiffies; 14020 rtnl_lock(); 14021 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 14022 bnxt_fw_reset_abort(bp, rc); 14023 rtnl_unlock(); 14024 goto ulp_start; 14025 } 14026 bnxt_fw_reset_close(bp); 14027 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14028 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14029 tmo = HZ / 10; 14030 } else { 14031 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14032 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14033 } 14034 rtnl_unlock(); 14035 bnxt_queue_fw_reset_work(bp, tmo); 14036 return; 14037 } 14038 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 14039 u32 val; 14040 14041 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14042 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 14043 !bnxt_fw_reset_timeout(bp)) { 14044 bnxt_queue_fw_reset_work(bp, HZ / 5); 14045 return; 14046 } 14047 14048 if (!bp->fw_health->primary) { 14049 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 14050 14051 bp->fw_reset_state = 
BNXT_FW_RESET_STATE_ENABLE_DEV; 14052 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14053 return; 14054 } 14055 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14056 } 14057 fallthrough; 14058 case BNXT_FW_RESET_STATE_RESET_FW: 14059 bnxt_reset_all(bp); 14060 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14061 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 14062 return; 14063 case BNXT_FW_RESET_STATE_ENABLE_DEV: 14064 bnxt_inv_fw_health_reg(bp); 14065 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 14066 !bp->fw_reset_min_dsecs) { 14067 u16 val; 14068 14069 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14070 if (val == 0xffff) { 14071 if (bnxt_fw_reset_timeout(bp)) { 14072 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 14073 rc = -ETIMEDOUT; 14074 goto fw_reset_abort; 14075 } 14076 bnxt_queue_fw_reset_work(bp, HZ / 1000); 14077 return; 14078 } 14079 } 14080 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14081 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 14082 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 14083 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 14084 bnxt_dl_remote_reload(bp); 14085 if (pci_enable_device(bp->pdev)) { 14086 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 14087 rc = -ENODEV; 14088 goto fw_reset_abort; 14089 } 14090 pci_set_master(bp->pdev); 14091 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 14092 fallthrough; 14093 case BNXT_FW_RESET_STATE_POLL_FW: 14094 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 14095 rc = bnxt_hwrm_poll(bp); 14096 if (rc) { 14097 if (bnxt_fw_reset_timeout(bp)) { 14098 netdev_err(bp->dev, "Firmware reset aborted\n"); 14099 goto fw_reset_abort_status; 14100 } 14101 bnxt_queue_fw_reset_work(bp, HZ / 5); 14102 return; 14103 } 14104 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 14105 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 14106 fallthrough; 14107 case BNXT_FW_RESET_STATE_OPENING: 14108 while (!rtnl_trylock()) { 14109 bnxt_queue_fw_reset_work(bp, HZ / 10); 14110 return; 14111 } 14112 rc = bnxt_open(bp->dev); 14113 if (rc) { 14114 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 14115 bnxt_fw_reset_abort(bp, rc); 14116 rtnl_unlock(); 14117 goto ulp_start; 14118 } 14119 14120 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 14121 bp->fw_health->enabled) { 14122 bp->fw_health->last_fw_reset_cnt = 14123 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14124 } 14125 bp->fw_reset_state = 0; 14126 /* Make sure fw_reset_state is 0 before clearing the flag */ 14127 smp_mb__before_atomic(); 14128 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14129 bnxt_ptp_reapply_pps(bp); 14130 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 14131 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 14132 bnxt_dl_health_fw_recovery_done(bp); 14133 bnxt_dl_health_fw_status_update(bp, true); 14134 } 14135 rtnl_unlock(); 14136 bnxt_ulp_start(bp, 0); 14137 bnxt_reenable_sriov(bp); 14138 rtnl_lock(); 14139 bnxt_vf_reps_alloc(bp); 14140 bnxt_vf_reps_open(bp); 14141 rtnl_unlock(); 14142 break; 14143 } 14144 return; 14145 14146 fw_reset_abort_status: 14147 if (bp->fw_health->status_reliable || 14148 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 14149 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14150 14151 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 14152 } 14153 fw_reset_abort: 14154 rtnl_lock(); 14155 bnxt_fw_reset_abort(bp, rc); 14156 rtnl_unlock(); 14157 ulp_start: 14158 bnxt_ulp_start(bp, rc); 14159 } 14160 
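/* One-time PCI/board setup: enable the PCI device, validate and request
 * its MMIO regions, set a 64-bit (or 32-bit fallback) DMA mask, map BAR0
 * and BAR4, and initialize the slow-path work items, locks, default ring
 * sizes and the periodic timer.  The doorbell BAR (bp->bar1) is mapped
 * later, after firmware reports its size.
 */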
14161 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 14162 { 14163 int rc; 14164 struct bnxt *bp = netdev_priv(dev); 14165 14166 SET_NETDEV_DEV(dev, &pdev->dev); 14167 14168 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 14169 rc = pci_enable_device(pdev); 14170 if (rc) { 14171 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 14172 goto init_err; 14173 } 14174 14175 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 14176 dev_err(&pdev->dev, 14177 "Cannot find PCI device base address, aborting\n"); 14178 rc = -ENODEV; 14179 goto init_err_disable; 14180 } 14181 14182 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 14183 if (rc) { 14184 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 14185 goto init_err_disable; 14186 } 14187 14188 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 14189 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 14190 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 14191 rc = -EIO; 14192 goto init_err_release; 14193 } 14194 14195 pci_set_master(pdev); 14196 14197 bp->dev = dev; 14198 bp->pdev = pdev; 14199 14200 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 14201 * determines the BAR size. 14202 */ 14203 bp->bar0 = pci_ioremap_bar(pdev, 0); 14204 if (!bp->bar0) { 14205 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 14206 rc = -ENOMEM; 14207 goto init_err_release; 14208 } 14209 14210 bp->bar2 = pci_ioremap_bar(pdev, 4); 14211 if (!bp->bar2) { 14212 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 14213 rc = -ENOMEM; 14214 goto init_err_release; 14215 } 14216 14217 INIT_WORK(&bp->sp_task, bnxt_sp_task); 14218 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 14219 14220 spin_lock_init(&bp->ntp_fltr_lock); 14221 #if BITS_PER_LONG == 32 14222 spin_lock_init(&bp->db_lock); 14223 #endif 14224 14225 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 14226 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 14227 14228 timer_setup(&bp->timer, bnxt_timer, 0); 14229 bp->current_interval = BNXT_TIMER_INTERVAL; 14230 14231 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 14232 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 14233 14234 clear_bit(BNXT_STATE_OPEN, &bp->state); 14235 return 0; 14236 14237 init_err_release: 14238 bnxt_unmap_bars(bp, pdev); 14239 pci_release_regions(pdev); 14240 14241 init_err_disable: 14242 pci_disable_device(pdev); 14243 14244 init_err: 14245 return rc; 14246 } 14247 14248 /* rtnl_lock held */ 14249 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 14250 { 14251 struct sockaddr *addr = p; 14252 struct bnxt *bp = netdev_priv(dev); 14253 int rc = 0; 14254 14255 if (!is_valid_ether_addr(addr->sa_data)) 14256 return -EADDRNOTAVAIL; 14257 14258 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 14259 return 0; 14260 14261 rc = bnxt_approve_mac(bp, addr->sa_data, true); 14262 if (rc) 14263 return rc; 14264 14265 eth_hw_addr_set(dev, addr->sa_data); 14266 bnxt_clear_usr_fltrs(bp, true); 14267 if (netif_running(dev)) { 14268 bnxt_close_nic(bp, false, false); 14269 rc = bnxt_open_nic(bp, false, false); 14270 } 14271 14272 return rc; 14273 } 14274 14275 /* rtnl_lock held */ 14276 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 14277 { 14278 struct bnxt *bp = netdev_priv(dev); 14279 14280 if (netif_running(dev)) 14281 bnxt_close_nic(bp, true, false); 14282 14283 WRITE_ONCE(dev->mtu, new_mtu); 14284 bnxt_set_ring_params(bp); 14285 14286 if (netif_running(dev)) 14287 
return bnxt_open_nic(bp, true, false); 14288 14289 return 0; 14290 } 14291 14292 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 14293 { 14294 struct bnxt *bp = netdev_priv(dev); 14295 bool sh = false; 14296 int rc, tx_cp; 14297 14298 if (tc > bp->max_tc) { 14299 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 14300 tc, bp->max_tc); 14301 return -EINVAL; 14302 } 14303 14304 if (bp->num_tc == tc) 14305 return 0; 14306 14307 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 14308 sh = true; 14309 14310 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 14311 sh, tc, bp->tx_nr_rings_xdp); 14312 if (rc) 14313 return rc; 14314 14315 /* Needs to close the device and do hw resource re-allocations */ 14316 if (netif_running(bp->dev)) 14317 bnxt_close_nic(bp, true, false); 14318 14319 if (tc) { 14320 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 14321 netdev_set_num_tc(dev, tc); 14322 bp->num_tc = tc; 14323 } else { 14324 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 14325 netdev_reset_tc(dev); 14326 bp->num_tc = 0; 14327 } 14328 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 14329 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 14330 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 14331 tx_cp + bp->rx_nr_rings; 14332 14333 if (netif_running(bp->dev)) 14334 return bnxt_open_nic(bp, true, false); 14335 14336 return 0; 14337 } 14338 14339 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 14340 void *cb_priv) 14341 { 14342 struct bnxt *bp = cb_priv; 14343 14344 if (!bnxt_tc_flower_enabled(bp) || 14345 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 14346 return -EOPNOTSUPP; 14347 14348 switch (type) { 14349 case TC_SETUP_CLSFLOWER: 14350 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 14351 default: 14352 return -EOPNOTSUPP; 14353 } 14354 } 14355 14356 LIST_HEAD(bnxt_block_cb_list); 14357 14358 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 14359 void *type_data) 14360 { 14361 struct bnxt *bp = netdev_priv(dev); 14362 14363 switch (type) { 14364 case TC_SETUP_BLOCK: 14365 return flow_block_cb_setup_simple(type_data, 14366 &bnxt_block_cb_list, 14367 bnxt_setup_tc_block_cb, 14368 bp, bp, true); 14369 case TC_SETUP_QDISC_MQPRIO: { 14370 struct tc_mqprio_qopt *mqprio = type_data; 14371 14372 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 14373 14374 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 14375 } 14376 default: 14377 return -EOPNOTSUPP; 14378 } 14379 } 14380 14381 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 14382 const struct sk_buff *skb) 14383 { 14384 struct bnxt_vnic_info *vnic; 14385 14386 if (skb) 14387 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 14388 14389 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 14390 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 14391 } 14392 14393 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 14394 u32 idx) 14395 { 14396 struct hlist_head *head; 14397 int bit_id; 14398 14399 spin_lock_bh(&bp->ntp_fltr_lock); 14400 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); 14401 if (bit_id < 0) { 14402 spin_unlock_bh(&bp->ntp_fltr_lock); 14403 return -ENOMEM; 14404 } 14405 14406 fltr->base.sw_id = (u16)bit_id; 14407 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 14408 fltr->base.flags |= BNXT_ACT_RING_DST; 14409 head = &bp->ntp_fltr_hash_tbl[idx]; 14410 hlist_add_head_rcu(&fltr->base.hash, head); 14411 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 14412 bnxt_insert_usr_fltr(bp, 
&fltr->base); 14413 bp->ntp_fltr_count++; 14414 spin_unlock_bh(&bp->ntp_fltr_lock); 14415 return 0; 14416 } 14417 14418 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 14419 struct bnxt_ntuple_filter *f2) 14420 { 14421 struct bnxt_flow_masks *masks1 = &f1->fmasks; 14422 struct bnxt_flow_masks *masks2 = &f2->fmasks; 14423 struct flow_keys *keys1 = &f1->fkeys; 14424 struct flow_keys *keys2 = &f2->fkeys; 14425 14426 if (keys1->basic.n_proto != keys2->basic.n_proto || 14427 keys1->basic.ip_proto != keys2->basic.ip_proto) 14428 return false; 14429 14430 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 14431 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 14432 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || 14433 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || 14434 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) 14435 return false; 14436 } else { 14437 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, 14438 &keys2->addrs.v6addrs.src) || 14439 !ipv6_addr_equal(&masks1->addrs.v6addrs.src, 14440 &masks2->addrs.v6addrs.src) || 14441 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, 14442 &keys2->addrs.v6addrs.dst) || 14443 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, 14444 &masks2->addrs.v6addrs.dst)) 14445 return false; 14446 } 14447 14448 return keys1->ports.src == keys2->ports.src && 14449 masks1->ports.src == masks2->ports.src && 14450 keys1->ports.dst == keys2->ports.dst && 14451 masks1->ports.dst == masks2->ports.dst && 14452 keys1->control.flags == keys2->control.flags && 14453 f1->l2_fltr == f2->l2_fltr; 14454 } 14455 14456 struct bnxt_ntuple_filter * 14457 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 14458 struct bnxt_ntuple_filter *fltr, u32 idx) 14459 { 14460 struct bnxt_ntuple_filter *f; 14461 struct hlist_head *head; 14462 14463 head = &bp->ntp_fltr_hash_tbl[idx]; 14464 hlist_for_each_entry_rcu(f, head, base.hash) { 14465 if (bnxt_fltr_match(f, fltr)) 14466 return f; 14467 } 14468 return NULL; 14469 } 14470 14471 #ifdef CONFIG_RFS_ACCEL 14472 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 14473 u16 rxq_index, u32 flow_id) 14474 { 14475 struct bnxt *bp = netdev_priv(dev); 14476 struct bnxt_ntuple_filter *fltr, *new_fltr; 14477 struct flow_keys *fkeys; 14478 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 14479 struct bnxt_l2_filter *l2_fltr; 14480 int rc = 0, idx; 14481 u32 flags; 14482 14483 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 14484 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 14485 atomic_inc(&l2_fltr->refcnt); 14486 } else { 14487 struct bnxt_l2_key key; 14488 14489 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 14490 key.vlan = 0; 14491 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 14492 if (!l2_fltr) 14493 return -EINVAL; 14494 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 14495 bnxt_del_l2_filter(bp, l2_fltr); 14496 return -EINVAL; 14497 } 14498 } 14499 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 14500 if (!new_fltr) { 14501 bnxt_del_l2_filter(bp, l2_fltr); 14502 return -ENOMEM; 14503 } 14504 14505 fkeys = &new_fltr->fkeys; 14506 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 14507 rc = -EPROTONOSUPPORT; 14508 goto err_free; 14509 } 14510 14511 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 14512 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 14513 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 14514 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 14515 rc = -EPROTONOSUPPORT; 14516 goto err_free; 14517 } 14518 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; 
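	/* IPv6 n-tuple filters need HWRM spec 0x10601 or newer; widen the
	 * default IPv4 masks to the IPv6 mask set for IPv6 flows.
	 */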
14519 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 14520 if (bp->hwrm_spec_code < 0x10601) { 14521 rc = -EPROTONOSUPPORT; 14522 goto err_free; 14523 } 14524 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; 14525 } 14526 flags = fkeys->control.flags; 14527 if (((flags & FLOW_DIS_ENCAPSULATION) && 14528 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 14529 rc = -EPROTONOSUPPORT; 14530 goto err_free; 14531 } 14532 new_fltr->l2_fltr = l2_fltr; 14533 14534 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 14535 rcu_read_lock(); 14536 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 14537 if (fltr) { 14538 rc = fltr->base.sw_id; 14539 rcu_read_unlock(); 14540 goto err_free; 14541 } 14542 rcu_read_unlock(); 14543 14544 new_fltr->flow_id = flow_id; 14545 new_fltr->base.rxq = rxq_index; 14546 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 14547 if (!rc) { 14548 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 14549 return new_fltr->base.sw_id; 14550 } 14551 14552 err_free: 14553 bnxt_del_l2_filter(bp, l2_fltr); 14554 kfree(new_fltr); 14555 return rc; 14556 } 14557 #endif 14558 14559 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 14560 { 14561 spin_lock_bh(&bp->ntp_fltr_lock); 14562 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 14563 spin_unlock_bh(&bp->ntp_fltr_lock); 14564 return; 14565 } 14566 hlist_del_rcu(&fltr->base.hash); 14567 bnxt_del_one_usr_fltr(bp, &fltr->base); 14568 bp->ntp_fltr_count--; 14569 spin_unlock_bh(&bp->ntp_fltr_lock); 14570 bnxt_del_l2_filter(bp, fltr->l2_fltr); 14571 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 14572 kfree_rcu(fltr, base.rcu); 14573 } 14574 14575 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 14576 { 14577 #ifdef CONFIG_RFS_ACCEL 14578 int i; 14579 14580 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 14581 struct hlist_head *head; 14582 struct hlist_node *tmp; 14583 struct bnxt_ntuple_filter *fltr; 14584 int rc; 14585 14586 head = &bp->ntp_fltr_hash_tbl[i]; 14587 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 14588 bool del = false; 14589 14590 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 14591 if (fltr->base.flags & BNXT_ACT_NO_AGING) 14592 continue; 14593 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 14594 fltr->flow_id, 14595 fltr->base.sw_id)) { 14596 bnxt_hwrm_cfa_ntuple_filter_free(bp, 14597 fltr); 14598 del = true; 14599 } 14600 } else { 14601 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 14602 fltr); 14603 if (rc) 14604 del = true; 14605 else 14606 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 14607 } 14608 14609 if (del) 14610 bnxt_del_ntp_filter(bp, fltr); 14611 } 14612 } 14613 #endif 14614 } 14615 14616 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 14617 unsigned int entry, struct udp_tunnel_info *ti) 14618 { 14619 struct bnxt *bp = netdev_priv(netdev); 14620 unsigned int cmd; 14621 14622 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 14623 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 14624 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14625 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 14626 else 14627 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 14628 14629 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 14630 } 14631 14632 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 14633 unsigned int entry, struct udp_tunnel_info *ti) 14634 { 14635 struct bnxt *bp = netdev_priv(netdev); 14636 unsigned int cmd; 14637 14638 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 
14639 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 14640 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14641 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 14642 else 14643 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 14644 14645 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 14646 } 14647 14648 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 14649 .set_port = bnxt_udp_tunnel_set_port, 14650 .unset_port = bnxt_udp_tunnel_unset_port, 14651 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14652 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14653 .tables = { 14654 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14655 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14656 }, 14657 }, bnxt_udp_tunnels_p7 = { 14658 .set_port = bnxt_udp_tunnel_set_port, 14659 .unset_port = bnxt_udp_tunnel_unset_port, 14660 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14661 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14662 .tables = { 14663 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14664 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14665 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 14666 }, 14667 }; 14668 14669 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 14670 struct net_device *dev, u32 filter_mask, 14671 int nlflags) 14672 { 14673 struct bnxt *bp = netdev_priv(dev); 14674 14675 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 14676 nlflags, filter_mask, NULL); 14677 } 14678 14679 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 14680 u16 flags, struct netlink_ext_ack *extack) 14681 { 14682 struct bnxt *bp = netdev_priv(dev); 14683 struct nlattr *attr, *br_spec; 14684 int rem, rc = 0; 14685 14686 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 14687 return -EOPNOTSUPP; 14688 14689 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 14690 if (!br_spec) 14691 return -EINVAL; 14692 14693 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 14694 u16 mode; 14695 14696 mode = nla_get_u16(attr); 14697 if (mode == bp->br_mode) 14698 break; 14699 14700 rc = bnxt_hwrm_set_br_mode(bp, mode); 14701 if (!rc) 14702 bp->br_mode = mode; 14703 break; 14704 } 14705 return rc; 14706 } 14707 14708 int bnxt_get_port_parent_id(struct net_device *dev, 14709 struct netdev_phys_item_id *ppid) 14710 { 14711 struct bnxt *bp = netdev_priv(dev); 14712 14713 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 14714 return -EOPNOTSUPP; 14715 14716 /* The PF and its VF-reps only support the switchdev framework */ 14717 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 14718 return -EOPNOTSUPP; 14719 14720 ppid->id_len = sizeof(bp->dsn); 14721 memcpy(ppid->id, bp->dsn, ppid->id_len); 14722 14723 return 0; 14724 } 14725 14726 static const struct net_device_ops bnxt_netdev_ops = { 14727 .ndo_open = bnxt_open, 14728 .ndo_start_xmit = bnxt_start_xmit, 14729 .ndo_stop = bnxt_close, 14730 .ndo_get_stats64 = bnxt_get_stats64, 14731 .ndo_set_rx_mode = bnxt_set_rx_mode, 14732 .ndo_eth_ioctl = bnxt_ioctl, 14733 .ndo_validate_addr = eth_validate_addr, 14734 .ndo_set_mac_address = bnxt_change_mac_addr, 14735 .ndo_change_mtu = bnxt_change_mtu, 14736 .ndo_fix_features = bnxt_fix_features, 14737 .ndo_set_features = bnxt_set_features, 14738 .ndo_features_check = bnxt_features_check, 14739 .ndo_tx_timeout = bnxt_tx_timeout, 14740 #ifdef CONFIG_BNXT_SRIOV 14741 .ndo_get_vf_config = bnxt_get_vf_config, 14742 .ndo_set_vf_mac = bnxt_set_vf_mac, 14743 .ndo_set_vf_vlan =
bnxt_set_vf_vlan, 14744 .ndo_set_vf_rate = bnxt_set_vf_bw, 14745 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 14746 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 14747 .ndo_set_vf_trust = bnxt_set_vf_trust, 14748 #endif 14749 .ndo_setup_tc = bnxt_setup_tc, 14750 #ifdef CONFIG_RFS_ACCEL 14751 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 14752 #endif 14753 .ndo_bpf = bnxt_xdp, 14754 .ndo_xdp_xmit = bnxt_xdp_xmit, 14755 .ndo_bridge_getlink = bnxt_bridge_getlink, 14756 .ndo_bridge_setlink = bnxt_bridge_setlink, 14757 }; 14758 14759 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, 14760 struct netdev_queue_stats_rx *stats) 14761 { 14762 struct bnxt *bp = netdev_priv(dev); 14763 struct bnxt_cp_ring_info *cpr; 14764 u64 *sw; 14765 14766 cpr = &bp->bnapi[i]->cp_ring; 14767 sw = cpr->stats.sw_stats; 14768 14769 stats->packets = 0; 14770 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 14771 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 14772 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 14773 14774 stats->bytes = 0; 14775 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 14776 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 14777 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 14778 14779 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; 14780 } 14781 14782 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, 14783 struct netdev_queue_stats_tx *stats) 14784 { 14785 struct bnxt *bp = netdev_priv(dev); 14786 struct bnxt_napi *bnapi; 14787 u64 *sw; 14788 14789 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; 14790 sw = bnapi->cp_ring.stats.sw_stats; 14791 14792 stats->packets = 0; 14793 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 14794 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 14795 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 14796 14797 stats->bytes = 0; 14798 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 14799 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 14800 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 14801 } 14802 14803 static void bnxt_get_base_stats(struct net_device *dev, 14804 struct netdev_queue_stats_rx *rx, 14805 struct netdev_queue_stats_tx *tx) 14806 { 14807 struct bnxt *bp = netdev_priv(dev); 14808 14809 rx->packets = bp->net_stats_prev.rx_packets; 14810 rx->bytes = bp->net_stats_prev.rx_bytes; 14811 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; 14812 14813 tx->packets = bp->net_stats_prev.tx_packets; 14814 tx->bytes = bp->net_stats_prev.tx_bytes; 14815 } 14816 14817 static const struct netdev_stat_ops bnxt_stat_ops = { 14818 .get_queue_stats_rx = bnxt_get_queue_stats_rx, 14819 .get_queue_stats_tx = bnxt_get_queue_stats_tx, 14820 .get_base_stats = bnxt_get_base_stats, 14821 }; 14822 14823 static void bnxt_remove_one(struct pci_dev *pdev) 14824 { 14825 struct net_device *dev = pci_get_drvdata(pdev); 14826 struct bnxt *bp = netdev_priv(dev); 14827 14828 if (BNXT_PF(bp)) 14829 bnxt_sriov_disable(bp); 14830 14831 bnxt_rdma_aux_device_del(bp); 14832 14833 bnxt_ptp_clear(bp); 14834 unregister_netdev(dev); 14835 14836 bnxt_rdma_aux_device_uninit(bp); 14837 14838 bnxt_free_l2_filters(bp, true); 14839 bnxt_free_ntp_fltrs(bp, true); 14840 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 14841 bnxt_clear_rss_ctxs(bp, true); 14842 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14843 /* Flush any pending tasks */ 14844 cancel_work_sync(&bp->sp_task); 14845 cancel_delayed_work_sync(&bp->fw_reset_task); 
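	/* All deferred sp_task/fw_reset work has been canceled; drop any
	 * stale slow-path event bits.
	 */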
14846 bp->sp_event = 0; 14847 14848 bnxt_dl_fw_reporters_destroy(bp); 14849 bnxt_dl_unregister(bp); 14850 bnxt_shutdown_tc(bp); 14851 14852 bnxt_clear_int_mode(bp); 14853 bnxt_hwrm_func_drv_unrgtr(bp); 14854 bnxt_free_hwrm_resources(bp); 14855 bnxt_hwmon_uninit(bp); 14856 bnxt_ethtool_free(bp); 14857 bnxt_dcb_free(bp); 14858 kfree(bp->ptp_cfg); 14859 bp->ptp_cfg = NULL; 14860 kfree(bp->fw_health); 14861 bp->fw_health = NULL; 14862 bnxt_cleanup_pci(bp); 14863 bnxt_free_ctx_mem(bp); 14864 kfree(bp->rss_indir_tbl); 14865 bp->rss_indir_tbl = NULL; 14866 bnxt_free_port_stats(bp); 14867 free_netdev(dev); 14868 } 14869 14870 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 14871 { 14872 int rc = 0; 14873 struct bnxt_link_info *link_info = &bp->link_info; 14874 14875 bp->phy_flags = 0; 14876 rc = bnxt_hwrm_phy_qcaps(bp); 14877 if (rc) { 14878 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 14879 rc); 14880 return rc; 14881 } 14882 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 14883 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 14884 else 14885 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 14886 if (!fw_dflt) 14887 return 0; 14888 14889 mutex_lock(&bp->link_lock); 14890 rc = bnxt_update_link(bp, false); 14891 if (rc) { 14892 mutex_unlock(&bp->link_lock); 14893 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 14894 rc); 14895 return rc; 14896 } 14897 14898 /* Older firmware does not have supported_auto_speeds, so assume 14899 * that all supported speeds can be autonegotiated. 14900 */ 14901 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 14902 link_info->support_auto_speeds = link_info->support_speeds; 14903 14904 bnxt_init_ethtool_link_settings(bp); 14905 mutex_unlock(&bp->link_lock); 14906 return 0; 14907 } 14908 14909 static int bnxt_get_max_irq(struct pci_dev *pdev) 14910 { 14911 u16 ctrl; 14912 14913 if (!pdev->msix_cap) 14914 return 1; 14915 14916 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 14917 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 14918 } 14919 14920 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14921 int *max_cp) 14922 { 14923 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 14924 int max_ring_grps = 0, max_irq; 14925 14926 *max_tx = hw_resc->max_tx_rings; 14927 *max_rx = hw_resc->max_rx_rings; 14928 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 14929 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 14930 bnxt_get_ulp_msix_num_in_use(bp), 14931 hw_resc->max_stat_ctxs - 14932 bnxt_get_ulp_stat_ctxs_in_use(bp)); 14933 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 14934 *max_cp = min_t(int, *max_cp, max_irq); 14935 max_ring_grps = hw_resc->max_hw_ring_grps; 14936 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 14937 *max_cp -= 1; 14938 *max_rx -= 2; 14939 } 14940 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14941 *max_rx >>= 1; 14942 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 14943 int rc; 14944 14945 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 14946 if (rc) { 14947 *max_rx = 0; 14948 *max_tx = 0; 14949 } 14950 /* On P5 chips, max_cp output param should be available NQs */ 14951 *max_cp = max_irq; 14952 } 14953 *max_rx = min_t(int, *max_rx, max_ring_grps); 14954 } 14955 14956 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 14957 { 14958 int rx, tx, cp; 14959 14960 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 14961 *max_rx = rx; 14962 *max_tx = tx; 14963 if (!rx || !tx || !cp) 14964 return -ENOMEM; 14965 14966 return bnxt_trim_rings(bp, max_rx, max_tx, cp, 
shared); 14967 } 14968 14969 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14970 bool shared) 14971 { 14972 int rc; 14973 14974 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 14975 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 14976 /* Not enough rings, try disabling agg rings. */ 14977 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 14978 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 14979 if (rc) { 14980 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 14981 bp->flags |= BNXT_FLAG_AGG_RINGS; 14982 return rc; 14983 } 14984 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 14985 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 14986 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 14987 bnxt_set_ring_params(bp); 14988 } 14989 14990 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 14991 int max_cp, max_stat, max_irq; 14992 14993 /* Reserve minimum resources for RoCE */ 14994 max_cp = bnxt_get_max_func_cp_rings(bp); 14995 max_stat = bnxt_get_max_func_stat_ctxs(bp); 14996 max_irq = bnxt_get_max_func_irqs(bp); 14997 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 14998 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 14999 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 15000 return 0; 15001 15002 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 15003 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 15004 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 15005 max_cp = min_t(int, max_cp, max_irq); 15006 max_cp = min_t(int, max_cp, max_stat); 15007 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 15008 if (rc) 15009 rc = 0; 15010 } 15011 return rc; 15012 } 15013 15014 /* In initial default shared ring setting, each shared ring must have a 15015 * RX/TX ring pair. 15016 */ 15017 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 15018 { 15019 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 15020 bp->rx_nr_rings = bp->cp_nr_rings; 15021 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 15022 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15023 } 15024 15025 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 15026 { 15027 int dflt_rings, max_rx_rings, max_tx_rings, rc; 15028 int avail_msix; 15029 15030 if (!bnxt_can_reserve_rings(bp)) 15031 return 0; 15032 15033 if (sh) 15034 bp->flags |= BNXT_FLAG_SHARED_RINGS; 15035 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 15036 /* Reduce default rings on multi-port cards so that total default 15037 * rings do not exceed CPU count. 
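	 * e.g. a 2-port adapter on an 8-CPU host is limited to at most 4
	 * default rings per port.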
15038 */ 15039 if (bp->port_count > 1) { 15040 int max_rings = 15041 max_t(int, num_online_cpus() / bp->port_count, 1); 15042 15043 dflt_rings = min_t(int, dflt_rings, max_rings); 15044 } 15045 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 15046 if (rc) 15047 return rc; 15048 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 15049 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 15050 if (sh) 15051 bnxt_trim_dflt_sh_rings(bp); 15052 else 15053 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 15054 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15055 15056 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; 15057 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { 15058 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); 15059 15060 bnxt_set_ulp_msix_num(bp, ulp_num_msix); 15061 bnxt_set_dflt_ulp_stat_ctxs(bp); 15062 } 15063 15064 rc = __bnxt_reserve_rings(bp); 15065 if (rc && rc != -ENODEV) 15066 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 15067 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15068 if (sh) 15069 bnxt_trim_dflt_sh_rings(bp); 15070 15071 /* Rings may have been trimmed, re-reserve the trimmed rings. */ 15072 if (bnxt_need_reserve_rings(bp)) { 15073 rc = __bnxt_reserve_rings(bp); 15074 if (rc && rc != -ENODEV) 15075 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 15076 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15077 } 15078 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 15079 bp->rx_nr_rings++; 15080 bp->cp_nr_rings++; 15081 } 15082 if (rc) { 15083 bp->tx_nr_rings = 0; 15084 bp->rx_nr_rings = 0; 15085 } 15086 return rc; 15087 } 15088 15089 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 15090 { 15091 int rc; 15092 15093 if (bp->tx_nr_rings) 15094 return 0; 15095 15096 bnxt_ulp_irq_stop(bp); 15097 bnxt_clear_int_mode(bp); 15098 rc = bnxt_set_dflt_rings(bp, true); 15099 if (rc) { 15100 if (BNXT_VF(bp) && rc == -ENODEV) 15101 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 15102 else 15103 netdev_err(bp->dev, "Not enough rings available.\n"); 15104 goto init_dflt_ring_err; 15105 } 15106 rc = bnxt_init_int_mode(bp); 15107 if (rc) 15108 goto init_dflt_ring_err; 15109 15110 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 15111 15112 bnxt_set_dflt_rfs(bp); 15113 15114 init_dflt_ring_err: 15115 bnxt_ulp_irq_restart(bp, rc); 15116 return rc; 15117 } 15118 15119 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 15120 { 15121 int rc; 15122 15123 ASSERT_RTNL(); 15124 bnxt_hwrm_func_qcaps(bp); 15125 15126 if (netif_running(bp->dev)) 15127 __bnxt_close_nic(bp, true, false); 15128 15129 bnxt_ulp_irq_stop(bp); 15130 bnxt_clear_int_mode(bp); 15131 rc = bnxt_init_int_mode(bp); 15132 bnxt_ulp_irq_restart(bp, rc); 15133 15134 if (netif_running(bp->dev)) { 15135 if (rc) 15136 dev_close(bp->dev); 15137 else 15138 rc = bnxt_open_nic(bp, true, false); 15139 } 15140 15141 return rc; 15142 } 15143 15144 static int bnxt_init_mac_addr(struct bnxt *bp) 15145 { 15146 int rc = 0; 15147 15148 if (BNXT_PF(bp)) { 15149 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 15150 } else { 15151 #ifdef CONFIG_BNXT_SRIOV 15152 struct bnxt_vf_info *vf = &bp->vf; 15153 bool strict_approval = true; 15154 15155 if (is_valid_ether_addr(vf->mac_addr)) { 15156 /* overwrite netdev dev_addr with admin VF MAC */ 15157 eth_hw_addr_set(bp->dev, vf->mac_addr); 15158 /* Older PF driver or firmware may not approve this 15159 * correctly. 
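		 * In that case only best-effort (non-strict) approval is
		 * requested below.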
15160 */ 15161 strict_approval = false; 15162 } else { 15163 eth_hw_addr_random(bp->dev); 15164 } 15165 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 15166 #endif 15167 } 15168 return rc; 15169 } 15170 15171 static void bnxt_vpd_read_info(struct bnxt *bp) 15172 { 15173 struct pci_dev *pdev = bp->pdev; 15174 unsigned int vpd_size, kw_len; 15175 int pos, size; 15176 u8 *vpd_data; 15177 15178 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 15179 if (IS_ERR(vpd_data)) { 15180 pci_warn(pdev, "Unable to read VPD\n"); 15181 return; 15182 } 15183 15184 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 15185 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 15186 if (pos < 0) 15187 goto read_sn; 15188 15189 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 15190 memcpy(bp->board_partno, &vpd_data[pos], size); 15191 15192 read_sn: 15193 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 15194 PCI_VPD_RO_KEYWORD_SERIALNO, 15195 &kw_len); 15196 if (pos < 0) 15197 goto exit; 15198 15199 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 15200 memcpy(bp->board_serialno, &vpd_data[pos], size); 15201 exit: 15202 kfree(vpd_data); 15203 } 15204 15205 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 15206 { 15207 struct pci_dev *pdev = bp->pdev; 15208 u64 qword; 15209 15210 qword = pci_get_dsn(pdev); 15211 if (!qword) { 15212 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 15213 return -EOPNOTSUPP; 15214 } 15215 15216 put_unaligned_le64(qword, dsn); 15217 15218 bp->flags |= BNXT_FLAG_DSN_VALID; 15219 return 0; 15220 } 15221 15222 static int bnxt_map_db_bar(struct bnxt *bp) 15223 { 15224 if (!bp->db_size) 15225 return -ENODEV; 15226 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 15227 if (!bp->bar1) 15228 return -ENOMEM; 15229 return 0; 15230 } 15231 15232 void bnxt_print_device_info(struct bnxt *bp) 15233 { 15234 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 15235 board_info[bp->board_idx].name, 15236 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 15237 15238 pcie_print_link_status(bp->pdev); 15239 } 15240 15241 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 15242 { 15243 struct bnxt_hw_resc *hw_resc; 15244 struct net_device *dev; 15245 struct bnxt *bp; 15246 int rc, max_irqs; 15247 15248 if (pci_is_bridge(pdev)) 15249 return -ENODEV; 15250 15251 /* Clear any pending DMA transactions from crash kernel 15252 * while loading driver in capture kernel. 
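	 * A function level reset (FLR) quiesces the device before the
	 * capture kernel reuses it.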
15253 */ 15254 if (is_kdump_kernel()) { 15255 pci_clear_master(pdev); 15256 pcie_flr(pdev); 15257 } 15258 15259 max_irqs = bnxt_get_max_irq(pdev); 15260 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 15261 max_irqs); 15262 if (!dev) 15263 return -ENOMEM; 15264 15265 bp = netdev_priv(dev); 15266 bp->board_idx = ent->driver_data; 15267 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 15268 bnxt_set_max_func_irqs(bp, max_irqs); 15269 15270 if (bnxt_vf_pciid(bp->board_idx)) 15271 bp->flags |= BNXT_FLAG_VF; 15272 15273 /* No devlink port registration in case of a VF */ 15274 if (BNXT_PF(bp)) 15275 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 15276 15277 if (pdev->msix_cap) 15278 bp->flags |= BNXT_FLAG_MSIX_CAP; 15279 15280 rc = bnxt_init_board(pdev, dev); 15281 if (rc < 0) 15282 goto init_err_free; 15283 15284 dev->netdev_ops = &bnxt_netdev_ops; 15285 dev->stat_ops = &bnxt_stat_ops; 15286 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 15287 dev->ethtool_ops = &bnxt_ethtool_ops; 15288 pci_set_drvdata(pdev, dev); 15289 15290 rc = bnxt_alloc_hwrm_resources(bp); 15291 if (rc) 15292 goto init_err_pci_clean; 15293 15294 mutex_init(&bp->hwrm_cmd_lock); 15295 mutex_init(&bp->link_lock); 15296 15297 rc = bnxt_fw_init_one_p1(bp); 15298 if (rc) 15299 goto init_err_pci_clean; 15300 15301 if (BNXT_PF(bp)) 15302 bnxt_vpd_read_info(bp); 15303 15304 if (BNXT_CHIP_P5_PLUS(bp)) { 15305 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 15306 if (BNXT_CHIP_P7(bp)) 15307 bp->flags |= BNXT_FLAG_CHIP_P7; 15308 } 15309 15310 rc = bnxt_alloc_rss_indir_tbl(bp, NULL); 15311 if (rc) 15312 goto init_err_pci_clean; 15313 15314 rc = bnxt_fw_init_one_p2(bp); 15315 if (rc) 15316 goto init_err_pci_clean; 15317 15318 rc = bnxt_map_db_bar(bp); 15319 if (rc) { 15320 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 15321 rc); 15322 goto init_err_pci_clean; 15323 } 15324 15325 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 15326 NETIF_F_TSO | NETIF_F_TSO6 | 15327 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 15328 NETIF_F_GSO_IPXIP4 | 15329 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 15330 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 15331 NETIF_F_RXCSUM | NETIF_F_GRO; 15332 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 15333 dev->hw_features |= NETIF_F_GSO_UDP_L4; 15334 15335 if (BNXT_SUPPORTS_TPA(bp)) 15336 dev->hw_features |= NETIF_F_LRO; 15337 15338 dev->hw_enc_features = 15339 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 15340 NETIF_F_TSO | NETIF_F_TSO6 | 15341 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 15342 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 15343 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 15344 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 15345 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 15346 if (bp->flags & BNXT_FLAG_CHIP_P7) 15347 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 15348 else 15349 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 15350 15351 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 15352 NETIF_F_GSO_GRE_CSUM; 15353 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 15354 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 15355 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 15356 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 15357 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 15358 if (BNXT_SUPPORTS_TPA(bp)) 15359 dev->hw_features |= NETIF_F_GRO_HW; 15360 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 15361 if (dev->features & NETIF_F_GRO_HW) 15362 dev->features &= ~NETIF_F_LRO; 15363 dev->priv_flags |= IFF_UNICAST_FLT; 15364 15365 
	netif_set_tso_max_size(dev, GSO_MAX_SIZE);

	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5_PLUS(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	hw_resc = &bp->hw_resc;
	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
		       BNXT_L2_FLTR_MAX_FLTR;
	/* Older firmware may not report these filters properly */
	if (bp->max_fltr < BNXT_MAX_FLTR)
		bp->max_fltr = BNXT_MAX_FLTR;
	bnxt_init_l2_fltr_tbl(bp);
	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_rdma_aux_device_init(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	INIT_LIST_HEAD(&bp->usr_fltr_list);

	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
		bnxt_init_multi_rss_ctx(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_rdma_aux_device_add(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);

	return 0;
init_err_cleanup:
	bnxt_rdma_aux_device_uninit(bp);
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
		bnxt_clear_rss_ctxs(bp, true);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	bnxt_ulp_stop(bp);

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	bnxt_clear_reservations(bp, true);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	bool abort = false;

	netdev_info(netdev, "PCI I/O error detected\n");

	bnxt_ulp_stop(bp);

	rtnl_lock();
	netif_device_detach(netdev);

	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "Firmware reset already in progress\n");
		abort = true;
	}

	if (abort || state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Link is not reliable anymore if state is pci_channel_io_frozen
	 * so we disable bus master to prevent any potential bad DMAs before
	 * freeing kernel memory.
	 */
	if (state == pci_channel_io_frozen) {
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
		bnxt_fw_fatal_close(bp);
	}

	if (netif_running(netdev))
		__bnxt_close_nic(bp, true, true);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
		msleep(900);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write the BARs if their
		 * values match the previously saved values, the driver needs
		 * to write 0 to the BARs to force a restore after a fatal
		 * error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	if (!err)
		netif_device_attach(netdev);

	rtnl_unlock();
	bnxt_ulp_start(bp, err);
	if (!err)
		bnxt_reenable_sriov(bp);
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);