1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2019 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 13 #include <linux/stringify.h> 14 #include <linux/kernel.h> 15 #include <linux/timer.h> 16 #include <linux/errno.h> 17 #include <linux/ioport.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/interrupt.h> 21 #include <linux/pci.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/bitops.h> 27 #include <linux/io.h> 28 #include <linux/irq.h> 29 #include <linux/delay.h> 30 #include <asm/byteorder.h> 31 #include <asm/page.h> 32 #include <linux/time.h> 33 #include <linux/mii.h> 34 #include <linux/mdio.h> 35 #include <linux/if.h> 36 #include <linux/if_vlan.h> 37 #include <linux/if_bridge.h> 38 #include <linux/rtc.h> 39 #include <linux/bpf.h> 40 #include <net/gro.h> 41 #include <net/ip.h> 42 #include <net/tcp.h> 43 #include <net/udp.h> 44 #include <net/checksum.h> 45 #include <net/ip6_checksum.h> 46 #include <net/udp_tunnel.h> 47 #include <linux/workqueue.h> 48 #include <linux/prefetch.h> 49 #include <linux/cache.h> 50 #include <linux/log2.h> 51 #include <linux/bitmap.h> 52 #include <linux/cpu_rmap.h> 53 #include <linux/cpumask.h> 54 #include <net/pkt_cls.h> 55 #include <net/page_pool/helpers.h> 56 #include <linux/align.h> 57 #include <net/netdev_queues.h> 58 59 #include "bnxt_hsi.h" 60 #include "bnxt.h" 61 #include "bnxt_hwrm.h" 62 #include "bnxt_ulp.h" 63 #include "bnxt_sriov.h" 64 #include "bnxt_ethtool.h" 65 #include "bnxt_dcb.h" 66 #include "bnxt_xdp.h" 67 #include "bnxt_ptp.h" 68 #include "bnxt_vfr.h" 69 #include "bnxt_tc.h" 70 #include "bnxt_devlink.h" 71 #include "bnxt_debugfs.h" 72 #include "bnxt_hwmon.h" 73 74 #define BNXT_TX_TIMEOUT (5 * HZ) 75 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ 76 NETIF_MSG_TX_ERR) 77 78 MODULE_LICENSE("GPL"); 79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); 80 81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) 82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD 83 #define BNXT_RX_COPY_THRESH 256 84 85 #define BNXT_TX_PUSH_THRESH 164 86 87 /* indexed by enum board_idx */ 88 static const struct { 89 char *name; 90 } board_info[] = { 91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, 92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, 93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, 95 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, 96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, 98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 105 [BCM57416] = { "Broadcom 
BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 123 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 124 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 125 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, 126 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 127 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 128 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 129 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 130 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 131 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 132 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 133 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 134 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 135 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 136 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 137 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 138 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 139 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 140 }; 141 142 static const struct pci_device_id bnxt_pci_tbl[] = { 143 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 144 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 145 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 146 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 147 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 148 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 149 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 150 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 151 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 152 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = 
BCM57311 }, 153 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 154 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 155 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 156 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 157 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 158 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 159 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 160 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 161 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 162 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 163 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 164 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 165 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 166 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 167 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 168 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 169 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 170 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 171 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 172 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 173 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 174 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 175 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 176 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 177 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 178 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 179 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 180 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 181 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, 182 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, 183 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, 184 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, 185 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, 186 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 187 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, 188 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, 189 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 190 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 192 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 193 #ifdef CONFIG_BNXT_SRIOV 194 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 195 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 196 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 197 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 198 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 199 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 201 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 202 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, 203 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 204 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 205 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 
206 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 207 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 208 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 209 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 210 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 211 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 212 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 213 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 214 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 215 #endif 216 { 0 } 217 }; 218 219 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 220 221 static const u16 bnxt_vf_req_snif[] = { 222 HWRM_FUNC_CFG, 223 HWRM_FUNC_VF_CFG, 224 HWRM_PORT_PHY_QCFG, 225 HWRM_CFA_L2_FILTER_ALLOC, 226 }; 227 228 static const u16 bnxt_async_events_arr[] = { 229 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 230 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 231 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 232 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 233 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 234 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 235 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 236 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 237 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 238 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 239 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, 240 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 241 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 242 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, 243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, 244 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, 245 }; 246 247 static struct workqueue_struct *bnxt_pf_wq; 248 249 static bool bnxt_vf_pciid(enum board_idx idx) 250 { 251 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 252 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || 253 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || 254 idx == NETXTREME_E_P5_VF_HV); 255 } 256 257 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 258 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 259 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 260 261 #define BNXT_CP_DB_IRQ_DIS(db) \ 262 writel(DB_CP_IRQ_DIS_FLAGS, db) 263 264 #define BNXT_DB_CQ(db, idx) \ 265 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 266 267 #define BNXT_DB_NQ_P5(db, idx) \ 268 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\ 269 (db)->doorbell) 270 271 #define BNXT_DB_NQ_P7(db, idx) \ 272 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \ 273 DB_RING_IDX(db, idx), (db)->doorbell) 274 275 #define BNXT_DB_CQ_ARM(db, idx) \ 276 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 277 278 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 279 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \ 280 DB_RING_IDX(db, idx), (db)->doorbell) 281 282 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 283 { 284 if (bp->flags & BNXT_FLAG_CHIP_P7) 285 BNXT_DB_NQ_P7(db, idx); 286 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 287 BNXT_DB_NQ_P5(db, idx); 288 else 289 BNXT_DB_CQ(db, idx); 290 } 291 292 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 293 { 294 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 295 BNXT_DB_NQ_ARM_P5(db, idx); 296 else 297 BNXT_DB_CQ_ARM(db, idx); 298 } 299 300 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 301 { 302 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 303 bnxt_writeq(bp, 
db->db_key64 | DBR_TYPE_CQ_ARMALL | 304 DB_RING_IDX(db, idx), db->doorbell); 305 else 306 BNXT_DB_CQ(db, idx); 307 } 308 309 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 310 { 311 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) 312 return; 313 314 if (BNXT_PF(bp)) 315 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 316 else 317 schedule_delayed_work(&bp->fw_reset_task, delay); 318 } 319 320 static void __bnxt_queue_sp_work(struct bnxt *bp) 321 { 322 if (BNXT_PF(bp)) 323 queue_work(bnxt_pf_wq, &bp->sp_task); 324 else 325 schedule_work(&bp->sp_task); 326 } 327 328 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) 329 { 330 set_bit(event, &bp->sp_event); 331 __bnxt_queue_sp_work(bp); 332 } 333 334 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 335 { 336 if (!rxr->bnapi->in_reset) { 337 rxr->bnapi->in_reset = true; 338 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 339 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 340 else 341 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); 342 __bnxt_queue_sp_work(bp); 343 } 344 rxr->rx_next_cons = 0xffff; 345 } 346 347 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 348 u16 curr) 349 { 350 struct bnxt_napi *bnapi = txr->bnapi; 351 352 if (bnapi->tx_fault) 353 return; 354 355 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)", 356 txr->txq_index, txr->tx_hw_cons, 357 txr->tx_cons, txr->tx_prod, curr); 358 WARN_ON_ONCE(1); 359 bnapi->tx_fault = 1; 360 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 361 } 362 363 const u16 bnxt_lhint_arr[] = { 364 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 365 TX_BD_FLAGS_LHINT_512_TO_1023, 366 TX_BD_FLAGS_LHINT_1024_TO_2047, 367 TX_BD_FLAGS_LHINT_1024_TO_2047, 368 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 369 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 370 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 371 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 372 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 373 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 374 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 375 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 376 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 377 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 378 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 379 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 380 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 381 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 382 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 383 }; 384 385 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 386 { 387 struct metadata_dst *md_dst = skb_metadata_dst(skb); 388 389 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 390 return 0; 391 392 return md_dst->u.port_info.port_id; 393 } 394 395 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 396 u16 prod) 397 { 398 /* Sync BD data before updating doorbell */ 399 wmb(); 400 bnxt_db_write(bp, &txr->tx_db, prod); 401 txr->kick_pending = 0; 402 } 403 404 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 405 { 406 struct bnxt *bp = netdev_priv(dev); 407 struct tx_bd *txbd, *txbd0; 408 struct tx_bd_ext *txbd1; 409 struct netdev_queue *txq; 410 int i; 411 dma_addr_t mapping; 412 unsigned int length, pad = 0; 413 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 414 u16 prod, last_frag; 415 struct pci_dev *pdev = bp->pdev; 416 struct bnxt_tx_ring_info *txr; 417 struct bnxt_sw_tx_bd *tx_buf; 418 __le32 lflags = 0; 419 420 i = skb_get_queue_mapping(skb); 421 if (unlikely(i >= bp->tx_nr_rings)) { 422 dev_kfree_skb_any(skb); 423 dev_core_stats_tx_dropped_inc(dev); 424 
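/* The queue index from skb_get_queue_mapping() was out of range: the skb has already been freed and counted as a TX drop above, so report NETDEV_TX_OK below instead of requeueing. */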
return NETDEV_TX_OK; 425 } 426 427 txq = netdev_get_tx_queue(dev, i); 428 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 429 prod = txr->tx_prod; 430 431 free_size = bnxt_tx_avail(bp, txr); 432 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 433 /* We must have raced with NAPI cleanup */ 434 if (net_ratelimit() && txr->kick_pending) 435 netif_warn(bp, tx_err, dev, 436 "bnxt: ring busy w/ flush pending!\n"); 437 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 438 bp->tx_wake_thresh)) 439 return NETDEV_TX_BUSY; 440 } 441 442 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 443 goto tx_free; 444 445 length = skb->len; 446 len = skb_headlen(skb); 447 last_frag = skb_shinfo(skb)->nr_frags; 448 449 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 450 451 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 452 tx_buf->skb = skb; 453 tx_buf->nr_frags = last_frag; 454 455 vlan_tag_flags = 0; 456 cfa_action = bnxt_xmit_get_cfa_action(skb); 457 if (skb_vlan_tag_present(skb)) { 458 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 459 skb_vlan_tag_get(skb); 460 /* Currently supports 8021Q, 8021AD vlan offloads 461 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 462 */ 463 if (skb->vlan_proto == htons(ETH_P_8021Q)) 464 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 465 } 466 467 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 468 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 469 470 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && 471 atomic_dec_if_positive(&ptp->tx_avail) >= 0) { 472 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, 473 &ptp->tx_hdr_off)) { 474 if (vlan_tag_flags) 475 ptp->tx_hdr_off += VLAN_HLEN; 476 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 477 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 478 } else { 479 atomic_inc(&bp->ptp_cfg->tx_avail); 480 } 481 } 482 } 483 484 if (unlikely(skb->no_fcs)) 485 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 486 487 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 488 !lflags) { 489 struct tx_push_buffer *tx_push_buf = txr->tx_push; 490 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 491 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 492 void __iomem *db = txr->tx_db.doorbell; 493 void *pdata = tx_push_buf->data; 494 u64 *end; 495 int j, push_len; 496 497 /* Set COAL_NOW to be ready quickly for the next push */ 498 tx_push->tx_bd_len_flags_type = 499 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 500 TX_BD_TYPE_LONG_TX_BD | 501 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 502 TX_BD_FLAGS_COAL_NOW | 503 TX_BD_FLAGS_PACKET_END | 504 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 505 506 if (skb->ip_summed == CHECKSUM_PARTIAL) 507 tx_push1->tx_bd_hsize_lflags = 508 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 509 else 510 tx_push1->tx_bd_hsize_lflags = 0; 511 512 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 513 tx_push1->tx_bd_cfa_action = 514 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 515 516 end = pdata + length; 517 end = PTR_ALIGN(end, 8) - 1; 518 *end = 0; 519 520 skb_copy_from_linear_data(skb, pdata, len); 521 pdata += len; 522 for (j = 0; j < last_frag; j++) { 523 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 524 void *fptr; 525 526 fptr = skb_frag_address_safe(frag); 527 if (!fptr) 528 goto normal_tx; 529 530 memcpy(pdata, fptr, skb_frag_size(frag)); 531 pdata += skb_frag_size(frag); 532 } 533 534 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 535 txbd->tx_bd_haddr = txr->data_mapping; 536 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 537 prod = NEXT_TX(prod); 538 tx_push->tx_bd_opaque = 
txbd->tx_bd_opaque; 539 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 540 memcpy(txbd, tx_push1, sizeof(*txbd)); 541 prod = NEXT_TX(prod); 542 tx_push->doorbell = 543 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 544 DB_RING_IDX(&txr->tx_db, prod)); 545 WRITE_ONCE(txr->tx_prod, prod); 546 547 tx_buf->is_push = 1; 548 netdev_tx_sent_queue(txq, skb->len); 549 wmb(); /* Sync is_push and byte queue before pushing data */ 550 551 push_len = (length + sizeof(*tx_push) + 7) / 8; 552 if (push_len > 16) { 553 __iowrite64_copy(db, tx_push_buf, 16); 554 __iowrite32_copy(db + 4, tx_push_buf + 1, 555 (push_len - 16) << 1); 556 } else { 557 __iowrite64_copy(db, tx_push_buf, push_len); 558 } 559 560 goto tx_done; 561 } 562 563 normal_tx: 564 if (length < BNXT_MIN_PKT_SIZE) { 565 pad = BNXT_MIN_PKT_SIZE - length; 566 if (skb_pad(skb, pad)) 567 /* SKB already freed. */ 568 goto tx_kick_pending; 569 length = BNXT_MIN_PKT_SIZE; 570 } 571 572 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 573 574 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 575 goto tx_free; 576 577 dma_unmap_addr_set(tx_buf, mapping, mapping); 578 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 579 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 580 581 txbd->tx_bd_haddr = cpu_to_le64(mapping); 582 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 583 584 prod = NEXT_TX(prod); 585 txbd1 = (struct tx_bd_ext *) 586 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 587 588 txbd1->tx_bd_hsize_lflags = lflags; 589 if (skb_is_gso(skb)) { 590 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 591 u32 hdr_len; 592 593 if (skb->encapsulation) { 594 if (udp_gso) 595 hdr_len = skb_inner_transport_offset(skb) + 596 sizeof(struct udphdr); 597 else 598 hdr_len = skb_inner_tcp_all_headers(skb); 599 } else if (udp_gso) { 600 hdr_len = skb_transport_offset(skb) + 601 sizeof(struct udphdr); 602 } else { 603 hdr_len = skb_tcp_all_headers(skb); 604 } 605 606 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 607 TX_BD_FLAGS_T_IPID | 608 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 609 length = skb_shinfo(skb)->gso_size; 610 txbd1->tx_bd_mss = cpu_to_le32(length); 611 length += hdr_len; 612 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 613 txbd1->tx_bd_hsize_lflags |= 614 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 615 txbd1->tx_bd_mss = 0; 616 } 617 618 length >>= 9; 619 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 620 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 621 skb->len); 622 i = 0; 623 goto tx_dma_error; 624 } 625 flags |= bnxt_lhint_arr[length]; 626 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 627 628 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 629 txbd1->tx_bd_cfa_action = 630 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 631 txbd0 = txbd; 632 for (i = 0; i < last_frag; i++) { 633 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 634 635 prod = NEXT_TX(prod); 636 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 637 638 len = skb_frag_size(frag); 639 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 640 DMA_TO_DEVICE); 641 642 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 643 goto tx_dma_error; 644 645 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 646 dma_unmap_addr_set(tx_buf, mapping, mapping); 647 648 txbd->tx_bd_haddr = cpu_to_le64(mapping); 649 650 flags = len << TX_BD_LEN_SHIFT; 651 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 652 } 653 654 flags &= ~TX_BD_LEN; 655 
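/* The last BD of the chain is rewritten below with the final fragment length (plus any padding added for short packets) and TX_BD_FLAGS_PACKET_END to mark the end of the packet. */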
txbd->tx_bd_len_flags_type = 656 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 657 TX_BD_FLAGS_PACKET_END); 658 659 netdev_tx_sent_queue(txq, skb->len); 660 661 skb_tx_timestamp(skb); 662 663 prod = NEXT_TX(prod); 664 WRITE_ONCE(txr->tx_prod, prod); 665 666 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 667 bnxt_txr_db_kick(bp, txr, prod); 668 } else { 669 if (free_size >= bp->tx_wake_thresh) 670 txbd0->tx_bd_len_flags_type |= 671 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 672 txr->kick_pending = 1; 673 } 674 675 tx_done: 676 677 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 678 if (netdev_xmit_more() && !tx_buf->is_push) { 679 txbd0->tx_bd_len_flags_type &= 680 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 681 bnxt_txr_db_kick(bp, txr, prod); 682 } 683 684 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 685 bp->tx_wake_thresh); 686 } 687 return NETDEV_TX_OK; 688 689 tx_dma_error: 690 if (BNXT_TX_PTP_IS_SET(lflags)) 691 atomic_inc(&bp->ptp_cfg->tx_avail); 692 693 last_frag = i; 694 695 /* start back at beginning and unmap skb */ 696 prod = txr->tx_prod; 697 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 698 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 699 skb_headlen(skb), DMA_TO_DEVICE); 700 prod = NEXT_TX(prod); 701 702 /* unmap remaining mapped pages */ 703 for (i = 0; i < last_frag; i++) { 704 prod = NEXT_TX(prod); 705 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 706 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 707 skb_frag_size(&skb_shinfo(skb)->frags[i]), 708 DMA_TO_DEVICE); 709 } 710 711 tx_free: 712 dev_kfree_skb_any(skb); 713 tx_kick_pending: 714 if (txr->kick_pending) 715 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 716 txr->tx_buf_ring[txr->tx_prod].skb = NULL; 717 dev_core_stats_tx_dropped_inc(dev); 718 return NETDEV_TX_OK; 719 } 720 721 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 722 int budget) 723 { 724 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 725 struct pci_dev *pdev = bp->pdev; 726 u16 hw_cons = txr->tx_hw_cons; 727 unsigned int tx_bytes = 0; 728 u16 cons = txr->tx_cons; 729 int tx_pkts = 0; 730 731 while (RING_TX(bp, cons) != hw_cons) { 732 struct bnxt_sw_tx_bd *tx_buf; 733 struct sk_buff *skb; 734 int j, last; 735 736 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 737 cons = NEXT_TX(cons); 738 skb = tx_buf->skb; 739 tx_buf->skb = NULL; 740 741 if (unlikely(!skb)) { 742 bnxt_sched_reset_txr(bp, txr, cons); 743 return; 744 } 745 746 tx_pkts++; 747 tx_bytes += skb->len; 748 749 if (tx_buf->is_push) { 750 tx_buf->is_push = 0; 751 goto next_tx_int; 752 } 753 754 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 755 skb_headlen(skb), DMA_TO_DEVICE); 756 last = tx_buf->nr_frags; 757 758 for (j = 0; j < last; j++) { 759 cons = NEXT_TX(cons); 760 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 761 dma_unmap_page( 762 &pdev->dev, 763 dma_unmap_addr(tx_buf, mapping), 764 skb_frag_size(&skb_shinfo(skb)->frags[j]), 765 DMA_TO_DEVICE); 766 } 767 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 768 if (BNXT_CHIP_P5(bp)) { 769 /* PTP worker takes ownership of the skb */ 770 if (!bnxt_get_tx_ts_p5(bp, skb)) 771 skb = NULL; 772 else 773 atomic_inc(&bp->ptp_cfg->tx_avail); 774 } 775 } 776 777 next_tx_int: 778 cons = NEXT_TX(cons); 779 780 dev_consume_skb_any(skb); 781 } 782 783 WRITE_ONCE(txr->tx_cons, cons); 784 785 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 786 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 787 READ_ONCE(txr->dev_state) == 
BNXT_DEV_STATE_CLOSING); 788 } 789 790 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 791 { 792 struct bnxt_tx_ring_info *txr; 793 int i; 794 795 bnxt_for_each_napi_tx(i, bnapi, txr) { 796 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) 797 __bnxt_tx_int(bp, txr, budget); 798 } 799 bnapi->events &= ~BNXT_TX_CMP_EVENT; 800 } 801 802 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 803 struct bnxt_rx_ring_info *rxr, 804 unsigned int *offset, 805 gfp_t gfp) 806 { 807 struct page *page; 808 809 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 810 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 811 BNXT_RX_PAGE_SIZE); 812 } else { 813 page = page_pool_dev_alloc_pages(rxr->page_pool); 814 *offset = 0; 815 } 816 if (!page) 817 return NULL; 818 819 *mapping = page_pool_get_dma_addr(page) + *offset; 820 return page; 821 } 822 823 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 824 gfp_t gfp) 825 { 826 u8 *data; 827 struct pci_dev *pdev = bp->pdev; 828 829 if (gfp == GFP_ATOMIC) 830 data = napi_alloc_frag(bp->rx_buf_size); 831 else 832 data = netdev_alloc_frag(bp->rx_buf_size); 833 if (!data) 834 return NULL; 835 836 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 837 bp->rx_buf_use_size, bp->rx_dir, 838 DMA_ATTR_WEAK_ORDERING); 839 840 if (dma_mapping_error(&pdev->dev, *mapping)) { 841 skb_free_frag(data); 842 data = NULL; 843 } 844 return data; 845 } 846 847 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 848 u16 prod, gfp_t gfp) 849 { 850 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 851 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 852 dma_addr_t mapping; 853 854 if (BNXT_RX_PAGE_MODE(bp)) { 855 unsigned int offset; 856 struct page *page = 857 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 858 859 if (!page) 860 return -ENOMEM; 861 862 mapping += bp->rx_dma_offset; 863 rx_buf->data = page; 864 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 865 } else { 866 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); 867 868 if (!data) 869 return -ENOMEM; 870 871 rx_buf->data = data; 872 rx_buf->data_ptr = data + bp->rx_offset; 873 } 874 rx_buf->mapping = mapping; 875 876 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 877 return 0; 878 } 879 880 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 881 { 882 u16 prod = rxr->rx_prod; 883 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 884 struct bnxt *bp = rxr->bnapi->bp; 885 struct rx_bd *cons_bd, *prod_bd; 886 887 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 888 cons_rx_buf = &rxr->rx_buf_ring[cons]; 889 890 prod_rx_buf->data = data; 891 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 892 893 prod_rx_buf->mapping = cons_rx_buf->mapping; 894 895 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 896 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 897 898 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 899 } 900 901 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 902 { 903 u16 next, max = rxr->rx_agg_bmap_size; 904 905 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 906 if (next >= max) 907 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 908 return next; 909 } 910 911 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 912 struct bnxt_rx_ring_info *rxr, 913 u16 prod, gfp_t gfp) 914 { 915 struct rx_bd *rxbd = 916 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, 
prod)][RX_IDX(prod)]; 917 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 918 struct page *page; 919 dma_addr_t mapping; 920 u16 sw_prod = rxr->rx_sw_agg_prod; 921 unsigned int offset = 0; 922 923 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 924 925 if (!page) 926 return -ENOMEM; 927 928 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 929 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 930 931 __set_bit(sw_prod, rxr->rx_agg_bmap); 932 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 933 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 934 935 rx_agg_buf->page = page; 936 rx_agg_buf->offset = offset; 937 rx_agg_buf->mapping = mapping; 938 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 939 rxbd->rx_bd_opaque = sw_prod; 940 return 0; 941 } 942 943 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 944 struct bnxt_cp_ring_info *cpr, 945 u16 cp_cons, u16 curr) 946 { 947 struct rx_agg_cmp *agg; 948 949 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 950 agg = (struct rx_agg_cmp *) 951 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 952 return agg; 953 } 954 955 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 956 struct bnxt_rx_ring_info *rxr, 957 u16 agg_id, u16 curr) 958 { 959 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 960 961 return &tpa_info->agg_arr[curr]; 962 } 963 964 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 965 u16 start, u32 agg_bufs, bool tpa) 966 { 967 struct bnxt_napi *bnapi = cpr->bnapi; 968 struct bnxt *bp = bnapi->bp; 969 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 970 u16 prod = rxr->rx_agg_prod; 971 u16 sw_prod = rxr->rx_sw_agg_prod; 972 bool p5_tpa = false; 973 u32 i; 974 975 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 976 p5_tpa = true; 977 978 for (i = 0; i < agg_bufs; i++) { 979 u16 cons; 980 struct rx_agg_cmp *agg; 981 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 982 struct rx_bd *prod_bd; 983 struct page *page; 984 985 if (p5_tpa) 986 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 987 else 988 agg = bnxt_get_agg(bp, cpr, idx, start + i); 989 cons = agg->rx_agg_cmp_opaque; 990 __clear_bit(cons, rxr->rx_agg_bmap); 991 992 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 993 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 994 995 __set_bit(sw_prod, rxr->rx_agg_bmap); 996 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 997 cons_rx_buf = &rxr->rx_agg_ring[cons]; 998 999 /* It is possible for sw_prod to be equal to cons, so 1000 * set cons_rx_buf->page to NULL first. 
1001 */ 1002 page = cons_rx_buf->page; 1003 cons_rx_buf->page = NULL; 1004 prod_rx_buf->page = page; 1005 prod_rx_buf->offset = cons_rx_buf->offset; 1006 1007 prod_rx_buf->mapping = cons_rx_buf->mapping; 1008 1009 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1010 1011 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1012 prod_bd->rx_bd_opaque = sw_prod; 1013 1014 prod = NEXT_RX_AGG(prod); 1015 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1016 } 1017 rxr->rx_agg_prod = prod; 1018 rxr->rx_sw_agg_prod = sw_prod; 1019 } 1020 1021 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1022 struct bnxt_rx_ring_info *rxr, 1023 u16 cons, void *data, u8 *data_ptr, 1024 dma_addr_t dma_addr, 1025 unsigned int offset_and_len) 1026 { 1027 unsigned int len = offset_and_len & 0xffff; 1028 struct page *page = data; 1029 u16 prod = rxr->rx_prod; 1030 struct sk_buff *skb; 1031 int err; 1032 1033 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1034 if (unlikely(err)) { 1035 bnxt_reuse_rx_data(rxr, cons, data); 1036 return NULL; 1037 } 1038 dma_addr -= bp->rx_dma_offset; 1039 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1040 bp->rx_dir); 1041 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1042 if (!skb) { 1043 page_pool_recycle_direct(rxr->page_pool, page); 1044 return NULL; 1045 } 1046 skb_mark_for_recycle(skb); 1047 skb_reserve(skb, bp->rx_offset); 1048 __skb_put(skb, len); 1049 1050 return skb; 1051 } 1052 1053 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1054 struct bnxt_rx_ring_info *rxr, 1055 u16 cons, void *data, u8 *data_ptr, 1056 dma_addr_t dma_addr, 1057 unsigned int offset_and_len) 1058 { 1059 unsigned int payload = offset_and_len >> 16; 1060 unsigned int len = offset_and_len & 0xffff; 1061 skb_frag_t *frag; 1062 struct page *page = data; 1063 u16 prod = rxr->rx_prod; 1064 struct sk_buff *skb; 1065 int off, err; 1066 1067 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1068 if (unlikely(err)) { 1069 bnxt_reuse_rx_data(rxr, cons, data); 1070 return NULL; 1071 } 1072 dma_addr -= bp->rx_dma_offset; 1073 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1074 bp->rx_dir); 1075 1076 if (unlikely(!payload)) 1077 payload = eth_get_headlen(bp->dev, data_ptr, len); 1078 1079 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1080 if (!skb) { 1081 page_pool_recycle_direct(rxr->page_pool, page); 1082 return NULL; 1083 } 1084 1085 skb_mark_for_recycle(skb); 1086 off = (void *)data_ptr - page_address(page); 1087 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1088 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1089 payload + NET_IP_ALIGN); 1090 1091 frag = &skb_shinfo(skb)->frags[0]; 1092 skb_frag_size_sub(frag, payload); 1093 skb_frag_off_add(frag, payload); 1094 skb->data_len -= payload; 1095 skb->tail += payload; 1096 1097 return skb; 1098 } 1099 1100 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1101 struct bnxt_rx_ring_info *rxr, u16 cons, 1102 void *data, u8 *data_ptr, 1103 dma_addr_t dma_addr, 1104 unsigned int offset_and_len) 1105 { 1106 u16 prod = rxr->rx_prod; 1107 struct sk_buff *skb; 1108 int err; 1109 1110 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1111 if (unlikely(err)) { 1112 bnxt_reuse_rx_data(rxr, cons, data); 1113 return NULL; 1114 } 1115 1116 skb = napi_build_skb(data, bp->rx_buf_size); 1117 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1118 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 1119 if (!skb) { 1120 
skb_free_frag(data); 1121 return NULL; 1122 } 1123 1124 skb_reserve(skb, bp->rx_offset); 1125 skb_put(skb, offset_and_len & 0xffff); 1126 return skb; 1127 } 1128 1129 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1130 struct bnxt_cp_ring_info *cpr, 1131 struct skb_shared_info *shinfo, 1132 u16 idx, u32 agg_bufs, bool tpa, 1133 struct xdp_buff *xdp) 1134 { 1135 struct bnxt_napi *bnapi = cpr->bnapi; 1136 struct pci_dev *pdev = bp->pdev; 1137 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1138 u16 prod = rxr->rx_agg_prod; 1139 u32 i, total_frag_len = 0; 1140 bool p5_tpa = false; 1141 1142 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1143 p5_tpa = true; 1144 1145 for (i = 0; i < agg_bufs; i++) { 1146 skb_frag_t *frag = &shinfo->frags[i]; 1147 u16 cons, frag_len; 1148 struct rx_agg_cmp *agg; 1149 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1150 struct page *page; 1151 dma_addr_t mapping; 1152 1153 if (p5_tpa) 1154 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1155 else 1156 agg = bnxt_get_agg(bp, cpr, idx, i); 1157 cons = agg->rx_agg_cmp_opaque; 1158 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1159 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1160 1161 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1162 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1163 cons_rx_buf->offset, frag_len); 1164 shinfo->nr_frags = i + 1; 1165 __clear_bit(cons, rxr->rx_agg_bmap); 1166 1167 /* It is possible for bnxt_alloc_rx_page() to allocate 1168 * a sw_prod index that equals the cons index, so we 1169 * need to clear the cons entry now. 1170 */ 1171 mapping = cons_rx_buf->mapping; 1172 page = cons_rx_buf->page; 1173 cons_rx_buf->page = NULL; 1174 1175 if (xdp && page_is_pfmemalloc(page)) 1176 xdp_buff_set_frag_pfmemalloc(xdp); 1177 1178 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1179 --shinfo->nr_frags; 1180 cons_rx_buf->page = page; 1181 1182 /* Update prod since possibly some pages have been 1183 * allocated already. 
1184 */ 1185 rxr->rx_agg_prod = prod; 1186 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1187 return 0; 1188 } 1189 1190 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1191 bp->rx_dir); 1192 1193 total_frag_len += frag_len; 1194 prod = NEXT_RX_AGG(prod); 1195 } 1196 rxr->rx_agg_prod = prod; 1197 return total_frag_len; 1198 } 1199 1200 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1201 struct bnxt_cp_ring_info *cpr, 1202 struct sk_buff *skb, u16 idx, 1203 u32 agg_bufs, bool tpa) 1204 { 1205 struct skb_shared_info *shinfo = skb_shinfo(skb); 1206 u32 total_frag_len = 0; 1207 1208 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1209 agg_bufs, tpa, NULL); 1210 if (!total_frag_len) { 1211 skb_mark_for_recycle(skb); 1212 dev_kfree_skb(skb); 1213 return NULL; 1214 } 1215 1216 skb->data_len += total_frag_len; 1217 skb->len += total_frag_len; 1218 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1219 return skb; 1220 } 1221 1222 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1223 struct bnxt_cp_ring_info *cpr, 1224 struct xdp_buff *xdp, u16 idx, 1225 u32 agg_bufs, bool tpa) 1226 { 1227 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1228 u32 total_frag_len = 0; 1229 1230 if (!xdp_buff_has_frags(xdp)) 1231 shinfo->nr_frags = 0; 1232 1233 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1234 idx, agg_bufs, tpa, xdp); 1235 if (total_frag_len) { 1236 xdp_buff_set_frags_flag(xdp); 1237 shinfo->nr_frags = agg_bufs; 1238 shinfo->xdp_frags_size = total_frag_len; 1239 } 1240 return total_frag_len; 1241 } 1242 1243 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1244 u8 agg_bufs, u32 *raw_cons) 1245 { 1246 u16 last; 1247 struct rx_agg_cmp *agg; 1248 1249 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1250 last = RING_CMP(*raw_cons); 1251 agg = (struct rx_agg_cmp *) 1252 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1253 return RX_AGG_CMP_VALID(agg, *raw_cons); 1254 } 1255 1256 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1257 unsigned int len, 1258 dma_addr_t mapping) 1259 { 1260 struct bnxt *bp = bnapi->bp; 1261 struct pci_dev *pdev = bp->pdev; 1262 struct sk_buff *skb; 1263 1264 skb = napi_alloc_skb(&bnapi->napi, len); 1265 if (!skb) 1266 return NULL; 1267 1268 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1269 bp->rx_dir); 1270 1271 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1272 len + NET_IP_ALIGN); 1273 1274 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1275 bp->rx_dir); 1276 1277 skb_put(skb, len); 1278 return skb; 1279 } 1280 1281 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1282 u32 *raw_cons, void *cmp) 1283 { 1284 struct rx_cmp *rxcmp = cmp; 1285 u32 tmp_raw_cons = *raw_cons; 1286 u8 cmp_type, agg_bufs = 0; 1287 1288 cmp_type = RX_CMP_TYPE(rxcmp); 1289 1290 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1291 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1292 RX_CMP_AGG_BUFS) >> 1293 RX_CMP_AGG_BUFS_SHIFT; 1294 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1295 struct rx_tpa_end_cmp *tpa_end = cmp; 1296 1297 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1298 return 0; 1299 1300 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1301 } 1302 1303 if (agg_bufs) { 1304 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1305 return -EBUSY; 1306 } 1307 *raw_cons = tmp_raw_cons; 1308 return 0; 1309 } 1310 1311 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1312 { 
1313 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1314 u16 idx = agg_id & MAX_TPA_P5_MASK; 1315 1316 if (test_bit(idx, map->agg_idx_bmap)) 1317 idx = find_first_zero_bit(map->agg_idx_bmap, 1318 BNXT_AGG_IDX_BMAP_SIZE); 1319 __set_bit(idx, map->agg_idx_bmap); 1320 map->agg_id_tbl[agg_id] = idx; 1321 return idx; 1322 } 1323 1324 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1325 { 1326 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1327 1328 __clear_bit(idx, map->agg_idx_bmap); 1329 } 1330 1331 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1332 { 1333 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1334 1335 return map->agg_id_tbl[agg_id]; 1336 } 1337 1338 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1339 struct rx_tpa_start_cmp *tpa_start, 1340 struct rx_tpa_start_cmp_ext *tpa_start1) 1341 { 1342 tpa_info->cfa_code_valid = 1; 1343 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1344 tpa_info->vlan_valid = 0; 1345 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1346 tpa_info->vlan_valid = 1; 1347 tpa_info->metadata = 1348 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1349 } 1350 } 1351 1352 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1353 struct rx_tpa_start_cmp *tpa_start, 1354 struct rx_tpa_start_cmp_ext *tpa_start1) 1355 { 1356 tpa_info->vlan_valid = 0; 1357 if (TPA_START_VLAN_VALID(tpa_start)) { 1358 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1359 u32 vlan_proto = ETH_P_8021Q; 1360 1361 tpa_info->vlan_valid = 1; 1362 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1363 vlan_proto = ETH_P_8021AD; 1364 tpa_info->metadata = vlan_proto << 16 | 1365 TPA_START_METADATA0_TCI(tpa_start1); 1366 } 1367 } 1368 1369 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1370 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1371 struct rx_tpa_start_cmp_ext *tpa_start1) 1372 { 1373 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1374 struct bnxt_tpa_info *tpa_info; 1375 u16 cons, prod, agg_id; 1376 struct rx_bd *prod_bd; 1377 dma_addr_t mapping; 1378 1379 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1380 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1381 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1382 } else { 1383 agg_id = TPA_START_AGG_ID(tpa_start); 1384 } 1385 cons = tpa_start->rx_tpa_start_cmp_opaque; 1386 prod = rxr->rx_prod; 1387 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1388 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1389 tpa_info = &rxr->rx_tpa[agg_id]; 1390 1391 if (unlikely(cons != rxr->rx_next_cons || 1392 TPA_START_ERROR(tpa_start))) { 1393 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1394 cons, rxr->rx_next_cons, 1395 TPA_START_ERROR_CODE(tpa_start1)); 1396 bnxt_sched_reset_rxr(bp, rxr); 1397 return; 1398 } 1399 prod_rx_buf->data = tpa_info->data; 1400 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1401 1402 mapping = tpa_info->mapping; 1403 prod_rx_buf->mapping = mapping; 1404 1405 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1406 1407 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1408 1409 tpa_info->data = cons_rx_buf->data; 1410 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1411 cons_rx_buf->data = NULL; 1412 tpa_info->mapping = cons_rx_buf->mapping; 1413 1414 tpa_info->len = 1415 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1416 RX_TPA_START_CMP_LEN_SHIFT; 1417 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1418 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1419 tpa_info->gso_type = 
SKB_GSO_TCPV4; 1420 if (TPA_START_IS_IPV6(tpa_start1)) 1421 tpa_info->gso_type = SKB_GSO_TCPV6; 1422 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1423 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && 1424 TPA_START_HASH_TYPE(tpa_start) == 3) 1425 tpa_info->gso_type = SKB_GSO_TCPV6; 1426 tpa_info->rss_hash = 1427 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1428 } else { 1429 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1430 tpa_info->gso_type = 0; 1431 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1432 } 1433 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1434 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1435 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1436 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1437 else 1438 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1439 tpa_info->agg_count = 0; 1440 1441 rxr->rx_prod = NEXT_RX(prod); 1442 cons = RING_RX(bp, NEXT_RX(cons)); 1443 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1444 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1445 1446 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1447 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1448 cons_rx_buf->data = NULL; 1449 } 1450 1451 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1452 { 1453 if (agg_bufs) 1454 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1455 } 1456 1457 #ifdef CONFIG_INET 1458 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1459 { 1460 struct udphdr *uh = NULL; 1461 1462 if (ip_proto == htons(ETH_P_IP)) { 1463 struct iphdr *iph = (struct iphdr *)skb->data; 1464 1465 if (iph->protocol == IPPROTO_UDP) 1466 uh = (struct udphdr *)(iph + 1); 1467 } else { 1468 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1469 1470 if (iph->nexthdr == IPPROTO_UDP) 1471 uh = (struct udphdr *)(iph + 1); 1472 } 1473 if (uh) { 1474 if (uh->check) 1475 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1476 else 1477 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1478 } 1479 } 1480 #endif 1481 1482 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1483 int payload_off, int tcp_ts, 1484 struct sk_buff *skb) 1485 { 1486 #ifdef CONFIG_INET 1487 struct tcphdr *th; 1488 int len, nw_off; 1489 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1490 u32 hdr_info = tpa_info->hdr_info; 1491 bool loopback = false; 1492 1493 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1494 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1495 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1496 1497 /* If the packet is an internal loopback packet, the offsets will 1498 * have an extra 4 bytes. 1499 */ 1500 if (inner_mac_off == 4) { 1501 loopback = true; 1502 } else if (inner_mac_off > 4) { 1503 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1504 ETH_HLEN - 2)); 1505 1506 /* We only support inner IPv4/IPv6. If we don't see the 1507 * correct protocol ID, it must be a loopback packet where 1508 * the offsets are off by 4. 
1509 */ 1510 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1511 loopback = true; 1512 } 1513 if (loopback) { 1514 /* internal loopback packet, subtract all offsets by 4 */ 1515 inner_ip_off -= 4; 1516 inner_mac_off -= 4; 1517 outer_ip_off -= 4; 1518 } 1519 1520 nw_off = inner_ip_off - ETH_HLEN; 1521 skb_set_network_header(skb, nw_off); 1522 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1523 struct ipv6hdr *iph = ipv6_hdr(skb); 1524 1525 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1526 len = skb->len - skb_transport_offset(skb); 1527 th = tcp_hdr(skb); 1528 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1529 } else { 1530 struct iphdr *iph = ip_hdr(skb); 1531 1532 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1533 len = skb->len - skb_transport_offset(skb); 1534 th = tcp_hdr(skb); 1535 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1536 } 1537 1538 if (inner_mac_off) { /* tunnel */ 1539 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1540 ETH_HLEN - 2)); 1541 1542 bnxt_gro_tunnel(skb, proto); 1543 } 1544 #endif 1545 return skb; 1546 } 1547 1548 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1549 int payload_off, int tcp_ts, 1550 struct sk_buff *skb) 1551 { 1552 #ifdef CONFIG_INET 1553 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1554 u32 hdr_info = tpa_info->hdr_info; 1555 int iphdr_len, nw_off; 1556 1557 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1558 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1559 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1560 1561 nw_off = inner_ip_off - ETH_HLEN; 1562 skb_set_network_header(skb, nw_off); 1563 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 1564 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1565 skb_set_transport_header(skb, nw_off + iphdr_len); 1566 1567 if (inner_mac_off) { /* tunnel */ 1568 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1569 ETH_HLEN - 2)); 1570 1571 bnxt_gro_tunnel(skb, proto); 1572 } 1573 #endif 1574 return skb; 1575 } 1576 1577 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1578 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1579 1580 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1581 int payload_off, int tcp_ts, 1582 struct sk_buff *skb) 1583 { 1584 #ifdef CONFIG_INET 1585 struct tcphdr *th; 1586 int len, nw_off, tcp_opt_len = 0; 1587 1588 if (tcp_ts) 1589 tcp_opt_len = 12; 1590 1591 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1592 struct iphdr *iph; 1593 1594 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1595 ETH_HLEN; 1596 skb_set_network_header(skb, nw_off); 1597 iph = ip_hdr(skb); 1598 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1599 len = skb->len - skb_transport_offset(skb); 1600 th = tcp_hdr(skb); 1601 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1602 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1603 struct ipv6hdr *iph; 1604 1605 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1606 ETH_HLEN; 1607 skb_set_network_header(skb, nw_off); 1608 iph = ipv6_hdr(skb); 1609 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1610 len = skb->len - skb_transport_offset(skb); 1611 th = tcp_hdr(skb); 1612 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1613 } else { 1614 dev_kfree_skb_any(skb); 1615 return NULL; 1616 } 1617 1618 if (nw_off) /* tunnel */ 1619 bnxt_gro_tunnel(skb, skb->protocol); 1620 #endif 
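/* When CONFIG_INET is not set, the header fixups above are compiled out and the skb is returned unmodified. */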
1621 return skb; 1622 } 1623 1624 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1625 struct bnxt_tpa_info *tpa_info, 1626 struct rx_tpa_end_cmp *tpa_end, 1627 struct rx_tpa_end_cmp_ext *tpa_end1, 1628 struct sk_buff *skb) 1629 { 1630 #ifdef CONFIG_INET 1631 int payload_off; 1632 u16 segs; 1633 1634 segs = TPA_END_TPA_SEGS(tpa_end); 1635 if (segs == 1) 1636 return skb; 1637 1638 NAPI_GRO_CB(skb)->count = segs; 1639 skb_shinfo(skb)->gso_size = 1640 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1641 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1642 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1643 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1644 else 1645 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1646 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1647 if (likely(skb)) 1648 tcp_gro_complete(skb); 1649 #endif 1650 return skb; 1651 } 1652 1653 /* Given the cfa_code of a received packet determine which 1654 * netdev (vf-rep or PF) the packet is destined to. 1655 */ 1656 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1657 { 1658 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1659 1660 /* if vf-rep dev is NULL, the must belongs to the PF */ 1661 return dev ? dev : bp->dev; 1662 } 1663 1664 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1665 struct bnxt_cp_ring_info *cpr, 1666 u32 *raw_cons, 1667 struct rx_tpa_end_cmp *tpa_end, 1668 struct rx_tpa_end_cmp_ext *tpa_end1, 1669 u8 *event) 1670 { 1671 struct bnxt_napi *bnapi = cpr->bnapi; 1672 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1673 struct net_device *dev = bp->dev; 1674 u8 *data_ptr, agg_bufs; 1675 unsigned int len; 1676 struct bnxt_tpa_info *tpa_info; 1677 dma_addr_t mapping; 1678 struct sk_buff *skb; 1679 u16 idx = 0, agg_id; 1680 void *data; 1681 bool gro; 1682 1683 if (unlikely(bnapi->in_reset)) { 1684 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1685 1686 if (rc < 0) 1687 return ERR_PTR(-EBUSY); 1688 return NULL; 1689 } 1690 1691 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1692 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1693 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1694 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1695 tpa_info = &rxr->rx_tpa[agg_id]; 1696 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1697 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1698 agg_bufs, tpa_info->agg_count); 1699 agg_bufs = tpa_info->agg_count; 1700 } 1701 tpa_info->agg_count = 0; 1702 *event |= BNXT_AGG_EVENT; 1703 bnxt_free_agg_idx(rxr, agg_id); 1704 idx = agg_id; 1705 gro = !!(bp->flags & BNXT_FLAG_GRO); 1706 } else { 1707 agg_id = TPA_END_AGG_ID(tpa_end); 1708 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1709 tpa_info = &rxr->rx_tpa[agg_id]; 1710 idx = RING_CMP(*raw_cons); 1711 if (agg_bufs) { 1712 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1713 return ERR_PTR(-EBUSY); 1714 1715 *event |= BNXT_AGG_EVENT; 1716 idx = NEXT_CMP(idx); 1717 } 1718 gro = !!TPA_END_GRO(tpa_end); 1719 } 1720 data = tpa_info->data; 1721 data_ptr = tpa_info->data_ptr; 1722 prefetch(data_ptr); 1723 len = tpa_info->len; 1724 mapping = tpa_info->mapping; 1725 1726 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1727 bnxt_abort_tpa(cpr, idx, agg_bufs); 1728 if (agg_bufs > MAX_SKB_FRAGS) 1729 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1730 agg_bufs, (int)MAX_SKB_FRAGS); 1731 return NULL; 1732 } 1733 1734 if (len <= bp->rx_copy_thresh) { 1735 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1736 if (!skb) { 1737 
bnxt_abort_tpa(cpr, idx, agg_bufs); 1738 cpr->sw_stats.rx.rx_oom_discards += 1; 1739 return NULL; 1740 } 1741 } else { 1742 u8 *new_data; 1743 dma_addr_t new_mapping; 1744 1745 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); 1746 if (!new_data) { 1747 bnxt_abort_tpa(cpr, idx, agg_bufs); 1748 cpr->sw_stats.rx.rx_oom_discards += 1; 1749 return NULL; 1750 } 1751 1752 tpa_info->data = new_data; 1753 tpa_info->data_ptr = new_data + bp->rx_offset; 1754 tpa_info->mapping = new_mapping; 1755 1756 skb = napi_build_skb(data, bp->rx_buf_size); 1757 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1758 bp->rx_buf_use_size, bp->rx_dir, 1759 DMA_ATTR_WEAK_ORDERING); 1760 1761 if (!skb) { 1762 skb_free_frag(data); 1763 bnxt_abort_tpa(cpr, idx, agg_bufs); 1764 cpr->sw_stats.rx.rx_oom_discards += 1; 1765 return NULL; 1766 } 1767 skb_reserve(skb, bp->rx_offset); 1768 skb_put(skb, len); 1769 } 1770 1771 if (agg_bufs) { 1772 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1773 if (!skb) { 1774 /* Page reuse already handled by bnxt_rx_pages(). */ 1775 cpr->sw_stats.rx.rx_oom_discards += 1; 1776 return NULL; 1777 } 1778 } 1779 1780 if (tpa_info->cfa_code_valid) 1781 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1782 skb->protocol = eth_type_trans(skb, dev); 1783 1784 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1785 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1786 1787 if (tpa_info->vlan_valid && 1788 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1789 __be16 vlan_proto = htons(tpa_info->metadata >> 1790 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1791 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1792 1793 if (eth_type_vlan(vlan_proto)) { 1794 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1795 } else { 1796 dev_kfree_skb(skb); 1797 return NULL; 1798 } 1799 } 1800 1801 skb_checksum_none_assert(skb); 1802 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1803 skb->ip_summed = CHECKSUM_UNNECESSARY; 1804 skb->csum_level = 1805 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1806 } 1807 1808 if (gro) 1809 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1810 1811 return skb; 1812 } 1813 1814 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1815 struct rx_agg_cmp *rx_agg) 1816 { 1817 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1818 struct bnxt_tpa_info *tpa_info; 1819 1820 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1821 tpa_info = &rxr->rx_tpa[agg_id]; 1822 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1823 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1824 } 1825 1826 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1827 struct sk_buff *skb) 1828 { 1829 skb_mark_for_recycle(skb); 1830 1831 if (skb->dev != bp->dev) { 1832 /* this packet belongs to a vf-rep */ 1833 bnxt_vf_rep_rx(bp, skb); 1834 return; 1835 } 1836 skb_record_rx_queue(skb, bnapi->index); 1837 napi_gro_receive(&bnapi->napi, skb); 1838 } 1839 1840 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 1841 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 1842 { 1843 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 1844 1845 if (BNXT_PTP_RX_TS_VALID(flags)) 1846 goto ts_valid; 1847 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 1848 return false; 1849 1850 ts_valid: 1851 *cmpl_ts = ts; 1852 return true; 1853 } 1854 1855 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1856 struct rx_cmp *rxcmp, 1857 struct rx_cmp_ext *rxcmp1) 1858 { 1859 __be16 vlan_proto; 1860 u16 vtag; 1861 1862 
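	/* Legacy L2 completions carry the TPID and TCI packed into
	 * rx_cmp_meta_data when the VLAN metadata format is signalled in
	 * flags2.  V3 completions instead report a TPID selector plus TCI,
	 * which must be mapped to 802.1Q or 802.1ad before tagging the skb.
	 */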
if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1863 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1864 u32 meta_data; 1865 1866 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1867 return skb; 1868 1869 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1870 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1871 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1872 if (eth_type_vlan(vlan_proto)) 1873 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1874 else 1875 goto vlan_err; 1876 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 1877 if (RX_CMP_VLAN_VALID(rxcmp)) { 1878 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 1879 1880 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 1881 vlan_proto = htons(ETH_P_8021Q); 1882 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 1883 vlan_proto = htons(ETH_P_8021AD); 1884 else 1885 goto vlan_err; 1886 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 1887 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1888 } 1889 } 1890 return skb; 1891 vlan_err: 1892 dev_kfree_skb(skb); 1893 return NULL; 1894 } 1895 1896 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 1897 struct rx_cmp *rxcmp) 1898 { 1899 u8 ext_op; 1900 1901 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 1902 switch (ext_op) { 1903 case EXT_OP_INNER_4: 1904 case EXT_OP_OUTER_4: 1905 case EXT_OP_INNFL_3: 1906 case EXT_OP_OUTFL_3: 1907 return PKT_HASH_TYPE_L4; 1908 default: 1909 return PKT_HASH_TYPE_L3; 1910 } 1911 } 1912 1913 /* returns the following: 1914 * 1 - 1 packet successfully received 1915 * 0 - successful TPA_START, packet not completed yet 1916 * -EBUSY - completion ring does not have all the agg buffers yet 1917 * -ENOMEM - packet aborted due to out of memory 1918 * -EIO - packet aborted due to hw error indicated in BD 1919 */ 1920 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1921 u32 *raw_cons, u8 *event) 1922 { 1923 struct bnxt_napi *bnapi = cpr->bnapi; 1924 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1925 struct net_device *dev = bp->dev; 1926 struct rx_cmp *rxcmp; 1927 struct rx_cmp_ext *rxcmp1; 1928 u32 tmp_raw_cons = *raw_cons; 1929 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1930 struct bnxt_sw_rx_bd *rx_buf; 1931 unsigned int len; 1932 u8 *data_ptr, agg_bufs, cmp_type; 1933 bool xdp_active = false; 1934 dma_addr_t dma_addr; 1935 struct sk_buff *skb; 1936 struct xdp_buff xdp; 1937 u32 flags, misc; 1938 u32 cmpl_ts; 1939 void *data; 1940 int rc = 0; 1941 1942 rxcmp = (struct rx_cmp *) 1943 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1944 1945 cmp_type = RX_CMP_TYPE(rxcmp); 1946 1947 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1948 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1949 goto next_rx_no_prod_no_len; 1950 } 1951 1952 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1953 cp_cons = RING_CMP(tmp_raw_cons); 1954 rxcmp1 = (struct rx_cmp_ext *) 1955 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1956 1957 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1958 return -EBUSY; 1959 1960 /* The valid test of the entry must be done first before 1961 * reading any further. 
1962 */ 1963 dma_rmb(); 1964 prod = rxr->rx_prod; 1965 1966 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 1967 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 1968 bnxt_tpa_start(bp, rxr, cmp_type, 1969 (struct rx_tpa_start_cmp *)rxcmp, 1970 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1971 1972 *event |= BNXT_RX_EVENT; 1973 goto next_rx_no_prod_no_len; 1974 1975 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1976 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 1977 (struct rx_tpa_end_cmp *)rxcmp, 1978 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1979 1980 if (IS_ERR(skb)) 1981 return -EBUSY; 1982 1983 rc = -ENOMEM; 1984 if (likely(skb)) { 1985 bnxt_deliver_skb(bp, bnapi, skb); 1986 rc = 1; 1987 } 1988 *event |= BNXT_RX_EVENT; 1989 goto next_rx_no_prod_no_len; 1990 } 1991 1992 cons = rxcmp->rx_cmp_opaque; 1993 if (unlikely(cons != rxr->rx_next_cons)) { 1994 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 1995 1996 /* 0xffff is forced error, don't print it */ 1997 if (rxr->rx_next_cons != 0xffff) 1998 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1999 cons, rxr->rx_next_cons); 2000 bnxt_sched_reset_rxr(bp, rxr); 2001 if (rc1) 2002 return rc1; 2003 goto next_rx_no_prod_no_len; 2004 } 2005 rx_buf = &rxr->rx_buf_ring[cons]; 2006 data = rx_buf->data; 2007 data_ptr = rx_buf->data_ptr; 2008 prefetch(data_ptr); 2009 2010 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2011 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2012 2013 if (agg_bufs) { 2014 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2015 return -EBUSY; 2016 2017 cp_cons = NEXT_CMP(cp_cons); 2018 *event |= BNXT_AGG_EVENT; 2019 } 2020 *event |= BNXT_RX_EVENT; 2021 2022 rx_buf->data = NULL; 2023 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2024 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2025 2026 bnxt_reuse_rx_data(rxr, cons, data); 2027 if (agg_bufs) 2028 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2029 false); 2030 2031 rc = -EIO; 2032 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2033 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 2034 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2035 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2036 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2037 rx_err); 2038 bnxt_sched_reset_rxr(bp, rxr); 2039 } 2040 } 2041 goto next_rx_no_len; 2042 } 2043 2044 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2045 len = flags >> RX_CMP_LEN_SHIFT; 2046 dma_addr = rx_buf->mapping; 2047 2048 if (bnxt_xdp_attached(bp, rxr)) { 2049 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2050 if (agg_bufs) { 2051 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2052 cp_cons, agg_bufs, 2053 false); 2054 if (!frag_len) { 2055 cpr->sw_stats.rx.rx_oom_discards += 1; 2056 rc = -ENOMEM; 2057 goto next_rx; 2058 } 2059 } 2060 xdp_active = true; 2061 } 2062 2063 if (xdp_active) { 2064 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { 2065 rc = 1; 2066 goto next_rx; 2067 } 2068 } 2069 2070 if (len <= bp->rx_copy_thresh) { 2071 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2072 bnxt_reuse_rx_data(rxr, cons, data); 2073 if (!skb) { 2074 if (agg_bufs) { 2075 if (!xdp_active) 2076 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2077 agg_bufs, false); 2078 else 2079 bnxt_xdp_buff_frags_free(rxr, &xdp); 2080 } 2081 cpr->sw_stats.rx.rx_oom_discards += 1; 2082 rc = -ENOMEM; 2083 goto next_rx; 2084 } 2085 } else { 2086 u32 payload; 2087 2088 if (rx_buf->data_ptr == data_ptr) 2089 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2090 else 
2091 payload = 0; 2092 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2093 payload | len); 2094 if (!skb) { 2095 cpr->sw_stats.rx.rx_oom_discards += 1; 2096 rc = -ENOMEM; 2097 goto next_rx; 2098 } 2099 } 2100 2101 if (agg_bufs) { 2102 if (!xdp_active) { 2103 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2104 if (!skb) { 2105 cpr->sw_stats.rx.rx_oom_discards += 1; 2106 rc = -ENOMEM; 2107 goto next_rx; 2108 } 2109 } else { 2110 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); 2111 if (!skb) { 2112 /* we should be able to free the old skb here */ 2113 bnxt_xdp_buff_frags_free(rxr, &xdp); 2114 cpr->sw_stats.rx.rx_oom_discards += 1; 2115 rc = -ENOMEM; 2116 goto next_rx; 2117 } 2118 } 2119 } 2120 2121 if (RX_CMP_HASH_VALID(rxcmp)) { 2122 enum pkt_hash_types type; 2123 2124 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2125 type = bnxt_rss_ext_op(bp, rxcmp); 2126 } else { 2127 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 2128 2129 /* RSS profiles 1 and 3 with extract code 0 for inner 2130 * 4-tuple 2131 */ 2132 if (hash_type != 1 && hash_type != 3) 2133 type = PKT_HASH_TYPE_L3; 2134 else 2135 type = PKT_HASH_TYPE_L4; 2136 } 2137 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2138 } 2139 2140 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2141 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2142 skb->protocol = eth_type_trans(skb, dev); 2143 2144 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2145 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2146 if (!skb) 2147 goto next_rx; 2148 } 2149 2150 skb_checksum_none_assert(skb); 2151 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2152 if (dev->features & NETIF_F_RXCSUM) { 2153 skb->ip_summed = CHECKSUM_UNNECESSARY; 2154 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2155 } 2156 } else { 2157 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2158 if (dev->features & NETIF_F_RXCSUM) 2159 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 2160 } 2161 } 2162 2163 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2164 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2165 u64 ns, ts; 2166 2167 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2168 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2169 2170 spin_lock_bh(&ptp->ptp_lock); 2171 ns = timecounter_cyc2time(&ptp->tc, ts); 2172 spin_unlock_bh(&ptp->ptp_lock); 2173 memset(skb_hwtstamps(skb), 0, 2174 sizeof(*skb_hwtstamps(skb))); 2175 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2176 } 2177 } 2178 } 2179 bnxt_deliver_skb(bp, bnapi, skb); 2180 rc = 1; 2181 2182 next_rx: 2183 cpr->rx_packets += 1; 2184 cpr->rx_bytes += len; 2185 2186 next_rx_no_len: 2187 rxr->rx_prod = NEXT_RX(prod); 2188 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2189 2190 next_rx_no_prod_no_len: 2191 *raw_cons = tmp_raw_cons; 2192 2193 return rc; 2194 } 2195 2196 /* In netpoll mode, if we are using a combined completion ring, we need to 2197 * discard the rx packets and recycle the buffers. 
2198 */ 2199 static int bnxt_force_rx_discard(struct bnxt *bp, 2200 struct bnxt_cp_ring_info *cpr, 2201 u32 *raw_cons, u8 *event) 2202 { 2203 u32 tmp_raw_cons = *raw_cons; 2204 struct rx_cmp_ext *rxcmp1; 2205 struct rx_cmp *rxcmp; 2206 u16 cp_cons; 2207 u8 cmp_type; 2208 int rc; 2209 2210 cp_cons = RING_CMP(tmp_raw_cons); 2211 rxcmp = (struct rx_cmp *) 2212 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2213 2214 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2215 cp_cons = RING_CMP(tmp_raw_cons); 2216 rxcmp1 = (struct rx_cmp_ext *) 2217 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2218 2219 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2220 return -EBUSY; 2221 2222 /* The valid test of the entry must be done first before 2223 * reading any further. 2224 */ 2225 dma_rmb(); 2226 cmp_type = RX_CMP_TYPE(rxcmp); 2227 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2228 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2229 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2230 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2231 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2232 struct rx_tpa_end_cmp_ext *tpa_end1; 2233 2234 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2235 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2236 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2237 } 2238 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2239 if (rc && rc != -EBUSY) 2240 cpr->sw_stats.rx.rx_netpoll_discards += 1; 2241 return rc; 2242 } 2243 2244 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2245 { 2246 struct bnxt_fw_health *fw_health = bp->fw_health; 2247 u32 reg = fw_health->regs[reg_idx]; 2248 u32 reg_type, reg_off, val = 0; 2249 2250 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2251 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2252 switch (reg_type) { 2253 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2254 pci_read_config_dword(bp->pdev, reg_off, &val); 2255 break; 2256 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2257 reg_off = fw_health->mapped_regs[reg_idx]; 2258 fallthrough; 2259 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2260 val = readl(bp->bar0 + reg_off); 2261 break; 2262 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2263 val = readl(bp->bar1 + reg_off); 2264 break; 2265 } 2266 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2267 val &= fw_health->fw_reset_inprog_reg_mask; 2268 return val; 2269 } 2270 2271 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2272 { 2273 int i; 2274 2275 for (i = 0; i < bp->rx_nr_rings; i++) { 2276 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2277 struct bnxt_ring_grp_info *grp_info; 2278 2279 grp_info = &bp->grp_info[grp_idx]; 2280 if (grp_info->agg_fw_ring_id == ring_id) 2281 return grp_idx; 2282 } 2283 return INVALID_HW_RING_ID; 2284 } 2285 2286 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2287 { 2288 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2289 2290 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2291 return link_info->force_link_speed2; 2292 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2293 return link_info->force_pam4_link_speed; 2294 return link_info->force_link_speed; 2295 } 2296 2297 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2298 { 2299 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2300 2301 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2302 link_info->req_link_speed = link_info->force_link_speed2; 2303 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2304 switch (link_info->req_link_speed) { 2305 case BNXT_LINK_SPEED_50GB_PAM4: 2306 case BNXT_LINK_SPEED_100GB_PAM4: 2307 case BNXT_LINK_SPEED_200GB_PAM4: 2308 case BNXT_LINK_SPEED_400GB_PAM4: 
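			/* Speeds ending in _PAM4 use the standard PAM4
			 * signal mode; the _PAM4_112 variants below select
			 * PAM4-112 signaling instead.
			 */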
2309 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2310 break; 2311 case BNXT_LINK_SPEED_100GB_PAM4_112: 2312 case BNXT_LINK_SPEED_200GB_PAM4_112: 2313 case BNXT_LINK_SPEED_400GB_PAM4_112: 2314 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2315 break; 2316 default: 2317 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2318 } 2319 return; 2320 } 2321 link_info->req_link_speed = link_info->force_link_speed; 2322 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2323 if (link_info->force_pam4_link_speed) { 2324 link_info->req_link_speed = link_info->force_pam4_link_speed; 2325 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2326 } 2327 } 2328 2329 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2330 { 2331 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2332 2333 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2334 link_info->advertising = link_info->auto_link_speeds2; 2335 return; 2336 } 2337 link_info->advertising = link_info->auto_link_speeds; 2338 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2339 } 2340 2341 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2342 { 2343 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2344 2345 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2346 if (link_info->req_link_speed != link_info->force_link_speed2) 2347 return true; 2348 return false; 2349 } 2350 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2351 link_info->req_link_speed != link_info->force_link_speed) 2352 return true; 2353 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2354 link_info->req_link_speed != link_info->force_pam4_link_speed) 2355 return true; 2356 return false; 2357 } 2358 2359 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2360 { 2361 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2362 2363 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2364 if (link_info->advertising != link_info->auto_link_speeds2) 2365 return true; 2366 return false; 2367 } 2368 if (link_info->advertising != link_info->auto_link_speeds || 2369 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2370 return true; 2371 return false; 2372 } 2373 2374 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2375 ((data2) & \ 2376 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2377 2378 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2379 (((data2) & \ 2380 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2381 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2382 2383 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2384 ((data1) & \ 2385 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2386 2387 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2388 (((data1) & \ 2389 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2390 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2391 2392 /* Return true if the workqueue has to be scheduled */ 2393 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2394 { 2395 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2396 2397 switch (err_type) { 2398 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2399 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2400 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2401 break; 2402 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2403 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2404 break; 2405 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2406 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2407 break; 2408 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2409 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2410 char *threshold_type; 2411 bool notify = false; 2412 char *dir_str; 2413 2414 switch (type) { 2415 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2416 threshold_type = "warning"; 2417 break; 2418 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2419 threshold_type = "critical"; 2420 break; 2421 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2422 threshold_type = "fatal"; 2423 break; 2424 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2425 threshold_type = "shutdown"; 2426 break; 2427 default: 2428 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2429 return false; 2430 } 2431 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2432 dir_str = "above"; 2433 notify = true; 2434 } else { 2435 dir_str = "below"; 2436 } 2437 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2438 dir_str, threshold_type); 2439 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2440 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2441 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2442 if (notify) { 2443 bp->thermal_threshold_type = type; 2444 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2445 return true; 2446 } 2447 return false; 2448 } 2449 default: 2450 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2451 err_type); 2452 break; 2453 } 2454 return false; 2455 } 2456 2457 #define BNXT_GET_EVENT_PORT(data) \ 2458 ((data) & \ 2459 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2460 2461 #define BNXT_EVENT_RING_TYPE(data2) \ 2462 ((data2) & \ 2463 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2464 2465 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2466 (BNXT_EVENT_RING_TYPE(data2) == \ 2467 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2468 2469 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2470 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2471 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2472 2473 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2474 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2475 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2476 2477 #define BNXT_PHC_BITS 48 2478 2479 static int bnxt_async_event_process(struct bnxt *bp, 2480 struct hwrm_async_event_cmpl *cmpl) 2481 { 2482 u16 event_id = le16_to_cpu(cmpl->event_id); 2483 u32 data1 = le32_to_cpu(cmpl->event_data1); 2484 u32 data2 = le32_to_cpu(cmpl->event_data2); 2485 2486 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2487 event_id, data1, data2); 2488 2489 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 2490 switch (event_id) { 2491 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2492 struct bnxt_link_info *link_info = &bp->link_info; 2493 2494 if (BNXT_VF(bp)) 2495 goto async_event_process_exit; 
2496 2497 /* print unsupported speed warning in forced speed mode only */ 2498 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2499 (data1 & 0x20000)) { 2500 u16 fw_speed = bnxt_get_force_speed(link_info); 2501 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2502 2503 if (speed != SPEED_UNKNOWN) 2504 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2505 speed); 2506 } 2507 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2508 } 2509 fallthrough; 2510 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2511 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2512 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2513 fallthrough; 2514 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2515 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2516 break; 2517 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2518 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2519 break; 2520 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2521 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2522 2523 if (BNXT_VF(bp)) 2524 break; 2525 2526 if (bp->pf.port_id != port_id) 2527 break; 2528 2529 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2530 break; 2531 } 2532 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2533 if (BNXT_PF(bp)) 2534 goto async_event_process_exit; 2535 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2536 break; 2537 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2538 char *type_str = "Solicited"; 2539 2540 if (!bp->fw_health) 2541 goto async_event_process_exit; 2542 2543 bp->fw_reset_timestamp = jiffies; 2544 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2545 if (!bp->fw_reset_min_dsecs) 2546 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2547 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2548 if (!bp->fw_reset_max_dsecs) 2549 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2550 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2551 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2552 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2553 type_str = "Fatal"; 2554 bp->fw_health->fatalities++; 2555 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2556 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2557 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2558 type_str = "Non-fatal"; 2559 bp->fw_health->survivals++; 2560 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2561 } 2562 netif_warn(bp, hw, bp->dev, 2563 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2564 type_str, data1, data2, 2565 bp->fw_reset_min_dsecs * 100, 2566 bp->fw_reset_max_dsecs * 100); 2567 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2568 break; 2569 } 2570 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2571 struct bnxt_fw_health *fw_health = bp->fw_health; 2572 char *status_desc = "healthy"; 2573 u32 status; 2574 2575 if (!fw_health) 2576 goto async_event_process_exit; 2577 2578 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2579 fw_health->enabled = false; 2580 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2581 break; 2582 } 2583 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2584 fw_health->tmr_multiplier = 2585 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2586 bp->current_interval * 10); 2587 fw_health->tmr_counter = fw_health->tmr_multiplier; 2588 if (!fw_health->enabled) 2589 fw_health->last_fw_heartbeat = 2590 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2591 fw_health->last_fw_reset_cnt = 2592 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2593 status = 
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2594 if (status != BNXT_FW_STATUS_HEALTHY) 2595 status_desc = "unhealthy"; 2596 netif_info(bp, drv, bp->dev, 2597 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2598 fw_health->primary ? "primary" : "backup", status, 2599 status_desc, fw_health->last_fw_reset_cnt); 2600 if (!fw_health->enabled) { 2601 /* Make sure tmr_counter is set and visible to 2602 * bnxt_health_check() before setting enabled to true. 2603 */ 2604 smp_wmb(); 2605 fw_health->enabled = true; 2606 } 2607 goto async_event_process_exit; 2608 } 2609 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2610 netif_notice(bp, hw, bp->dev, 2611 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2612 data1, data2); 2613 goto async_event_process_exit; 2614 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2615 struct bnxt_rx_ring_info *rxr; 2616 u16 grp_idx; 2617 2618 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2619 goto async_event_process_exit; 2620 2621 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2622 BNXT_EVENT_RING_TYPE(data2), data1); 2623 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2624 goto async_event_process_exit; 2625 2626 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2627 if (grp_idx == INVALID_HW_RING_ID) { 2628 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2629 data1); 2630 goto async_event_process_exit; 2631 } 2632 rxr = bp->bnapi[grp_idx]->rx_ring; 2633 bnxt_sched_reset_rxr(bp, rxr); 2634 goto async_event_process_exit; 2635 } 2636 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2637 struct bnxt_fw_health *fw_health = bp->fw_health; 2638 2639 netif_notice(bp, hw, bp->dev, 2640 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2641 data1, data2); 2642 if (fw_health) { 2643 fw_health->echo_req_data1 = data1; 2644 fw_health->echo_req_data2 = data2; 2645 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2646 break; 2647 } 2648 goto async_event_process_exit; 2649 } 2650 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2651 bnxt_ptp_pps_event(bp, data1, data2); 2652 goto async_event_process_exit; 2653 } 2654 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2655 if (bnxt_event_error_report(bp, data1, data2)) 2656 break; 2657 goto async_event_process_exit; 2658 } 2659 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2660 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2661 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2662 if (BNXT_PTP_USE_RTC(bp)) { 2663 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2664 u64 ns; 2665 2666 if (!ptp) 2667 goto async_event_process_exit; 2668 2669 spin_lock_bh(&ptp->ptp_lock); 2670 bnxt_ptp_update_current_time(bp); 2671 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2672 BNXT_PHC_BITS) | ptp->current_time); 2673 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2674 spin_unlock_bh(&ptp->ptp_lock); 2675 } 2676 break; 2677 } 2678 goto async_event_process_exit; 2679 } 2680 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2681 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2682 2683 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2684 goto async_event_process_exit; 2685 } 2686 default: 2687 goto async_event_process_exit; 2688 } 2689 __bnxt_queue_sp_work(bp); 2690 async_event_process_exit: 2691 return 0; 2692 } 2693 2694 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2695 { 2696 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2697 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2698 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2699 
(struct hwrm_fwd_req_cmpl *)txcmp; 2700 2701 switch (cmpl_type) { 2702 case CMPL_BASE_TYPE_HWRM_DONE: 2703 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2704 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2705 break; 2706 2707 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2708 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2709 2710 if ((vf_id < bp->pf.first_vf_id) || 2711 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2712 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2713 vf_id); 2714 return -EINVAL; 2715 } 2716 2717 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2718 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2719 break; 2720 2721 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2722 bnxt_async_event_process(bp, 2723 (struct hwrm_async_event_cmpl *)txcmp); 2724 break; 2725 2726 default: 2727 break; 2728 } 2729 2730 return 0; 2731 } 2732 2733 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2734 { 2735 struct bnxt_napi *bnapi = dev_instance; 2736 struct bnxt *bp = bnapi->bp; 2737 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2738 u32 cons = RING_CMP(cpr->cp_raw_cons); 2739 2740 cpr->event_ctr++; 2741 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2742 napi_schedule(&bnapi->napi); 2743 return IRQ_HANDLED; 2744 } 2745 2746 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2747 { 2748 u32 raw_cons = cpr->cp_raw_cons; 2749 u16 cons = RING_CMP(raw_cons); 2750 struct tx_cmp *txcmp; 2751 2752 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2753 2754 return TX_CMP_VALID(txcmp, raw_cons); 2755 } 2756 2757 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2758 { 2759 struct bnxt_napi *bnapi = dev_instance; 2760 struct bnxt *bp = bnapi->bp; 2761 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2762 u32 cons = RING_CMP(cpr->cp_raw_cons); 2763 u32 int_status; 2764 2765 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2766 2767 if (!bnxt_has_work(bp, cpr)) { 2768 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2769 /* return if erroneous interrupt */ 2770 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2771 return IRQ_NONE; 2772 } 2773 2774 /* disable ring IRQ */ 2775 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2776 2777 /* Return here if interrupt is shared and is disabled. */ 2778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2779 return IRQ_HANDLED; 2780 2781 napi_schedule(&bnapi->napi); 2782 return IRQ_HANDLED; 2783 } 2784 2785 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2786 int budget) 2787 { 2788 struct bnxt_napi *bnapi = cpr->bnapi; 2789 u32 raw_cons = cpr->cp_raw_cons; 2790 u32 cons; 2791 int rx_pkts = 0; 2792 u8 event = 0; 2793 struct tx_cmp *txcmp; 2794 2795 cpr->has_more_work = 0; 2796 cpr->had_work_done = 1; 2797 while (1) { 2798 u8 cmp_type; 2799 int rc; 2800 2801 cons = RING_CMP(raw_cons); 2802 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2803 2804 if (!TX_CMP_VALID(txcmp, raw_cons)) 2805 break; 2806 2807 /* The valid test of the entry must be done first before 2808 * reading any further. 
2809 */ 2810 dma_rmb(); 2811 cmp_type = TX_CMP_TYPE(txcmp); 2812 if (cmp_type == CMP_TYPE_TX_L2_CMP || 2813 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 2814 u32 opaque = txcmp->tx_cmp_opaque; 2815 struct bnxt_tx_ring_info *txr; 2816 u16 tx_freed; 2817 2818 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2819 event |= BNXT_TX_CMP_EVENT; 2820 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 2821 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 2822 else 2823 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2824 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2825 bp->tx_ring_mask; 2826 /* return full budget so NAPI will complete. */ 2827 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2828 rx_pkts = budget; 2829 raw_cons = NEXT_RAW_CMP(raw_cons); 2830 if (budget) 2831 cpr->has_more_work = 1; 2832 break; 2833 } 2834 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 2835 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2836 if (likely(budget)) 2837 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2838 else 2839 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2840 &event); 2841 if (likely(rc >= 0)) 2842 rx_pkts += rc; 2843 /* Increment rx_pkts when rc is -ENOMEM to count towards 2844 * the NAPI budget. Otherwise, we may potentially loop 2845 * here forever if we consistently cannot allocate 2846 * buffers. 2847 */ 2848 else if (rc == -ENOMEM && budget) 2849 rx_pkts++; 2850 else if (rc == -EBUSY) /* partial completion */ 2851 break; 2852 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 2853 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 2854 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 2855 bnxt_hwrm_handler(bp, txcmp); 2856 } 2857 raw_cons = NEXT_RAW_CMP(raw_cons); 2858 2859 if (rx_pkts && rx_pkts == budget) { 2860 cpr->has_more_work = 1; 2861 break; 2862 } 2863 } 2864 2865 if (event & BNXT_REDIRECT_EVENT) 2866 xdp_do_flush(); 2867 2868 if (event & BNXT_TX_EVENT) { 2869 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 2870 u16 prod = txr->tx_prod; 2871 2872 /* Sync BD data before updating doorbell */ 2873 wmb(); 2874 2875 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2876 } 2877 2878 cpr->cp_raw_cons = raw_cons; 2879 bnapi->events |= event; 2880 return rx_pkts; 2881 } 2882 2883 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2884 int budget) 2885 { 2886 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 2887 bnapi->tx_int(bp, bnapi, budget); 2888 2889 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2890 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2891 2892 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2893 } 2894 if (bnapi->events & BNXT_AGG_EVENT) { 2895 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2896 2897 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2898 } 2899 bnapi->events &= BNXT_TX_CMP_EVENT; 2900 } 2901 2902 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2903 int budget) 2904 { 2905 struct bnxt_napi *bnapi = cpr->bnapi; 2906 int rx_pkts; 2907 2908 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2909 2910 /* ACK completion ring before freeing tx ring and producing new 2911 * buffers in rx/agg rings to prevent overflowing the completion 2912 * ring. 
2913 */ 2914 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2915 2916 __bnxt_poll_work_done(bp, bnapi, budget); 2917 return rx_pkts; 2918 } 2919 2920 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2921 { 2922 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2923 struct bnxt *bp = bnapi->bp; 2924 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2925 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2926 struct tx_cmp *txcmp; 2927 struct rx_cmp_ext *rxcmp1; 2928 u32 cp_cons, tmp_raw_cons; 2929 u32 raw_cons = cpr->cp_raw_cons; 2930 bool flush_xdp = false; 2931 u32 rx_pkts = 0; 2932 u8 event = 0; 2933 2934 while (1) { 2935 int rc; 2936 2937 cp_cons = RING_CMP(raw_cons); 2938 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2939 2940 if (!TX_CMP_VALID(txcmp, raw_cons)) 2941 break; 2942 2943 /* The valid test of the entry must be done first before 2944 * reading any further. 2945 */ 2946 dma_rmb(); 2947 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2948 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2949 cp_cons = RING_CMP(tmp_raw_cons); 2950 rxcmp1 = (struct rx_cmp_ext *) 2951 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2952 2953 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2954 break; 2955 2956 /* force an error to recycle the buffer */ 2957 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2958 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2959 2960 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2961 if (likely(rc == -EIO) && budget) 2962 rx_pkts++; 2963 else if (rc == -EBUSY) /* partial completion */ 2964 break; 2965 if (event & BNXT_REDIRECT_EVENT) 2966 flush_xdp = true; 2967 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2968 CMPL_BASE_TYPE_HWRM_DONE)) { 2969 bnxt_hwrm_handler(bp, txcmp); 2970 } else { 2971 netdev_err(bp->dev, 2972 "Invalid completion received on special ring\n"); 2973 } 2974 raw_cons = NEXT_RAW_CMP(raw_cons); 2975 2976 if (rx_pkts == budget) 2977 break; 2978 } 2979 2980 cpr->cp_raw_cons = raw_cons; 2981 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2982 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2983 2984 if (event & BNXT_AGG_EVENT) 2985 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2986 if (flush_xdp) 2987 xdp_do_flush(); 2988 2989 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2990 napi_complete_done(napi, rx_pkts); 2991 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2992 } 2993 return rx_pkts; 2994 } 2995 2996 static int bnxt_poll(struct napi_struct *napi, int budget) 2997 { 2998 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2999 struct bnxt *bp = bnapi->bp; 3000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3001 int work_done = 0; 3002 3003 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3004 napi_complete(napi); 3005 return 0; 3006 } 3007 while (1) { 3008 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3009 3010 if (work_done >= budget) { 3011 if (!budget) 3012 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3013 break; 3014 } 3015 3016 if (!bnxt_has_work(bp, cpr)) { 3017 if (napi_complete_done(napi, work_done)) 3018 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3019 break; 3020 } 3021 } 3022 if (bp->flags & BNXT_FLAG_DIM) { 3023 struct dim_sample dim_sample = {}; 3024 3025 dim_update_sample(cpr->event_ctr, 3026 cpr->rx_packets, 3027 cpr->rx_bytes, 3028 &dim_sample); 3029 net_dim(&cpr->dim, dim_sample); 3030 } 3031 return work_done; 3032 } 3033 3034 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3035 { 3036 struct bnxt_cp_ring_info *cpr = 
&bnapi->cp_ring; 3037 int i, work_done = 0; 3038 3039 for (i = 0; i < cpr->cp_ring_count; i++) { 3040 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3041 3042 if (cpr2->had_nqe_notify) { 3043 work_done += __bnxt_poll_work(bp, cpr2, 3044 budget - work_done); 3045 cpr->has_more_work |= cpr2->has_more_work; 3046 } 3047 } 3048 return work_done; 3049 } 3050 3051 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3052 u64 dbr_type, int budget) 3053 { 3054 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3055 int i; 3056 3057 for (i = 0; i < cpr->cp_ring_count; i++) { 3058 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3059 struct bnxt_db_info *db; 3060 3061 if (cpr2->had_work_done) { 3062 u32 tgl = 0; 3063 3064 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3065 cpr2->had_nqe_notify = 0; 3066 tgl = cpr2->toggle; 3067 } 3068 db = &cpr2->cp_db; 3069 bnxt_writeq(bp, 3070 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3071 DB_RING_IDX(db, cpr2->cp_raw_cons), 3072 db->doorbell); 3073 cpr2->had_work_done = 0; 3074 } 3075 } 3076 __bnxt_poll_work_done(bp, bnapi, budget); 3077 } 3078 3079 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3080 { 3081 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3082 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3083 struct bnxt_cp_ring_info *cpr_rx; 3084 u32 raw_cons = cpr->cp_raw_cons; 3085 struct bnxt *bp = bnapi->bp; 3086 struct nqe_cn *nqcmp; 3087 int work_done = 0; 3088 u32 cons; 3089 3090 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3091 napi_complete(napi); 3092 return 0; 3093 } 3094 if (cpr->has_more_work) { 3095 cpr->has_more_work = 0; 3096 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3097 } 3098 while (1) { 3099 u16 type; 3100 3101 cons = RING_CMP(raw_cons); 3102 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3103 3104 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3105 if (cpr->has_more_work) 3106 break; 3107 3108 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3109 budget); 3110 cpr->cp_raw_cons = raw_cons; 3111 if (napi_complete_done(napi, work_done)) 3112 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3113 cpr->cp_raw_cons); 3114 goto poll_done; 3115 } 3116 3117 /* The valid test of the entry must be done first before 3118 * reading any further. 
3119 */ 3120 dma_rmb(); 3121 3122 type = le16_to_cpu(nqcmp->type); 3123 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3124 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3125 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3126 struct bnxt_cp_ring_info *cpr2; 3127 3128 /* No more budget for RX work */ 3129 if (budget && work_done >= budget && 3130 cq_type == BNXT_NQ_HDL_TYPE_RX) 3131 break; 3132 3133 idx = BNXT_NQ_HDL_IDX(idx); 3134 cpr2 = &cpr->cp_ring_arr[idx]; 3135 cpr2->had_nqe_notify = 1; 3136 cpr2->toggle = NQE_CN_TOGGLE(type); 3137 work_done += __bnxt_poll_work(bp, cpr2, 3138 budget - work_done); 3139 cpr->has_more_work |= cpr2->has_more_work; 3140 } else { 3141 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3142 } 3143 raw_cons = NEXT_RAW_CMP(raw_cons); 3144 } 3145 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3146 if (raw_cons != cpr->cp_raw_cons) { 3147 cpr->cp_raw_cons = raw_cons; 3148 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3149 } 3150 poll_done: 3151 cpr_rx = &cpr->cp_ring_arr[0]; 3152 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3153 (bp->flags & BNXT_FLAG_DIM)) { 3154 struct dim_sample dim_sample = {}; 3155 3156 dim_update_sample(cpr->event_ctr, 3157 cpr_rx->rx_packets, 3158 cpr_rx->rx_bytes, 3159 &dim_sample); 3160 net_dim(&cpr->dim, dim_sample); 3161 } 3162 return work_done; 3163 } 3164 3165 static void bnxt_free_tx_skbs(struct bnxt *bp) 3166 { 3167 int i, max_idx; 3168 struct pci_dev *pdev = bp->pdev; 3169 3170 if (!bp->tx_ring) 3171 return; 3172 3173 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3174 for (i = 0; i < bp->tx_nr_rings; i++) { 3175 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3176 int j; 3177 3178 if (!txr->tx_buf_ring) 3179 continue; 3180 3181 for (j = 0; j < max_idx;) { 3182 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 3183 struct sk_buff *skb; 3184 int k, last; 3185 3186 if (i < bp->tx_nr_rings_xdp && 3187 tx_buf->action == XDP_REDIRECT) { 3188 dma_unmap_single(&pdev->dev, 3189 dma_unmap_addr(tx_buf, mapping), 3190 dma_unmap_len(tx_buf, len), 3191 DMA_TO_DEVICE); 3192 xdp_return_frame(tx_buf->xdpf); 3193 tx_buf->action = 0; 3194 tx_buf->xdpf = NULL; 3195 j++; 3196 continue; 3197 } 3198 3199 skb = tx_buf->skb; 3200 if (!skb) { 3201 j++; 3202 continue; 3203 } 3204 3205 tx_buf->skb = NULL; 3206 3207 if (tx_buf->is_push) { 3208 dev_kfree_skb(skb); 3209 j += 2; 3210 continue; 3211 } 3212 3213 dma_unmap_single(&pdev->dev, 3214 dma_unmap_addr(tx_buf, mapping), 3215 skb_headlen(skb), 3216 DMA_TO_DEVICE); 3217 3218 last = tx_buf->nr_frags; 3219 j += 2; 3220 for (k = 0; k < last; k++, j++) { 3221 int ring_idx = j & bp->tx_ring_mask; 3222 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 3223 3224 tx_buf = &txr->tx_buf_ring[ring_idx]; 3225 dma_unmap_page( 3226 &pdev->dev, 3227 dma_unmap_addr(tx_buf, mapping), 3228 skb_frag_size(frag), DMA_TO_DEVICE); 3229 } 3230 dev_kfree_skb(skb); 3231 } 3232 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 3233 } 3234 } 3235 3236 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 3237 { 3238 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3239 struct pci_dev *pdev = bp->pdev; 3240 struct bnxt_tpa_idx_map *map; 3241 int i, max_idx, max_agg_idx; 3242 3243 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3244 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3245 if (!rxr->rx_tpa) 3246 goto skip_rx_tpa_free; 3247 3248 for (i = 0; i < bp->max_tpa; i++) { 3249 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3250 u8 *data = tpa_info->data; 3251 3252 if (!data) 3253 continue; 3254 3255 
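		/* Unmap the TPA staging buffer and hand the page fragment
		 * back to the frag allocator.
		 */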
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 3256 bp->rx_buf_use_size, bp->rx_dir, 3257 DMA_ATTR_WEAK_ORDERING); 3258 3259 tpa_info->data = NULL; 3260 3261 skb_free_frag(data); 3262 } 3263 3264 skip_rx_tpa_free: 3265 if (!rxr->rx_buf_ring) 3266 goto skip_rx_buf_free; 3267 3268 for (i = 0; i < max_idx; i++) { 3269 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3270 dma_addr_t mapping = rx_buf->mapping; 3271 void *data = rx_buf->data; 3272 3273 if (!data) 3274 continue; 3275 3276 rx_buf->data = NULL; 3277 if (BNXT_RX_PAGE_MODE(bp)) { 3278 page_pool_recycle_direct(rxr->page_pool, data); 3279 } else { 3280 dma_unmap_single_attrs(&pdev->dev, mapping, 3281 bp->rx_buf_use_size, bp->rx_dir, 3282 DMA_ATTR_WEAK_ORDERING); 3283 skb_free_frag(data); 3284 } 3285 } 3286 3287 skip_rx_buf_free: 3288 if (!rxr->rx_agg_ring) 3289 goto skip_rx_agg_free; 3290 3291 for (i = 0; i < max_agg_idx; i++) { 3292 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3293 struct page *page = rx_agg_buf->page; 3294 3295 if (!page) 3296 continue; 3297 3298 rx_agg_buf->page = NULL; 3299 __clear_bit(i, rxr->rx_agg_bmap); 3300 3301 page_pool_recycle_direct(rxr->page_pool, page); 3302 } 3303 3304 skip_rx_agg_free: 3305 map = rxr->rx_tpa_idx_map; 3306 if (map) 3307 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3308 } 3309 3310 static void bnxt_free_rx_skbs(struct bnxt *bp) 3311 { 3312 int i; 3313 3314 if (!bp->rx_ring) 3315 return; 3316 3317 for (i = 0; i < bp->rx_nr_rings; i++) 3318 bnxt_free_one_rx_ring_skbs(bp, i); 3319 } 3320 3321 static void bnxt_free_skbs(struct bnxt *bp) 3322 { 3323 bnxt_free_tx_skbs(bp); 3324 bnxt_free_rx_skbs(bp); 3325 } 3326 3327 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3328 { 3329 u8 init_val = ctxm->init_value; 3330 u16 offset = ctxm->init_offset; 3331 u8 *p2 = p; 3332 int i; 3333 3334 if (!init_val) 3335 return; 3336 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3337 memset(p, init_val, len); 3338 return; 3339 } 3340 for (i = 0; i < len; i += ctxm->entry_size) 3341 *(p2 + i + offset) = init_val; 3342 } 3343 3344 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3345 { 3346 struct pci_dev *pdev = bp->pdev; 3347 int i; 3348 3349 if (!rmem->pg_arr) 3350 goto skip_pages; 3351 3352 for (i = 0; i < rmem->nr_pages; i++) { 3353 if (!rmem->pg_arr[i]) 3354 continue; 3355 3356 dma_free_coherent(&pdev->dev, rmem->page_size, 3357 rmem->pg_arr[i], rmem->dma_arr[i]); 3358 3359 rmem->pg_arr[i] = NULL; 3360 } 3361 skip_pages: 3362 if (rmem->pg_tbl) { 3363 size_t pg_tbl_size = rmem->nr_pages * 8; 3364 3365 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3366 pg_tbl_size = rmem->page_size; 3367 dma_free_coherent(&pdev->dev, pg_tbl_size, 3368 rmem->pg_tbl, rmem->pg_tbl_map); 3369 rmem->pg_tbl = NULL; 3370 } 3371 if (rmem->vmem_size && *rmem->vmem) { 3372 vfree(*rmem->vmem); 3373 *rmem->vmem = NULL; 3374 } 3375 } 3376 3377 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3378 { 3379 struct pci_dev *pdev = bp->pdev; 3380 u64 valid_bit = 0; 3381 int i; 3382 3383 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3384 valid_bit = PTU_PTE_VALID; 3385 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3386 size_t pg_tbl_size = rmem->nr_pages * 8; 3387 3388 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3389 pg_tbl_size = rmem->page_size; 3390 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3391 &rmem->pg_tbl_map, 3392 GFP_KERNEL); 3393 if 
(!rmem->pg_tbl) 3394 return -ENOMEM; 3395 } 3396 3397 for (i = 0; i < rmem->nr_pages; i++) { 3398 u64 extra_bits = valid_bit; 3399 3400 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3401 rmem->page_size, 3402 &rmem->dma_arr[i], 3403 GFP_KERNEL); 3404 if (!rmem->pg_arr[i]) 3405 return -ENOMEM; 3406 3407 if (rmem->ctx_mem) 3408 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3409 rmem->page_size); 3410 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3411 if (i == rmem->nr_pages - 2 && 3412 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3413 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3414 else if (i == rmem->nr_pages - 1 && 3415 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3416 extra_bits |= PTU_PTE_LAST; 3417 rmem->pg_tbl[i] = 3418 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3419 } 3420 } 3421 3422 if (rmem->vmem_size) { 3423 *rmem->vmem = vzalloc(rmem->vmem_size); 3424 if (!(*rmem->vmem)) 3425 return -ENOMEM; 3426 } 3427 return 0; 3428 } 3429 3430 static void bnxt_free_tpa_info(struct bnxt *bp) 3431 { 3432 int i, j; 3433 3434 for (i = 0; i < bp->rx_nr_rings; i++) { 3435 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3436 3437 kfree(rxr->rx_tpa_idx_map); 3438 rxr->rx_tpa_idx_map = NULL; 3439 if (rxr->rx_tpa) { 3440 for (j = 0; j < bp->max_tpa; j++) { 3441 kfree(rxr->rx_tpa[j].agg_arr); 3442 rxr->rx_tpa[j].agg_arr = NULL; 3443 } 3444 } 3445 kfree(rxr->rx_tpa); 3446 rxr->rx_tpa = NULL; 3447 } 3448 } 3449 3450 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3451 { 3452 int i, j; 3453 3454 bp->max_tpa = MAX_TPA; 3455 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3456 if (!bp->max_tpa_v2) 3457 return 0; 3458 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3459 } 3460 3461 for (i = 0; i < bp->rx_nr_rings; i++) { 3462 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3463 struct rx_agg_cmp *agg; 3464 3465 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3466 GFP_KERNEL); 3467 if (!rxr->rx_tpa) 3468 return -ENOMEM; 3469 3470 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3471 continue; 3472 for (j = 0; j < bp->max_tpa; j++) { 3473 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3474 if (!agg) 3475 return -ENOMEM; 3476 rxr->rx_tpa[j].agg_arr = agg; 3477 } 3478 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3479 GFP_KERNEL); 3480 if (!rxr->rx_tpa_idx_map) 3481 return -ENOMEM; 3482 } 3483 return 0; 3484 } 3485 3486 static void bnxt_free_rx_rings(struct bnxt *bp) 3487 { 3488 int i; 3489 3490 if (!bp->rx_ring) 3491 return; 3492 3493 bnxt_free_tpa_info(bp); 3494 for (i = 0; i < bp->rx_nr_rings; i++) { 3495 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3496 struct bnxt_ring_struct *ring; 3497 3498 if (rxr->xdp_prog) 3499 bpf_prog_put(rxr->xdp_prog); 3500 3501 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3502 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3503 3504 page_pool_destroy(rxr->page_pool); 3505 rxr->page_pool = NULL; 3506 3507 kfree(rxr->rx_agg_bmap); 3508 rxr->rx_agg_bmap = NULL; 3509 3510 ring = &rxr->rx_ring_struct; 3511 bnxt_free_ring(bp, &ring->ring_mem); 3512 3513 ring = &rxr->rx_agg_ring_struct; 3514 bnxt_free_ring(bp, &ring->ring_mem); 3515 } 3516 } 3517 3518 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3519 struct bnxt_rx_ring_info *rxr) 3520 { 3521 struct page_pool_params pp = { 0 }; 3522 3523 pp.pool_size = bp->rx_agg_ring_size; 3524 if (BNXT_RX_PAGE_MODE(bp)) 3525 pp.pool_size += bp->rx_ring_size; 3526 pp.nid = dev_to_node(&bp->pdev->dev); 3527 pp.napi = &rxr->bnapi->napi; 3528 pp.netdev = bp->dev; 3529 pp.dev = &bp->pdev->dev; 3530 pp.dma_dir = bp->rx_dir; 3531 
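	/* With PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV set below, the page
	 * pool maps its pages for DMA and syncs up to max_len bytes for the
	 * device, so pages taken from this pool need no explicit mapping in
	 * the rx path.
	 */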
pp.max_len = PAGE_SIZE; 3532 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3533 3534 rxr->page_pool = page_pool_create(&pp); 3535 if (IS_ERR(rxr->page_pool)) { 3536 int err = PTR_ERR(rxr->page_pool); 3537 3538 rxr->page_pool = NULL; 3539 return err; 3540 } 3541 return 0; 3542 } 3543 3544 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3545 { 3546 int i, rc = 0, agg_rings = 0; 3547 3548 if (!bp->rx_ring) 3549 return -ENOMEM; 3550 3551 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3552 agg_rings = 1; 3553 3554 for (i = 0; i < bp->rx_nr_rings; i++) { 3555 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3556 struct bnxt_ring_struct *ring; 3557 3558 ring = &rxr->rx_ring_struct; 3559 3560 rc = bnxt_alloc_rx_page_pool(bp, rxr); 3561 if (rc) 3562 return rc; 3563 3564 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3565 if (rc < 0) 3566 return rc; 3567 3568 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3569 MEM_TYPE_PAGE_POOL, 3570 rxr->page_pool); 3571 if (rc) { 3572 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3573 return rc; 3574 } 3575 3576 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3577 if (rc) 3578 return rc; 3579 3580 ring->grp_idx = i; 3581 if (agg_rings) { 3582 u16 mem_size; 3583 3584 ring = &rxr->rx_agg_ring_struct; 3585 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3586 if (rc) 3587 return rc; 3588 3589 ring->grp_idx = i; 3590 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3591 mem_size = rxr->rx_agg_bmap_size / 8; 3592 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3593 if (!rxr->rx_agg_bmap) 3594 return -ENOMEM; 3595 } 3596 } 3597 if (bp->flags & BNXT_FLAG_TPA) 3598 rc = bnxt_alloc_tpa_info(bp); 3599 return rc; 3600 } 3601 3602 static void bnxt_free_tx_rings(struct bnxt *bp) 3603 { 3604 int i; 3605 struct pci_dev *pdev = bp->pdev; 3606 3607 if (!bp->tx_ring) 3608 return; 3609 3610 for (i = 0; i < bp->tx_nr_rings; i++) { 3611 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3612 struct bnxt_ring_struct *ring; 3613 3614 if (txr->tx_push) { 3615 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3616 txr->tx_push, txr->tx_push_mapping); 3617 txr->tx_push = NULL; 3618 } 3619 3620 ring = &txr->tx_ring_struct; 3621 3622 bnxt_free_ring(bp, &ring->ring_mem); 3623 } 3624 } 3625 3626 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3627 ((tc) * (bp)->tx_nr_rings_per_tc) 3628 3629 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3630 ((tx) % (bp)->tx_nr_rings_per_tc) 3631 3632 #define BNXT_RING_TO_TC(bp, tx) \ 3633 ((tx) / (bp)->tx_nr_rings_per_tc) 3634 3635 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3636 { 3637 int i, j, rc; 3638 struct pci_dev *pdev = bp->pdev; 3639 3640 bp->tx_push_size = 0; 3641 if (bp->tx_push_thresh) { 3642 int push_size; 3643 3644 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3645 bp->tx_push_thresh); 3646 3647 if (push_size > 256) { 3648 push_size = 0; 3649 bp->tx_push_thresh = 0; 3650 } 3651 3652 bp->tx_push_size = push_size; 3653 } 3654 3655 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3656 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3657 struct bnxt_ring_struct *ring; 3658 u8 qidx; 3659 3660 ring = &txr->tx_ring_struct; 3661 3662 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3663 if (rc) 3664 return rc; 3665 3666 ring->grp_idx = txr->bnapi->index; 3667 if (bp->tx_push_size) { 3668 dma_addr_t mapping; 3669 3670 /* One pre-allocated DMA buffer to backup 3671 * TX push operation 3672 */ 3673 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3674 bp->tx_push_size, 3675 &txr->tx_push_mapping, 3676 GFP_KERNEL); 3677 3678 if (!txr->tx_push) 3679 return -ENOMEM; 3680 3681 mapping = 
txr->tx_push_mapping + 3682 sizeof(struct tx_push_bd); 3683 txr->data_mapping = cpu_to_le64(mapping); 3684 } 3685 qidx = bp->tc_to_qidx[j]; 3686 ring->queue_id = bp->q_info[qidx].queue_id; 3687 spin_lock_init(&txr->xdp_tx_lock); 3688 if (i < bp->tx_nr_rings_xdp) 3689 continue; 3690 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3691 j++; 3692 } 3693 return 0; 3694 } 3695 3696 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3697 { 3698 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3699 3700 kfree(cpr->cp_desc_ring); 3701 cpr->cp_desc_ring = NULL; 3702 ring->ring_mem.pg_arr = NULL; 3703 kfree(cpr->cp_desc_mapping); 3704 cpr->cp_desc_mapping = NULL; 3705 ring->ring_mem.dma_arr = NULL; 3706 } 3707 3708 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3709 { 3710 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3711 if (!cpr->cp_desc_ring) 3712 return -ENOMEM; 3713 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3714 GFP_KERNEL); 3715 if (!cpr->cp_desc_mapping) 3716 return -ENOMEM; 3717 return 0; 3718 } 3719 3720 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3721 { 3722 int i; 3723 3724 if (!bp->bnapi) 3725 return; 3726 for (i = 0; i < bp->cp_nr_rings; i++) { 3727 struct bnxt_napi *bnapi = bp->bnapi[i]; 3728 3729 if (!bnapi) 3730 continue; 3731 bnxt_free_cp_arrays(&bnapi->cp_ring); 3732 } 3733 } 3734 3735 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 3736 { 3737 int i, n = bp->cp_nr_pages; 3738 3739 for (i = 0; i < bp->cp_nr_rings; i++) { 3740 struct bnxt_napi *bnapi = bp->bnapi[i]; 3741 int rc; 3742 3743 if (!bnapi) 3744 continue; 3745 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 3746 if (rc) 3747 return rc; 3748 } 3749 return 0; 3750 } 3751 3752 static void bnxt_free_cp_rings(struct bnxt *bp) 3753 { 3754 int i; 3755 3756 if (!bp->bnapi) 3757 return; 3758 3759 for (i = 0; i < bp->cp_nr_rings; i++) { 3760 struct bnxt_napi *bnapi = bp->bnapi[i]; 3761 struct bnxt_cp_ring_info *cpr; 3762 struct bnxt_ring_struct *ring; 3763 int j; 3764 3765 if (!bnapi) 3766 continue; 3767 3768 cpr = &bnapi->cp_ring; 3769 ring = &cpr->cp_ring_struct; 3770 3771 bnxt_free_ring(bp, &ring->ring_mem); 3772 3773 if (!cpr->cp_ring_arr) 3774 continue; 3775 3776 for (j = 0; j < cpr->cp_ring_count; j++) { 3777 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3778 3779 ring = &cpr2->cp_ring_struct; 3780 bnxt_free_ring(bp, &ring->ring_mem); 3781 bnxt_free_cp_arrays(cpr2); 3782 } 3783 kfree(cpr->cp_ring_arr); 3784 cpr->cp_ring_arr = NULL; 3785 cpr->cp_ring_count = 0; 3786 } 3787 } 3788 3789 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 3790 struct bnxt_cp_ring_info *cpr) 3791 { 3792 struct bnxt_ring_mem_info *rmem; 3793 struct bnxt_ring_struct *ring; 3794 int rc; 3795 3796 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 3797 if (rc) { 3798 bnxt_free_cp_arrays(cpr); 3799 return -ENOMEM; 3800 } 3801 ring = &cpr->cp_ring_struct; 3802 rmem = &ring->ring_mem; 3803 rmem->nr_pages = bp->cp_nr_pages; 3804 rmem->page_size = HW_CMPD_RING_SIZE; 3805 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3806 rmem->dma_arr = cpr->cp_desc_mapping; 3807 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3808 rc = bnxt_alloc_ring(bp, rmem); 3809 if (rc) { 3810 bnxt_free_ring(bp, rmem); 3811 bnxt_free_cp_arrays(cpr); 3812 } 3813 return rc; 3814 } 3815 3816 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3817 { 3818 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3819 int i, j, rc, ulp_base_vec, ulp_msix; 3820 int tcs = 
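	/* On P5_PLUS chips each per-NAPI cp_ring here acts as a
	 * notification queue, and the real completion rings live in
	 * cp_ring_arr: one sub-ring for the RX ring (if this NAPI has one)
	 * plus one per TX queue it owns -- a single one for an XDP TX ring,
	 * otherwise one per traffic class.  tcs falls back to 1 below when
	 * no mqprio TCs are configured.
	 */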
netdev_get_num_tc(bp->dev); 3821 3822 if (!tcs) 3823 tcs = 1; 3824 ulp_msix = bnxt_get_ulp_msix_num(bp); 3825 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3826 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 3827 struct bnxt_napi *bnapi = bp->bnapi[i]; 3828 struct bnxt_cp_ring_info *cpr, *cpr2; 3829 struct bnxt_ring_struct *ring; 3830 int cp_count = 0, k; 3831 int rx = 0, tx = 0; 3832 3833 if (!bnapi) 3834 continue; 3835 3836 cpr = &bnapi->cp_ring; 3837 cpr->bnapi = bnapi; 3838 ring = &cpr->cp_ring_struct; 3839 3840 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3841 if (rc) 3842 return rc; 3843 3844 if (ulp_msix && i >= ulp_base_vec) 3845 ring->map_idx = i + ulp_msix; 3846 else 3847 ring->map_idx = i; 3848 3849 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3850 continue; 3851 3852 if (i < bp->rx_nr_rings) { 3853 cp_count++; 3854 rx = 1; 3855 } 3856 if (i < bp->tx_nr_rings_xdp) { 3857 cp_count++; 3858 tx = 1; 3859 } else if ((sh && i < bp->tx_nr_rings) || 3860 (!sh && i >= bp->rx_nr_rings)) { 3861 cp_count += tcs; 3862 tx = 1; 3863 } 3864 3865 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 3866 GFP_KERNEL); 3867 if (!cpr->cp_ring_arr) 3868 return -ENOMEM; 3869 cpr->cp_ring_count = cp_count; 3870 3871 for (k = 0; k < cp_count; k++) { 3872 cpr2 = &cpr->cp_ring_arr[k]; 3873 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 3874 if (rc) 3875 return rc; 3876 cpr2->bnapi = bnapi; 3877 cpr2->cp_idx = k; 3878 if (!k && rx) { 3879 bp->rx_ring[i].rx_cpr = cpr2; 3880 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 3881 } else { 3882 int n, tc = k - rx; 3883 3884 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 3885 bp->tx_ring[n].tx_cpr = cpr2; 3886 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 3887 } 3888 } 3889 if (tx) 3890 j++; 3891 } 3892 return 0; 3893 } 3894 3895 static void bnxt_init_ring_struct(struct bnxt *bp) 3896 { 3897 int i, j; 3898 3899 for (i = 0; i < bp->cp_nr_rings; i++) { 3900 struct bnxt_napi *bnapi = bp->bnapi[i]; 3901 struct bnxt_ring_mem_info *rmem; 3902 struct bnxt_cp_ring_info *cpr; 3903 struct bnxt_rx_ring_info *rxr; 3904 struct bnxt_tx_ring_info *txr; 3905 struct bnxt_ring_struct *ring; 3906 3907 if (!bnapi) 3908 continue; 3909 3910 cpr = &bnapi->cp_ring; 3911 ring = &cpr->cp_ring_struct; 3912 rmem = &ring->ring_mem; 3913 rmem->nr_pages = bp->cp_nr_pages; 3914 rmem->page_size = HW_CMPD_RING_SIZE; 3915 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3916 rmem->dma_arr = cpr->cp_desc_mapping; 3917 rmem->vmem_size = 0; 3918 3919 rxr = bnapi->rx_ring; 3920 if (!rxr) 3921 goto skip_rx; 3922 3923 ring = &rxr->rx_ring_struct; 3924 rmem = &ring->ring_mem; 3925 rmem->nr_pages = bp->rx_nr_pages; 3926 rmem->page_size = HW_RXBD_RING_SIZE; 3927 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3928 rmem->dma_arr = rxr->rx_desc_mapping; 3929 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3930 rmem->vmem = (void **)&rxr->rx_buf_ring; 3931 3932 ring = &rxr->rx_agg_ring_struct; 3933 rmem = &ring->ring_mem; 3934 rmem->nr_pages = bp->rx_agg_nr_pages; 3935 rmem->page_size = HW_RXBD_RING_SIZE; 3936 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3937 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3938 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3939 rmem->vmem = (void **)&rxr->rx_agg_ring; 3940 3941 skip_rx: 3942 bnxt_for_each_napi_tx(j, bnapi, txr) { 3943 ring = &txr->tx_ring_struct; 3944 rmem = &ring->ring_mem; 3945 rmem->nr_pages = bp->tx_nr_pages; 3946 rmem->page_size = HW_TXBD_RING_SIZE; 3947 rmem->pg_arr = (void **)txr->tx_desc_ring; 3948 rmem->dma_arr = txr->tx_desc_mapping; 3949 rmem->vmem_size = SW_TXBD_RING_SIZE * 
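			/* Each ring_mem describes two things: the hardware
			 * descriptor pages (pg_arr/dma_arr, filled in by
			 * bnxt_alloc_ring()) and an optional vmalloc'ed
			 * software array (vmem) that shadows the ring with
			 * one driver-side entry per descriptor --
			 * rx_buf_ring, rx_agg_ring and tx_buf_ring here.
			 */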
bp->tx_nr_pages; 3950 rmem->vmem = (void **)&txr->tx_buf_ring; 3951 } 3952 } 3953 } 3954 3955 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3956 { 3957 int i; 3958 u32 prod; 3959 struct rx_bd **rx_buf_ring; 3960 3961 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3962 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3963 int j; 3964 struct rx_bd *rxbd; 3965 3966 rxbd = rx_buf_ring[i]; 3967 if (!rxbd) 3968 continue; 3969 3970 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3971 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3972 rxbd->rx_bd_opaque = prod; 3973 } 3974 } 3975 } 3976 3977 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 3978 { 3979 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3980 struct net_device *dev = bp->dev; 3981 u32 prod; 3982 int i; 3983 3984 prod = rxr->rx_prod; 3985 for (i = 0; i < bp->rx_ring_size; i++) { 3986 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 3987 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3988 ring_nr, i, bp->rx_ring_size); 3989 break; 3990 } 3991 prod = NEXT_RX(prod); 3992 } 3993 rxr->rx_prod = prod; 3994 3995 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3996 return 0; 3997 3998 prod = rxr->rx_agg_prod; 3999 for (i = 0; i < bp->rx_agg_ring_size; i++) { 4000 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 4001 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 4002 ring_nr, i, bp->rx_ring_size); 4003 break; 4004 } 4005 prod = NEXT_RX_AGG(prod); 4006 } 4007 rxr->rx_agg_prod = prod; 4008 4009 if (rxr->rx_tpa) { 4010 dma_addr_t mapping; 4011 u8 *data; 4012 4013 for (i = 0; i < bp->max_tpa; i++) { 4014 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); 4015 if (!data) 4016 return -ENOMEM; 4017 4018 rxr->rx_tpa[i].data = data; 4019 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 4020 rxr->rx_tpa[i].mapping = mapping; 4021 } 4022 } 4023 return 0; 4024 } 4025 4026 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 4027 { 4028 struct bnxt_rx_ring_info *rxr; 4029 struct bnxt_ring_struct *ring; 4030 u32 type; 4031 4032 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4033 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 4034 4035 if (NET_IP_ALIGN == 2) 4036 type |= RX_BD_FLAGS_SOP; 4037 4038 rxr = &bp->rx_ring[ring_nr]; 4039 ring = &rxr->rx_ring_struct; 4040 bnxt_init_rxbd_pages(ring, type); 4041 4042 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4043 &rxr->bnapi->napi); 4044 4045 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4046 bpf_prog_add(bp->xdp_prog, 1); 4047 rxr->xdp_prog = bp->xdp_prog; 4048 } 4049 ring->fw_ring_id = INVALID_HW_RING_ID; 4050 4051 ring = &rxr->rx_agg_ring_struct; 4052 ring->fw_ring_id = INVALID_HW_RING_ID; 4053 4054 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4055 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4056 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4057 4058 bnxt_init_rxbd_pages(ring, type); 4059 } 4060 4061 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4062 } 4063 4064 static void bnxt_init_cp_rings(struct bnxt *bp) 4065 { 4066 int i, j; 4067 4068 for (i = 0; i < bp->cp_nr_rings; i++) { 4069 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4070 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4071 4072 ring->fw_ring_id = INVALID_HW_RING_ID; 4073 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4074 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4075 if (!cpr->cp_ring_arr) 4076 continue; 4077 for (j = 0; j < cpr->cp_ring_count; j++) { 4078 struct bnxt_cp_ring_info *cpr2 = 
&cpr->cp_ring_arr[j]; 4079 4080 ring = &cpr2->cp_ring_struct; 4081 ring->fw_ring_id = INVALID_HW_RING_ID; 4082 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4083 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4084 } 4085 } 4086 } 4087 4088 static int bnxt_init_rx_rings(struct bnxt *bp) 4089 { 4090 int i, rc = 0; 4091 4092 if (BNXT_RX_PAGE_MODE(bp)) { 4093 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4094 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4095 } else { 4096 bp->rx_offset = BNXT_RX_OFFSET; 4097 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4098 } 4099 4100 for (i = 0; i < bp->rx_nr_rings; i++) { 4101 rc = bnxt_init_one_rx_ring(bp, i); 4102 if (rc) 4103 break; 4104 } 4105 4106 return rc; 4107 } 4108 4109 static int bnxt_init_tx_rings(struct bnxt *bp) 4110 { 4111 u16 i; 4112 4113 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4114 BNXT_MIN_TX_DESC_CNT); 4115 4116 for (i = 0; i < bp->tx_nr_rings; i++) { 4117 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4118 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4119 4120 ring->fw_ring_id = INVALID_HW_RING_ID; 4121 4122 if (i >= bp->tx_nr_rings_xdp) 4123 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4124 NETDEV_QUEUE_TYPE_TX, 4125 &txr->bnapi->napi); 4126 } 4127 4128 return 0; 4129 } 4130 4131 static void bnxt_free_ring_grps(struct bnxt *bp) 4132 { 4133 kfree(bp->grp_info); 4134 bp->grp_info = NULL; 4135 } 4136 4137 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4138 { 4139 int i; 4140 4141 if (irq_re_init) { 4142 bp->grp_info = kcalloc(bp->cp_nr_rings, 4143 sizeof(struct bnxt_ring_grp_info), 4144 GFP_KERNEL); 4145 if (!bp->grp_info) 4146 return -ENOMEM; 4147 } 4148 for (i = 0; i < bp->cp_nr_rings; i++) { 4149 if (irq_re_init) 4150 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4151 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4152 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4153 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4154 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4155 } 4156 return 0; 4157 } 4158 4159 static void bnxt_free_vnics(struct bnxt *bp) 4160 { 4161 kfree(bp->vnic_info); 4162 bp->vnic_info = NULL; 4163 bp->nr_vnics = 0; 4164 } 4165 4166 static int bnxt_alloc_vnics(struct bnxt *bp) 4167 { 4168 int num_vnics = 1; 4169 4170 #ifdef CONFIG_RFS_ACCEL 4171 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) 4172 num_vnics += bp->rx_nr_rings; 4173 #endif 4174 4175 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4176 num_vnics++; 4177 4178 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4179 GFP_KERNEL); 4180 if (!bp->vnic_info) 4181 return -ENOMEM; 4182 4183 bp->nr_vnics = num_vnics; 4184 return 0; 4185 } 4186 4187 static void bnxt_init_vnics(struct bnxt *bp) 4188 { 4189 int i; 4190 4191 for (i = 0; i < bp->nr_vnics; i++) { 4192 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4193 int j; 4194 4195 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4196 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4197 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4198 4199 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4200 4201 if (bp->vnic_info[i].rss_hash_key) { 4202 if (!i) { 4203 u8 *key = (void *)vnic->rss_hash_key; 4204 int k; 4205 4206 bp->toeplitz_prefix = 0; 4207 get_random_bytes(vnic->rss_hash_key, 4208 HW_HASH_KEY_SIZE); 4209 for (k = 0; k < 8; k++) { 4210 bp->toeplitz_prefix <<= 8; 4211 bp->toeplitz_prefix |= key[k]; 4212 } 4213 } else { 4214 memcpy(vnic->rss_hash_key, 4215 bp->vnic_info[0].rss_hash_key, 4216 HW_HASH_KEY_SIZE); 4217 } 
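		/* VNIC 0 generates the random Toeplitz key; every other
		 * VNIC reuses it so that all of them hash flows the same
		 * way.  The first 8 key bytes are also folded into
		 * bp->toeplitz_prefix, which seeds the software hash in
		 * bnxt_toeplitz() further down in this file.
		 */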
4218 } 4219 } 4220 } 4221 4222 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4223 { 4224 int pages; 4225 4226 pages = ring_size / desc_per_pg; 4227 4228 if (!pages) 4229 return 1; 4230 4231 pages++; 4232 4233 while (pages & (pages - 1)) 4234 pages++; 4235 4236 return pages; 4237 } 4238 4239 void bnxt_set_tpa_flags(struct bnxt *bp) 4240 { 4241 bp->flags &= ~BNXT_FLAG_TPA; 4242 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4243 return; 4244 if (bp->dev->features & NETIF_F_LRO) 4245 bp->flags |= BNXT_FLAG_LRO; 4246 else if (bp->dev->features & NETIF_F_GRO_HW) 4247 bp->flags |= BNXT_FLAG_GRO; 4248 } 4249 4250 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4251 * be set on entry. 4252 */ 4253 void bnxt_set_ring_params(struct bnxt *bp) 4254 { 4255 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4256 u32 agg_factor = 0, agg_ring_size = 0; 4257 4258 /* 8 for CRC and VLAN */ 4259 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4260 4261 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4262 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4263 4264 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 4265 ring_size = bp->rx_ring_size; 4266 bp->rx_agg_ring_size = 0; 4267 bp->rx_agg_nr_pages = 0; 4268 4269 if (bp->flags & BNXT_FLAG_TPA) 4270 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4271 4272 bp->flags &= ~BNXT_FLAG_JUMBO; 4273 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4274 u32 jumbo_factor; 4275 4276 bp->flags |= BNXT_FLAG_JUMBO; 4277 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4278 if (jumbo_factor > agg_factor) 4279 agg_factor = jumbo_factor; 4280 } 4281 if (agg_factor) { 4282 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4283 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4284 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4285 bp->rx_ring_size, ring_size); 4286 bp->rx_ring_size = ring_size; 4287 } 4288 agg_ring_size = ring_size * agg_factor; 4289 4290 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4291 RX_DESC_CNT); 4292 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4293 u32 tmp = agg_ring_size; 4294 4295 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4296 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4297 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4298 tmp, agg_ring_size); 4299 } 4300 bp->rx_agg_ring_size = agg_ring_size; 4301 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4302 4303 if (BNXT_RX_PAGE_MODE(bp)) { 4304 rx_space = PAGE_SIZE; 4305 rx_size = PAGE_SIZE - 4306 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4307 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4308 } else { 4309 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 4310 rx_space = rx_size + NET_SKB_PAD + 4311 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4312 } 4313 } 4314 4315 bp->rx_buf_use_size = rx_size; 4316 bp->rx_buf_size = rx_space; 4317 4318 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4319 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4320 4321 ring_size = bp->tx_ring_size; 4322 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4323 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4324 4325 max_rx_cmpl = bp->rx_ring_size; 4326 /* MAX TPA needs to be added because TPA_START completions are 4327 * immediately recycled, so the TPA completions are not bound by 4328 * the RX ring size. 
4329 */ 4330 if (bp->flags & BNXT_FLAG_TPA) 4331 max_rx_cmpl += bp->max_tpa; 4332 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4333 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4334 bp->cp_ring_size = ring_size; 4335 4336 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4337 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4338 bp->cp_nr_pages = MAX_CP_PAGES; 4339 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4340 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4341 ring_size, bp->cp_ring_size); 4342 } 4343 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4344 bp->cp_ring_mask = bp->cp_bit - 1; 4345 } 4346 4347 /* Changing allocation mode of RX rings. 4348 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4349 */ 4350 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4351 { 4352 struct net_device *dev = bp->dev; 4353 4354 if (page_mode) { 4355 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 4356 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4357 4358 if (bp->xdp_prog->aux->xdp_has_frags) 4359 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4360 else 4361 dev->max_mtu = 4362 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4363 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4364 bp->flags |= BNXT_FLAG_JUMBO; 4365 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4366 } else { 4367 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4368 bp->rx_skb_func = bnxt_rx_page_skb; 4369 } 4370 bp->rx_dir = DMA_BIDIRECTIONAL; 4371 /* Disable LRO or GRO_HW */ 4372 netdev_update_features(dev); 4373 } else { 4374 dev->max_mtu = bp->max_mtu; 4375 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4376 bp->rx_dir = DMA_FROM_DEVICE; 4377 bp->rx_skb_func = bnxt_rx_skb; 4378 } 4379 return 0; 4380 } 4381 4382 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4383 { 4384 int i; 4385 struct bnxt_vnic_info *vnic; 4386 struct pci_dev *pdev = bp->pdev; 4387 4388 if (!bp->vnic_info) 4389 return; 4390 4391 for (i = 0; i < bp->nr_vnics; i++) { 4392 vnic = &bp->vnic_info[i]; 4393 4394 kfree(vnic->fw_grp_ids); 4395 vnic->fw_grp_ids = NULL; 4396 4397 kfree(vnic->uc_list); 4398 vnic->uc_list = NULL; 4399 4400 if (vnic->mc_list) { 4401 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4402 vnic->mc_list, vnic->mc_list_mapping); 4403 vnic->mc_list = NULL; 4404 } 4405 4406 if (vnic->rss_table) { 4407 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4408 vnic->rss_table, 4409 vnic->rss_table_dma_addr); 4410 vnic->rss_table = NULL; 4411 } 4412 4413 vnic->rss_hash_key = NULL; 4414 vnic->flags = 0; 4415 } 4416 } 4417 4418 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4419 { 4420 int i, rc = 0, size; 4421 struct bnxt_vnic_info *vnic; 4422 struct pci_dev *pdev = bp->pdev; 4423 int max_rings; 4424 4425 for (i = 0; i < bp->nr_vnics; i++) { 4426 vnic = &bp->vnic_info[i]; 4427 4428 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4429 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4430 4431 if (mem_size > 0) { 4432 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4433 if (!vnic->uc_list) { 4434 rc = -ENOMEM; 4435 goto out; 4436 } 4437 } 4438 } 4439 4440 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4441 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4442 vnic->mc_list = 4443 dma_alloc_coherent(&pdev->dev, 4444 vnic->mc_list_size, 4445 &vnic->mc_list_mapping, 4446 GFP_KERNEL); 4447 if (!vnic->mc_list) { 4448 rc = -ENOMEM; 4449 goto out; 4450 } 4451 } 4452 4453 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4454 goto vnic_skip_grps; 4455 4456 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4457 max_rings = 
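		/* fw_grp_ids maps RSS indirection table entries to ring
		 * group IDs on pre-P5 chips (P5_PLUS skips this above, see
		 * bnxt_fill_hw_rss_tbl()).  An RSS-capable VNIC needs one
		 * slot per RX ring; a non-RSS VNIC only ever targets a
		 * single ring group.
		 */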
bp->rx_nr_rings; 4458 else 4459 max_rings = 1; 4460 4461 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4462 if (!vnic->fw_grp_ids) { 4463 rc = -ENOMEM; 4464 goto out; 4465 } 4466 vnic_skip_grps: 4467 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4468 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4469 continue; 4470 4471 /* Allocate rss table and hash key */ 4472 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4473 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4474 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4475 4476 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4477 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4478 vnic->rss_table_size, 4479 &vnic->rss_table_dma_addr, 4480 GFP_KERNEL); 4481 if (!vnic->rss_table) { 4482 rc = -ENOMEM; 4483 goto out; 4484 } 4485 4486 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4487 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4488 } 4489 return 0; 4490 4491 out: 4492 return rc; 4493 } 4494 4495 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4496 { 4497 struct bnxt_hwrm_wait_token *token; 4498 4499 dma_pool_destroy(bp->hwrm_dma_pool); 4500 bp->hwrm_dma_pool = NULL; 4501 4502 rcu_read_lock(); 4503 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4504 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4505 rcu_read_unlock(); 4506 } 4507 4508 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4509 { 4510 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4511 BNXT_HWRM_DMA_SIZE, 4512 BNXT_HWRM_DMA_ALIGN, 0); 4513 if (!bp->hwrm_dma_pool) 4514 return -ENOMEM; 4515 4516 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4517 4518 return 0; 4519 } 4520 4521 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4522 { 4523 kfree(stats->hw_masks); 4524 stats->hw_masks = NULL; 4525 kfree(stats->sw_stats); 4526 stats->sw_stats = NULL; 4527 if (stats->hw_stats) { 4528 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4529 stats->hw_stats_map); 4530 stats->hw_stats = NULL; 4531 } 4532 } 4533 4534 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4535 bool alloc_masks) 4536 { 4537 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4538 &stats->hw_stats_map, GFP_KERNEL); 4539 if (!stats->hw_stats) 4540 return -ENOMEM; 4541 4542 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4543 if (!stats->sw_stats) 4544 goto stats_mem_err; 4545 4546 if (alloc_masks) { 4547 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4548 if (!stats->hw_masks) 4549 goto stats_mem_err; 4550 } 4551 return 0; 4552 4553 stats_mem_err: 4554 bnxt_free_stats_mem(bp, stats); 4555 return -ENOMEM; 4556 } 4557 4558 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4559 { 4560 int i; 4561 4562 for (i = 0; i < count; i++) 4563 mask_arr[i] = mask; 4564 } 4565 4566 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4567 { 4568 int i; 4569 4570 for (i = 0; i < count; i++) 4571 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4572 } 4573 4574 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4575 struct bnxt_stats_mem *stats) 4576 { 4577 struct hwrm_func_qstats_ext_output *resp; 4578 struct hwrm_func_qstats_ext_input *req; 4579 __le64 *hw_masks; 4580 int rc; 4581 4582 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 4583 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4584 return -EOPNOTSUPP; 4585 4586 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 4587 if (rc) 4588 return rc; 4589 4590 req->fid = 
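	/* fid 0xffff is the usual HWRM "this function" value.  With the
	 * COUNTER_MASK flag set, firmware returns the width mask of each
	 * counter instead of its value; bnxt_init_stats() uses these masks
	 * to handle counter wrap, falling back to fixed 48-bit (P5_PLUS)
	 * or 64-bit masks when this query is not supported.
	 */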
cpu_to_le16(0xffff); 4591 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4592 4593 resp = hwrm_req_hold(bp, req); 4594 rc = hwrm_req_send(bp, req); 4595 if (!rc) { 4596 hw_masks = &resp->rx_ucast_pkts; 4597 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 4598 } 4599 hwrm_req_drop(bp, req); 4600 return rc; 4601 } 4602 4603 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 4604 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 4605 4606 static void bnxt_init_stats(struct bnxt *bp) 4607 { 4608 struct bnxt_napi *bnapi = bp->bnapi[0]; 4609 struct bnxt_cp_ring_info *cpr; 4610 struct bnxt_stats_mem *stats; 4611 __le64 *rx_stats, *tx_stats; 4612 int rc, rx_count, tx_count; 4613 u64 *rx_masks, *tx_masks; 4614 u64 mask; 4615 u8 flags; 4616 4617 cpr = &bnapi->cp_ring; 4618 stats = &cpr->stats; 4619 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 4620 if (rc) { 4621 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4622 mask = (1ULL << 48) - 1; 4623 else 4624 mask = -1ULL; 4625 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 4626 } 4627 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4628 stats = &bp->port_stats; 4629 rx_stats = stats->hw_stats; 4630 rx_masks = stats->hw_masks; 4631 rx_count = sizeof(struct rx_port_stats) / 8; 4632 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4633 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4634 tx_count = sizeof(struct tx_port_stats) / 8; 4635 4636 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 4637 rc = bnxt_hwrm_port_qstats(bp, flags); 4638 if (rc) { 4639 mask = (1ULL << 40) - 1; 4640 4641 bnxt_fill_masks(rx_masks, mask, rx_count); 4642 bnxt_fill_masks(tx_masks, mask, tx_count); 4643 } else { 4644 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4645 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 4646 bnxt_hwrm_port_qstats(bp, 0); 4647 } 4648 } 4649 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 4650 stats = &bp->rx_port_stats_ext; 4651 rx_stats = stats->hw_stats; 4652 rx_masks = stats->hw_masks; 4653 rx_count = sizeof(struct rx_port_stats_ext) / 8; 4654 stats = &bp->tx_port_stats_ext; 4655 tx_stats = stats->hw_stats; 4656 tx_masks = stats->hw_masks; 4657 tx_count = sizeof(struct tx_port_stats_ext) / 8; 4658 4659 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4660 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 4661 if (rc) { 4662 mask = (1ULL << 40) - 1; 4663 4664 bnxt_fill_masks(rx_masks, mask, rx_count); 4665 if (tx_stats) 4666 bnxt_fill_masks(tx_masks, mask, tx_count); 4667 } else { 4668 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4669 if (tx_stats) 4670 bnxt_copy_hw_masks(tx_masks, tx_stats, 4671 tx_count); 4672 bnxt_hwrm_port_qstats_ext(bp, 0); 4673 } 4674 } 4675 } 4676 4677 static void bnxt_free_port_stats(struct bnxt *bp) 4678 { 4679 bp->flags &= ~BNXT_FLAG_PORT_STATS; 4680 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 4681 4682 bnxt_free_stats_mem(bp, &bp->port_stats); 4683 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 4684 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 4685 } 4686 4687 static void bnxt_free_ring_stats(struct bnxt *bp) 4688 { 4689 int i; 4690 4691 if (!bp->bnapi) 4692 return; 4693 4694 for (i = 0; i < bp->cp_nr_rings; i++) { 4695 struct bnxt_napi *bnapi = bp->bnapi[i]; 4696 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4697 4698 bnxt_free_stats_mem(bp, &cpr->stats); 4699 } 4700 } 4701 4702 static int bnxt_alloc_stats(struct bnxt *bp) 4703 { 4704 u32 size, i; 4705 int rc; 4706 4707 size = bp->hw_ring_stats_size; 4708 4709 for (i = 0; i < bp->cp_nr_rings; i++) { 4710 struct 
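		/* Every completion ring gets a hw_ring_stats_size DMA block
		 * plus a software mirror; only ring 0 (!i) also allocates
		 * the hw_masks array, which is filled once in
		 * bnxt_init_stats() and appears to serve as the mask
		 * template for all rings.
		 */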
bnxt_napi *bnapi = bp->bnapi[i]; 4711 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4712 4713 cpr->stats.len = size; 4714 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4715 if (rc) 4716 return rc; 4717 4718 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4719 } 4720 4721 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4722 return 0; 4723 4724 if (bp->port_stats.hw_stats) 4725 goto alloc_ext_stats; 4726 4727 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4728 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4729 if (rc) 4730 return rc; 4731 4732 bp->flags |= BNXT_FLAG_PORT_STATS; 4733 4734 alloc_ext_stats: 4735 /* Display extended statistics only if FW supports it */ 4736 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4737 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4738 return 0; 4739 4740 if (bp->rx_port_stats_ext.hw_stats) 4741 goto alloc_tx_ext_stats; 4742 4743 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4744 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4745 /* Extended stats are optional */ 4746 if (rc) 4747 return 0; 4748 4749 alloc_tx_ext_stats: 4750 if (bp->tx_port_stats_ext.hw_stats) 4751 return 0; 4752 4753 if (bp->hwrm_spec_code >= 0x10902 || 4754 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4755 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4756 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4757 /* Extended stats are optional */ 4758 if (rc) 4759 return 0; 4760 } 4761 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4762 return 0; 4763 } 4764 4765 static void bnxt_clear_ring_indices(struct bnxt *bp) 4766 { 4767 int i, j; 4768 4769 if (!bp->bnapi) 4770 return; 4771 4772 for (i = 0; i < bp->cp_nr_rings; i++) { 4773 struct bnxt_napi *bnapi = bp->bnapi[i]; 4774 struct bnxt_cp_ring_info *cpr; 4775 struct bnxt_rx_ring_info *rxr; 4776 struct bnxt_tx_ring_info *txr; 4777 4778 if (!bnapi) 4779 continue; 4780 4781 cpr = &bnapi->cp_ring; 4782 cpr->cp_raw_cons = 0; 4783 4784 bnxt_for_each_napi_tx(j, bnapi, txr) { 4785 txr->tx_prod = 0; 4786 txr->tx_cons = 0; 4787 txr->tx_hw_cons = 0; 4788 } 4789 4790 rxr = bnapi->rx_ring; 4791 if (rxr) { 4792 rxr->rx_prod = 0; 4793 rxr->rx_agg_prod = 0; 4794 rxr->rx_sw_agg_prod = 0; 4795 rxr->rx_next_cons = 0; 4796 } 4797 bnapi->events = 0; 4798 } 4799 } 4800 4801 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 4802 { 4803 int i; 4804 4805 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4806 * safe to delete the hash table. 
4807 */ 4808 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4809 struct hlist_head *head; 4810 struct hlist_node *tmp; 4811 struct bnxt_ntuple_filter *fltr; 4812 4813 head = &bp->ntp_fltr_hash_tbl[i]; 4814 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 4815 bnxt_del_l2_filter(bp, fltr->l2_fltr); 4816 if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) 4817 continue; 4818 hlist_del(&fltr->base.hash); 4819 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 4820 bp->ntp_fltr_count--; 4821 kfree(fltr); 4822 } 4823 } 4824 if (!all) 4825 return; 4826 4827 bitmap_free(bp->ntp_fltr_bmap); 4828 bp->ntp_fltr_bmap = NULL; 4829 bp->ntp_fltr_count = 0; 4830 } 4831 4832 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4833 { 4834 int i, rc = 0; 4835 4836 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 4837 return 0; 4838 4839 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4840 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4841 4842 bp->ntp_fltr_count = 0; 4843 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL); 4844 4845 if (!bp->ntp_fltr_bmap) 4846 rc = -ENOMEM; 4847 4848 return rc; 4849 } 4850 4851 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 4852 { 4853 int i; 4854 4855 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 4856 struct hlist_head *head; 4857 struct hlist_node *tmp; 4858 struct bnxt_l2_filter *fltr; 4859 4860 head = &bp->l2_fltr_hash_tbl[i]; 4861 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 4862 if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) 4863 continue; 4864 hlist_del(&fltr->base.hash); 4865 if (fltr->base.flags) { 4866 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 4867 bp->ntp_fltr_count--; 4868 } 4869 kfree(fltr); 4870 } 4871 } 4872 } 4873 4874 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 4875 { 4876 int i; 4877 4878 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 4879 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 4880 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 4881 } 4882 4883 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 4884 { 4885 bnxt_free_vnic_attributes(bp); 4886 bnxt_free_tx_rings(bp); 4887 bnxt_free_rx_rings(bp); 4888 bnxt_free_cp_rings(bp); 4889 bnxt_free_all_cp_arrays(bp); 4890 bnxt_free_ntp_fltrs(bp, false); 4891 bnxt_free_l2_filters(bp, false); 4892 if (irq_re_init) { 4893 bnxt_free_ring_stats(bp); 4894 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 4895 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 4896 bnxt_free_port_stats(bp); 4897 bnxt_free_ring_grps(bp); 4898 bnxt_free_vnics(bp); 4899 kfree(bp->tx_ring_map); 4900 bp->tx_ring_map = NULL; 4901 kfree(bp->tx_ring); 4902 bp->tx_ring = NULL; 4903 kfree(bp->rx_ring); 4904 bp->rx_ring = NULL; 4905 kfree(bp->bnapi); 4906 bp->bnapi = NULL; 4907 } else { 4908 bnxt_clear_ring_indices(bp); 4909 } 4910 } 4911 4912 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 4913 { 4914 int i, j, rc, size, arr_size; 4915 void *bnapi; 4916 4917 if (irq_re_init) { 4918 /* Allocate bnapi mem pointer array and mem block for 4919 * all queues 4920 */ 4921 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 4922 bp->cp_nr_rings); 4923 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 4924 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 4925 if (!bnapi) 4926 return -ENOMEM; 4927 4928 bp->bnapi = bnapi; 4929 bnapi += arr_size; 4930 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 4931 bp->bnapi[i] = bnapi; 4932 bp->bnapi[i]->index = i; 4933 bp->bnapi[i]->bp = bp; 4934 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4935 struct 
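			/* bp->bnapi was allocated above as a single block:
			 * an L1-cache-aligned array of bnxt_napi pointers
			 * followed by the bnxt_napi structures themselves,
			 * so one kzalloc() here (and one kfree() in
			 * bnxt_free_mem()) covers all of them.
			 */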
bnxt_cp_ring_info *cpr = 4936 &bp->bnapi[i]->cp_ring; 4937 4938 cpr->cp_ring_struct.ring_mem.flags = 4939 BNXT_RMEM_RING_PTE_FLAG; 4940 } 4941 } 4942 4943 bp->rx_ring = kcalloc(bp->rx_nr_rings, 4944 sizeof(struct bnxt_rx_ring_info), 4945 GFP_KERNEL); 4946 if (!bp->rx_ring) 4947 return -ENOMEM; 4948 4949 for (i = 0; i < bp->rx_nr_rings; i++) { 4950 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4951 4952 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4953 rxr->rx_ring_struct.ring_mem.flags = 4954 BNXT_RMEM_RING_PTE_FLAG; 4955 rxr->rx_agg_ring_struct.ring_mem.flags = 4956 BNXT_RMEM_RING_PTE_FLAG; 4957 } else { 4958 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 4959 } 4960 rxr->bnapi = bp->bnapi[i]; 4961 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4962 } 4963 4964 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4965 sizeof(struct bnxt_tx_ring_info), 4966 GFP_KERNEL); 4967 if (!bp->tx_ring) 4968 return -ENOMEM; 4969 4970 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4971 GFP_KERNEL); 4972 4973 if (!bp->tx_ring_map) 4974 return -ENOMEM; 4975 4976 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4977 j = 0; 4978 else 4979 j = bp->rx_nr_rings; 4980 4981 for (i = 0; i < bp->tx_nr_rings; i++) { 4982 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4983 struct bnxt_napi *bnapi2; 4984 4985 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4986 txr->tx_ring_struct.ring_mem.flags = 4987 BNXT_RMEM_RING_PTE_FLAG; 4988 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4989 if (i >= bp->tx_nr_rings_xdp) { 4990 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 4991 4992 bnapi2 = bp->bnapi[k]; 4993 txr->txq_index = i - bp->tx_nr_rings_xdp; 4994 txr->tx_napi_idx = 4995 BNXT_RING_TO_TC(bp, txr->txq_index); 4996 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 4997 bnapi2->tx_int = bnxt_tx_int; 4998 } else { 4999 bnapi2 = bp->bnapi[j]; 5000 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5001 bnapi2->tx_ring[0] = txr; 5002 bnapi2->tx_int = bnxt_tx_int_xdp; 5003 j++; 5004 } 5005 txr->bnapi = bnapi2; 5006 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5007 txr->tx_cpr = &bnapi2->cp_ring; 5008 } 5009 5010 rc = bnxt_alloc_stats(bp); 5011 if (rc) 5012 goto alloc_mem_err; 5013 bnxt_init_stats(bp); 5014 5015 rc = bnxt_alloc_ntp_fltrs(bp); 5016 if (rc) 5017 goto alloc_mem_err; 5018 5019 rc = bnxt_alloc_vnics(bp); 5020 if (rc) 5021 goto alloc_mem_err; 5022 } 5023 5024 rc = bnxt_alloc_all_cp_arrays(bp); 5025 if (rc) 5026 goto alloc_mem_err; 5027 5028 bnxt_init_ring_struct(bp); 5029 5030 rc = bnxt_alloc_rx_rings(bp); 5031 if (rc) 5032 goto alloc_mem_err; 5033 5034 rc = bnxt_alloc_tx_rings(bp); 5035 if (rc) 5036 goto alloc_mem_err; 5037 5038 rc = bnxt_alloc_cp_rings(bp); 5039 if (rc) 5040 goto alloc_mem_err; 5041 5042 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 5043 BNXT_VNIC_UCAST_FLAG; 5044 rc = bnxt_alloc_vnic_attributes(bp); 5045 if (rc) 5046 goto alloc_mem_err; 5047 return 0; 5048 5049 alloc_mem_err: 5050 bnxt_free_mem(bp, true); 5051 return rc; 5052 } 5053 5054 static void bnxt_disable_int(struct bnxt *bp) 5055 { 5056 int i; 5057 5058 if (!bp->bnapi) 5059 return; 5060 5061 for (i = 0; i < bp->cp_nr_rings; i++) { 5062 struct bnxt_napi *bnapi = bp->bnapi[i]; 5063 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5064 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5065 5066 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5067 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5068 } 5069 } 5070 5071 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5072 { 5073 struct bnxt_napi *bnapi = bp->bnapi[n]; 5074 struct bnxt_cp_ring_info *cpr; 5075 5076 cpr = 
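	/* map_idx was set in bnxt_alloc_cp_rings(): when ULP (e.g. RoCE)
	 * reserves MSI-X vectors, rings at or above the ULP base are
	 * shifted up by ulp_msix, so IRQ lookups must go through map_idx
	 * rather than the raw ring index.
	 */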
&bnapi->cp_ring; 5077 return cpr->cp_ring_struct.map_idx; 5078 } 5079 5080 static void bnxt_disable_int_sync(struct bnxt *bp) 5081 { 5082 int i; 5083 5084 if (!bp->irq_tbl) 5085 return; 5086 5087 atomic_inc(&bp->intr_sem); 5088 5089 bnxt_disable_int(bp); 5090 for (i = 0; i < bp->cp_nr_rings; i++) { 5091 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5092 5093 synchronize_irq(bp->irq_tbl[map_idx].vector); 5094 } 5095 } 5096 5097 static void bnxt_enable_int(struct bnxt *bp) 5098 { 5099 int i; 5100 5101 atomic_set(&bp->intr_sem, 0); 5102 for (i = 0; i < bp->cp_nr_rings; i++) { 5103 struct bnxt_napi *bnapi = bp->bnapi[i]; 5104 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5105 5106 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5107 } 5108 } 5109 5110 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5111 bool async_only) 5112 { 5113 DECLARE_BITMAP(async_events_bmap, 256); 5114 u32 *events = (u32 *)async_events_bmap; 5115 struct hwrm_func_drv_rgtr_output *resp; 5116 struct hwrm_func_drv_rgtr_input *req; 5117 u32 flags; 5118 int rc, i; 5119 5120 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5121 if (rc) 5122 return rc; 5123 5124 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5125 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5126 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5127 5128 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5129 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5130 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5131 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5132 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5133 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5134 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5135 req->flags = cpu_to_le32(flags); 5136 req->ver_maj_8b = DRV_VER_MAJ; 5137 req->ver_min_8b = DRV_VER_MIN; 5138 req->ver_upd_8b = DRV_VER_UPD; 5139 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5140 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5141 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5142 5143 if (BNXT_PF(bp)) { 5144 u32 data[8]; 5145 int i; 5146 5147 memset(data, 0, sizeof(data)); 5148 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5149 u16 cmd = bnxt_vf_req_snif[i]; 5150 unsigned int bit, idx; 5151 5152 idx = cmd / 32; 5153 bit = cmd % 32; 5154 data[idx] |= 1 << bit; 5155 } 5156 5157 for (i = 0; i < 8; i++) 5158 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5159 5160 req->enables |= 5161 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5162 } 5163 5164 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5165 req->flags |= cpu_to_le32( 5166 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5167 5168 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 5169 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5170 u16 event_id = bnxt_async_events_arr[i]; 5171 5172 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5173 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5174 continue; 5175 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5176 !bp->ptp_cfg) 5177 continue; 5178 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5179 } 5180 if (bmap && bmap_size) { 5181 for (i = 0; i < bmap_size; i++) { 5182 if (test_bit(i, bmap)) 5183 __set_bit(i, async_events_bmap); 5184 } 5185 } 5186 for (i = 0; i < 8; i++) 5187 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5188 5189 if (async_only) 5190 req->enables = 5191 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5192 5193 resp = hwrm_req_hold(bp, req); 5194 rc = hwrm_req_send(bp, req); 5195 if (!rc) { 5196 
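		/* Registration succeeded: remember it so that
		 * bnxt_hwrm_func_drv_unrgtr() only undoes a registration
		 * that actually happened, and latch IF_CHANGE support,
		 * presumably so the open path can query firmware for state
		 * changes after events such as a firmware reset.
		 */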
set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5197 if (resp->flags & 5198 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5199 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5200 } 5201 hwrm_req_drop(bp, req); 5202 return rc; 5203 } 5204 5205 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5206 { 5207 struct hwrm_func_drv_unrgtr_input *req; 5208 int rc; 5209 5210 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5211 return 0; 5212 5213 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5214 if (rc) 5215 return rc; 5216 return hwrm_req_send(bp, req); 5217 } 5218 5219 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5220 5221 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5222 { 5223 struct hwrm_tunnel_dst_port_free_input *req; 5224 int rc; 5225 5226 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5227 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5228 return 0; 5229 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5230 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5231 return 0; 5232 5233 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5234 if (rc) 5235 return rc; 5236 5237 req->tunnel_type = tunnel_type; 5238 5239 switch (tunnel_type) { 5240 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5241 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5242 bp->vxlan_port = 0; 5243 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5244 break; 5245 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5246 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5247 bp->nge_port = 0; 5248 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5249 break; 5250 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5251 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5252 bp->vxlan_gpe_port = 0; 5253 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5254 break; 5255 default: 5256 break; 5257 } 5258 5259 rc = hwrm_req_send(bp, req); 5260 if (rc) 5261 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 5262 rc); 5263 if (bp->flags & BNXT_FLAG_TPA) 5264 bnxt_set_tpa(bp, true); 5265 return rc; 5266 } 5267 5268 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5269 u8 tunnel_type) 5270 { 5271 struct hwrm_tunnel_dst_port_alloc_output *resp; 5272 struct hwrm_tunnel_dst_port_alloc_input *req; 5273 int rc; 5274 5275 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5276 if (rc) 5277 return rc; 5278 5279 req->tunnel_type = tunnel_type; 5280 req->tunnel_dst_port_val = port; 5281 5282 resp = hwrm_req_hold(bp, req); 5283 rc = hwrm_req_send(bp, req); 5284 if (rc) { 5285 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 5286 rc); 5287 goto err_out; 5288 } 5289 5290 switch (tunnel_type) { 5291 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5292 bp->vxlan_port = port; 5293 bp->vxlan_fw_dst_port_id = 5294 le16_to_cpu(resp->tunnel_dst_port_id); 5295 break; 5296 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5297 bp->nge_port = port; 5298 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5299 break; 5300 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5301 bp->vxlan_gpe_port = port; 5302 bp->vxlan_gpe_fw_dst_port_id = 5303 le16_to_cpu(resp->tunnel_dst_port_id); 5304 break; 5305 default: 5306 break; 5307 } 5308 if (bp->flags & BNXT_FLAG_TPA) 5309 bnxt_set_tpa(bp, true); 5310 5311 err_out: 5312 hwrm_req_drop(bp, req); 5313 return rc; 5314 } 5315 5316 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5317 { 5318 struct hwrm_cfa_l2_set_rx_mask_input *req; 5319 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5320 int rc; 5321 5322 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5323 if (rc) 5324 return rc; 5325 5326 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5327 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5328 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5329 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5330 } 5331 req->mask = cpu_to_le32(vnic->rx_mask); 5332 return hwrm_req_send_silent(bp, req); 5333 } 5334 5335 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5336 { 5337 if (!atomic_dec_and_test(&fltr->refcnt)) 5338 return; 5339 spin_lock_bh(&bp->ntp_fltr_lock); 5340 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5341 spin_unlock_bh(&bp->ntp_fltr_lock); 5342 return; 5343 } 5344 hlist_del_rcu(&fltr->base.hash); 5345 if (fltr->base.flags) { 5346 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5347 bp->ntp_fltr_count--; 5348 } 5349 spin_unlock_bh(&bp->ntp_fltr_lock); 5350 kfree_rcu(fltr, base.rcu); 5351 } 5352 5353 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5354 struct bnxt_l2_key *key, 5355 u32 idx) 5356 { 5357 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5358 struct bnxt_l2_filter *fltr; 5359 5360 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5361 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5362 5363 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5364 l2_key->vlan == key->vlan) 5365 return fltr; 5366 } 5367 return NULL; 5368 } 5369 5370 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5371 struct bnxt_l2_key *key, 5372 u32 idx) 5373 { 5374 struct bnxt_l2_filter *fltr = NULL; 5375 5376 rcu_read_lock(); 5377 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5378 if (fltr) 5379 atomic_inc(&fltr->refcnt); 5380 rcu_read_unlock(); 5381 return fltr; 5382 } 5383 5384 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5385 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5386 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5387 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5388 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5389 5390 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5391 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5392 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5393 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5394 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5395 5396 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5397 { 5398 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5399 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5400 
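		/* Hash input length for the software Toeplitz hash below:
		 * 4-tuple hashing covers addresses plus ports (12 bytes for
		 * IPv4, 36 for IPv6 with the usual flow_keys layout), while
		 * plain IP hashing covers addresses only (8 or 32 bytes).
		 * A length of 0 means this flow type is not RSS-hashed.
		 */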
return sizeof(fkeys->addrs.v4addrs) + 5401 sizeof(fkeys->ports); 5402 5403 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5404 return sizeof(fkeys->addrs.v4addrs); 5405 } 5406 5407 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5408 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5409 return sizeof(fkeys->addrs.v6addrs) + 5410 sizeof(fkeys->ports); 5411 5412 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5413 return sizeof(fkeys->addrs.v6addrs); 5414 } 5415 5416 return 0; 5417 } 5418 5419 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5420 const unsigned char *key) 5421 { 5422 u64 prefix = bp->toeplitz_prefix, hash = 0; 5423 struct bnxt_ipv4_tuple tuple4; 5424 struct bnxt_ipv6_tuple tuple6; 5425 int i, j, len = 0; 5426 u8 *four_tuple; 5427 5428 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5429 if (!len) 5430 return 0; 5431 5432 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5433 tuple4.v4addrs = fkeys->addrs.v4addrs; 5434 tuple4.ports = fkeys->ports; 5435 four_tuple = (unsigned char *)&tuple4; 5436 } else { 5437 tuple6.v6addrs = fkeys->addrs.v6addrs; 5438 tuple6.ports = fkeys->ports; 5439 four_tuple = (unsigned char *)&tuple6; 5440 } 5441 5442 for (i = 0, j = 8; i < len; i++, j++) { 5443 u8 byte = four_tuple[i]; 5444 int bit; 5445 5446 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5447 if (byte & 0x80) 5448 hash ^= prefix; 5449 } 5450 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5451 } 5452 5453 /* The valid part of the hash is in the upper 32 bits. */ 5454 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5455 } 5456 5457 #ifdef CONFIG_RFS_ACCEL 5458 static struct bnxt_l2_filter * 5459 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5460 { 5461 struct bnxt_l2_filter *fltr; 5462 u32 idx; 5463 5464 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5465 BNXT_L2_FLTR_HASH_MASK; 5466 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5467 return fltr; 5468 } 5469 #endif 5470 5471 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 5472 struct bnxt_l2_key *key, u32 idx) 5473 { 5474 struct hlist_head *head; 5475 5476 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 5477 fltr->l2_key.vlan = key->vlan; 5478 fltr->base.type = BNXT_FLTR_TYPE_L2; 5479 if (fltr->base.flags) { 5480 int bit_id; 5481 5482 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5483 BNXT_MAX_FLTR, 0); 5484 if (bit_id < 0) 5485 return -ENOMEM; 5486 fltr->base.sw_id = (u16)bit_id; 5487 } 5488 head = &bp->l2_fltr_hash_tbl[idx]; 5489 hlist_add_head_rcu(&fltr->base.hash, head); 5490 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 5491 atomic_set(&fltr->refcnt, 1); 5492 return 0; 5493 } 5494 5495 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 5496 struct bnxt_l2_key *key, 5497 gfp_t gfp) 5498 { 5499 struct bnxt_l2_filter *fltr; 5500 u32 idx; 5501 int rc; 5502 5503 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5504 BNXT_L2_FLTR_HASH_MASK; 5505 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5506 if (fltr) 5507 return fltr; 5508 5509 fltr = kzalloc(sizeof(*fltr), gfp); 5510 if (!fltr) 5511 return ERR_PTR(-ENOMEM); 5512 spin_lock_bh(&bp->ntp_fltr_lock); 5513 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5514 spin_unlock_bh(&bp->ntp_fltr_lock); 5515 if (rc) { 5516 bnxt_del_l2_filter(bp, fltr); 5517 fltr = ERR_PTR(rc); 5518 } 5519 return fltr; 5520 } 5521 5522 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 5523 { 5524 #ifdef CONFIG_BNXT_SRIOV 5525 struct bnxt_vf_info *vf = 
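	/* With SRIOV compiled in, translate the VF index into the firmware
	 * FID used as target_id for filter requests; otherwise return
	 * INVALID_HW_RING_ID so callers fail with -EINVAL.
	 */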
&pf->vf[vf_idx]; 5526 5527 return vf->fw_fid; 5528 #else 5529 return INVALID_HW_RING_ID; 5530 #endif 5531 } 5532 5533 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5534 { 5535 struct hwrm_cfa_l2_filter_free_input *req; 5536 u16 target_id = 0xffff; 5537 int rc; 5538 5539 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5540 struct bnxt_pf_info *pf = &bp->pf; 5541 5542 if (fltr->base.vf_idx >= pf->active_vfs) 5543 return -EINVAL; 5544 5545 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5546 if (target_id == INVALID_HW_RING_ID) 5547 return -EINVAL; 5548 } 5549 5550 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 5551 if (rc) 5552 return rc; 5553 5554 req->target_id = cpu_to_le16(target_id); 5555 req->l2_filter_id = fltr->base.filter_id; 5556 return hwrm_req_send(bp, req); 5557 } 5558 5559 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5560 { 5561 struct hwrm_cfa_l2_filter_alloc_output *resp; 5562 struct hwrm_cfa_l2_filter_alloc_input *req; 5563 u16 target_id = 0xffff; 5564 int rc; 5565 5566 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 5567 struct bnxt_pf_info *pf = &bp->pf; 5568 5569 if (fltr->base.vf_idx >= pf->active_vfs) 5570 return -EINVAL; 5571 5572 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 5573 } 5574 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 5575 if (rc) 5576 return rc; 5577 5578 req->target_id = cpu_to_le16(target_id); 5579 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 5580 5581 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 5582 req->flags |= 5583 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 5584 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 5585 req->enables = 5586 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 5587 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 5588 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 5589 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 5590 eth_broadcast_addr(req->l2_addr_mask); 5591 5592 if (fltr->l2_key.vlan) { 5593 req->enables |= 5594 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 5595 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 5596 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 5597 req->num_vlans = 1; 5598 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 5599 req->l2_ivlan_mask = cpu_to_le16(0xfff); 5600 } 5601 5602 resp = hwrm_req_hold(bp, req); 5603 rc = hwrm_req_send(bp, req); 5604 if (!rc) { 5605 fltr->base.filter_id = resp->l2_filter_id; 5606 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 5607 } 5608 hwrm_req_drop(bp, req); 5609 return rc; 5610 } 5611 5612 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 5613 struct bnxt_ntuple_filter *fltr) 5614 { 5615 struct hwrm_cfa_ntuple_filter_free_input *req; 5616 int rc; 5617 5618 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 5619 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 5620 if (rc) 5621 return rc; 5622 5623 req->ntuple_filter_id = fltr->base.filter_id; 5624 return hwrm_req_send(bp, req); 5625 } 5626 5627 #define BNXT_NTP_FLTR_FLAGS \ 5628 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 5629 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 5630 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 5631 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 5632 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 5633 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 5634 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 5635 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 5636 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 5637 
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 5638 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 5639 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 5640 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 5641 5642 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 5643 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 5644 5645 void bnxt_fill_ipv6_mask(__be32 mask[4]) 5646 { 5647 int i; 5648 5649 for (i = 0; i < 4; i++) 5650 mask[i] = cpu_to_be32(~0); 5651 } 5652 5653 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5654 struct bnxt_ntuple_filter *fltr) 5655 { 5656 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 5657 struct hwrm_cfa_ntuple_filter_alloc_input *req; 5658 struct flow_keys *keys = &fltr->fkeys; 5659 struct bnxt_l2_filter *l2_fltr; 5660 struct bnxt_vnic_info *vnic; 5661 u32 flags = 0; 5662 int rc; 5663 5664 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 5665 if (rc) 5666 return rc; 5667 5668 l2_fltr = fltr->l2_fltr; 5669 req->l2_filter_id = l2_fltr->base.filter_id; 5670 5671 5672 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 5673 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5674 req->dst_id = cpu_to_le16(fltr->base.rxq); 5675 } else { 5676 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 5677 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5678 } 5679 req->flags = cpu_to_le32(flags); 5680 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5681 5682 req->ethertype = htons(ETH_P_IP); 5683 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 5684 req->ip_protocol = keys->basic.ip_proto; 5685 5686 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 5687 req->ethertype = htons(ETH_P_IPV6); 5688 req->ip_addr_type = 5689 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 5690 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { 5691 *(struct in6_addr *)&req->src_ipaddr[0] = 5692 keys->addrs.v6addrs.src; 5693 bnxt_fill_ipv6_mask(req->src_ipaddr_mask); 5694 } 5695 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { 5696 *(struct in6_addr *)&req->dst_ipaddr[0] = 5697 keys->addrs.v6addrs.dst; 5698 bnxt_fill_ipv6_mask(req->dst_ipaddr_mask); 5699 } 5700 } else { 5701 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { 5702 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 5703 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5704 } 5705 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { 5706 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 5707 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5708 } 5709 } 5710 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 5711 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 5712 req->tunnel_type = 5713 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 5714 } 5715 5716 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { 5717 req->src_port = keys->ports.src; 5718 req->src_port_mask = cpu_to_be16(0xffff); 5719 } 5720 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { 5721 req->dst_port = keys->ports.dst; 5722 req->dst_port_mask = cpu_to_be16(0xffff); 5723 } 5724 5725 resp = hwrm_req_hold(bp, req); 5726 rc = hwrm_req_send(bp, req); 5727 if (!rc) 5728 fltr->base.filter_id = resp->ntuple_filter_id; 5729 hwrm_req_drop(bp, req); 5730 return rc; 5731 } 5732 5733 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 5734 const u8 *mac_addr) 5735 { 5736 struct bnxt_l2_filter *fltr; 5737 struct bnxt_l2_key key; 5738 int rc; 5739 5740 ether_addr_copy(key.dst_mac_addr, mac_addr); 5741 key.vlan = 0; 5742 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 5743 if (IS_ERR(fltr)) 5744 return 
		       PTR_ERR(fltr);

	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
	return rc;
}

static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */

	/* Any associated ntuple filters will also be cleared by firmware. */
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];

			bnxt_hwrm_l2_filter_free(bp, fltr);
			bnxt_del_l2_filter(bp, fltr);
		}
		vnic->uc_filter_count = 0;
	}
}

#define BNXT_DFLT_TUNL_TPA_BMAP				\
	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)

static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
					   struct hwrm_vnic_tpa_cfg_input *req)
{
	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;

	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
		return;

	if (bp->vxlan_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
	if (bp->vxlan_gpe_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
	if (bp->nge_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;

	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
	struct hwrm_vnic_tpa_cfg_input *req;
	int rc;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
	if (rc)
		return rc;

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req->flags = cpu_to_le32(flags);

		req->enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of segs is in log2 units, and the first packet
		 * is not included as part of these units.
5832 */ 5833 if (mss <= BNXT_RX_PAGE_SIZE) { 5834 n = BNXT_RX_PAGE_SIZE / mss; 5835 nsegs = (MAX_SKB_FRAGS - 1) * n; 5836 } else { 5837 n = mss / BNXT_RX_PAGE_SIZE; 5838 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 5839 n++; 5840 nsegs = (MAX_SKB_FRAGS - n) / n; 5841 } 5842 5843 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5844 segs = MAX_TPA_SEGS_P5; 5845 max_aggs = bp->max_tpa; 5846 } else { 5847 segs = ilog2(nsegs); 5848 } 5849 req->max_agg_segs = cpu_to_le16(segs); 5850 req->max_aggs = cpu_to_le16(max_aggs); 5851 5852 req->min_agg_len = cpu_to_le32(512); 5853 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 5854 } 5855 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5856 5857 return hwrm_req_send(bp, req); 5858 } 5859 5860 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 5861 { 5862 struct bnxt_ring_grp_info *grp_info; 5863 5864 grp_info = &bp->grp_info[ring->grp_idx]; 5865 return grp_info->cp_fw_ring_id; 5866 } 5867 5868 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 5869 { 5870 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5871 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 5872 else 5873 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 5874 } 5875 5876 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 5877 { 5878 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5879 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 5880 else 5881 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 5882 } 5883 5884 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 5885 { 5886 int entries; 5887 5888 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5889 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 5890 else 5891 entries = HW_HASH_INDEX_SIZE; 5892 5893 bp->rss_indir_tbl_entries = entries; 5894 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), 5895 GFP_KERNEL); 5896 if (!bp->rss_indir_tbl) 5897 return -ENOMEM; 5898 return 0; 5899 } 5900 5901 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) 5902 { 5903 u16 max_rings, max_entries, pad, i; 5904 5905 if (!bp->rx_nr_rings) 5906 return; 5907 5908 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5909 max_rings = bp->rx_nr_rings - 1; 5910 else 5911 max_rings = bp->rx_nr_rings; 5912 5913 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 5914 5915 for (i = 0; i < max_entries; i++) 5916 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 5917 5918 pad = bp->rss_indir_tbl_entries - max_entries; 5919 if (pad) 5920 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 5921 } 5922 5923 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 5924 { 5925 u16 i, tbl_size, max_ring = 0; 5926 5927 if (!bp->rss_indir_tbl) 5928 return 0; 5929 5930 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5931 for (i = 0; i < tbl_size; i++) 5932 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 5933 return max_ring; 5934 } 5935 5936 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5937 { 5938 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5939 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5940 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5941 return 2; 5942 return 1; 5943 } 5944 5945 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5946 { 5947 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 5948 u16 i, j; 5949 5950 /* Fill the RSS indirection table with ring group ids */ 5951 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 5952 if (!no_rss) 5953 j = bp->rss_indir_tbl[i]; 5954 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 5955 } 5956 } 5957 5958 static void 
bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 5959 struct bnxt_vnic_info *vnic) 5960 { 5961 __le16 *ring_tbl = vnic->rss_table; 5962 struct bnxt_rx_ring_info *rxr; 5963 u16 tbl_size, i; 5964 5965 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5966 5967 for (i = 0; i < tbl_size; i++) { 5968 u16 ring_id, j; 5969 5970 j = bp->rss_indir_tbl[i]; 5971 rxr = &bp->rx_ring[j]; 5972 5973 ring_id = rxr->rx_ring_struct.fw_ring_id; 5974 *ring_tbl++ = cpu_to_le16(ring_id); 5975 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5976 *ring_tbl++ = cpu_to_le16(ring_id); 5977 } 5978 } 5979 5980 static void 5981 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 5982 struct bnxt_vnic_info *vnic) 5983 { 5984 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5985 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 5986 else 5987 bnxt_fill_hw_rss_tbl(bp, vnic); 5988 5989 if (bp->rss_hash_delta) { 5990 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 5991 if (bp->rss_hash_cfg & bp->rss_hash_delta) 5992 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 5993 else 5994 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 5995 } else { 5996 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 5997 } 5998 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5999 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6000 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6001 } 6002 6003 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 6004 { 6005 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6006 struct hwrm_vnic_rss_cfg_input *req; 6007 int rc; 6008 6009 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6010 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6011 return 0; 6012 6013 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6014 if (rc) 6015 return rc; 6016 6017 if (set_rss) 6018 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6019 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6020 return hwrm_req_send(bp, req); 6021 } 6022 6023 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 6024 { 6025 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6026 struct hwrm_vnic_rss_cfg_input *req; 6027 dma_addr_t ring_tbl_map; 6028 u32 i, nr_ctxs; 6029 int rc; 6030 6031 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6032 if (rc) 6033 return rc; 6034 6035 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6036 if (!set_rss) 6037 return hwrm_req_send(bp, req); 6038 6039 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6040 ring_tbl_map = vnic->rss_table_dma_addr; 6041 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6042 6043 hwrm_req_hold(bp, req); 6044 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6045 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6046 req->ring_table_pair_index = i; 6047 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6048 rc = hwrm_req_send(bp, req); 6049 if (rc) 6050 goto exit; 6051 } 6052 6053 exit: 6054 hwrm_req_drop(bp, req); 6055 return rc; 6056 } 6057 6058 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6059 { 6060 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6061 struct hwrm_vnic_rss_qcfg_output *resp; 6062 struct hwrm_vnic_rss_qcfg_input *req; 6063 6064 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6065 return; 6066 6067 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6068 /* all contexts configured to same hash_type, zero always exists */ 6069 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6070 resp = hwrm_req_hold(bp, req); 6071 if 
(!hwrm_req_send(bp, req)) { 6072 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6073 bp->rss_hash_delta = 0; 6074 } 6075 hwrm_req_drop(bp, req); 6076 } 6077 6078 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 6079 { 6080 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6081 struct hwrm_vnic_plcmodes_cfg_input *req; 6082 int rc; 6083 6084 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6085 if (rc) 6086 return rc; 6087 6088 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6089 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6090 6091 if (BNXT_RX_PAGE_MODE(bp)) { 6092 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6093 } else { 6094 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6095 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6096 req->enables |= 6097 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6098 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 6099 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 6100 } 6101 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6102 return hwrm_req_send(bp, req); 6103 } 6104 6105 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 6106 u16 ctx_idx) 6107 { 6108 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6109 6110 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6111 return; 6112 6113 req->rss_cos_lb_ctx_id = 6114 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 6115 6116 hwrm_req_send(bp, req); 6117 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6118 } 6119 6120 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6121 { 6122 int i, j; 6123 6124 for (i = 0; i < bp->nr_vnics; i++) { 6125 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6126 6127 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6128 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6129 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 6130 } 6131 } 6132 bp->rsscos_nr_ctxs = 0; 6133 } 6134 6135 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 6136 { 6137 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6138 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6139 int rc; 6140 6141 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6142 if (rc) 6143 return rc; 6144 6145 resp = hwrm_req_hold(bp, req); 6146 rc = hwrm_req_send(bp, req); 6147 if (!rc) 6148 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 6149 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6150 hwrm_req_drop(bp, req); 6151 6152 return rc; 6153 } 6154 6155 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6156 { 6157 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6158 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6159 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6160 } 6161 6162 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 6163 { 6164 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6165 struct hwrm_vnic_cfg_input *req; 6166 unsigned int ring = 0, grp_idx; 6167 u16 def_vlan = 0; 6168 int rc; 6169 6170 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6171 if (rc) 6172 return rc; 6173 6174 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6175 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6176 6177 req->default_rx_ring_id = 6178 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6179 req->default_cmpl_ring_id = 6180 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6181 req->enables = 6182 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6183 
VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6184 goto vnic_mru; 6185 } 6186 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6187 /* Only RSS support for now TBD: COS & LB */ 6188 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6189 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6190 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6191 VNIC_CFG_REQ_ENABLES_MRU); 6192 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6193 req->rss_rule = 6194 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 6195 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6196 VNIC_CFG_REQ_ENABLES_MRU); 6197 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6198 } else { 6199 req->rss_rule = cpu_to_le16(0xffff); 6200 } 6201 6202 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6203 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6204 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6205 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6206 } else { 6207 req->cos_rule = cpu_to_le16(0xffff); 6208 } 6209 6210 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6211 ring = 0; 6212 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6213 ring = vnic_id - 1; 6214 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6215 ring = bp->rx_nr_rings - 1; 6216 6217 grp_idx = bp->rx_ring[ring].bnapi->index; 6218 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6219 req->lb_rule = cpu_to_le16(0xffff); 6220 vnic_mru: 6221 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 6222 6223 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6224 #ifdef CONFIG_BNXT_SRIOV 6225 if (BNXT_VF(bp)) 6226 def_vlan = bp->vf.vlan; 6227 #endif 6228 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6229 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6230 if (!vnic_id && bnxt_ulp_registered(bp->edev)) 6231 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6232 6233 return hwrm_req_send(bp, req); 6234 } 6235 6236 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 6237 { 6238 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 6239 struct hwrm_vnic_free_input *req; 6240 6241 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6242 return; 6243 6244 req->vnic_id = 6245 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 6246 6247 hwrm_req_send(bp, req); 6248 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 6249 } 6250 } 6251 6252 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6253 { 6254 u16 i; 6255 6256 for (i = 0; i < bp->nr_vnics; i++) 6257 bnxt_hwrm_vnic_free_one(bp, i); 6258 } 6259 6260 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 6261 unsigned int start_rx_ring_idx, 6262 unsigned int nr_rings) 6263 { 6264 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6265 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6266 struct hwrm_vnic_alloc_output *resp; 6267 struct hwrm_vnic_alloc_input *req; 6268 int rc; 6269 6270 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6271 if (rc) 6272 return rc; 6273 6274 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6275 goto vnic_no_ring_grps; 6276 6277 /* map ring groups to this vnic */ 6278 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6279 grp_idx = bp->rx_ring[i].bnapi->index; 6280 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6281 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6282 j, nr_rings); 6283 break; 6284 } 6285 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6286 } 6287 6288 vnic_no_ring_grps: 
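	/* Mark every RSS/COS/LB context of this VNIC as unallocated; real
	 * context IDs are assigned when bnxt_hwrm_vnic_ctx_alloc() is called.
	 * VNIC 0 is also requested as the default VNIC.
	 */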
6289 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6290 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6291 if (vnic_id == 0) 6292 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6293 6294 resp = hwrm_req_hold(bp, req); 6295 rc = hwrm_req_send(bp, req); 6296 if (!rc) 6297 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6298 hwrm_req_drop(bp, req); 6299 return rc; 6300 } 6301 6302 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6303 { 6304 struct hwrm_vnic_qcaps_output *resp; 6305 struct hwrm_vnic_qcaps_input *req; 6306 int rc; 6307 6308 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6309 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6310 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6311 if (bp->hwrm_spec_code < 0x10600) 6312 return 0; 6313 6314 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6315 if (rc) 6316 return rc; 6317 6318 resp = hwrm_req_hold(bp, req); 6319 rc = hwrm_req_send(bp, req); 6320 if (!rc) { 6321 u32 flags = le32_to_cpu(resp->flags); 6322 6323 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6324 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6325 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6326 if (flags & 6327 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6328 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6329 6330 /* Older P5 fw before EXT_HW_STATS support did not set 6331 * VLAN_STRIP_CAP properly. 6332 */ 6333 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6334 (BNXT_CHIP_P5(bp) && 6335 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6336 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6337 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6338 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6339 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6340 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6341 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6342 if (bp->max_tpa_v2) { 6343 if (BNXT_CHIP_P5(bp)) 6344 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6345 else 6346 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6347 } 6348 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6349 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6350 } 6351 hwrm_req_drop(bp, req); 6352 return rc; 6353 } 6354 6355 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6356 { 6357 struct hwrm_ring_grp_alloc_output *resp; 6358 struct hwrm_ring_grp_alloc_input *req; 6359 int rc; 6360 u16 i; 6361 6362 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6363 return 0; 6364 6365 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6366 if (rc) 6367 return rc; 6368 6369 resp = hwrm_req_hold(bp, req); 6370 for (i = 0; i < bp->rx_nr_rings; i++) { 6371 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6372 6373 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6374 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6375 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6376 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6377 6378 rc = hwrm_req_send(bp, req); 6379 6380 if (rc) 6381 break; 6382 6383 bp->grp_info[grp_idx].fw_grp_id = 6384 le32_to_cpu(resp->ring_group_id); 6385 } 6386 hwrm_req_drop(bp, req); 6387 return rc; 6388 } 6389 6390 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6391 { 6392 struct hwrm_ring_grp_free_input *req; 6393 u16 i; 6394 6395 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6396 return; 6397 6398 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6399 return; 6400 6401 hwrm_req_hold(bp, req); 6402 for (i = 0; i < bp->cp_nr_rings; i++) { 6403 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6404 
continue; 6405 req->ring_group_id = 6406 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6407 6408 hwrm_req_send(bp, req); 6409 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6410 } 6411 hwrm_req_drop(bp, req); 6412 } 6413 6414 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 6415 struct bnxt_ring_struct *ring, 6416 u32 ring_type, u32 map_index) 6417 { 6418 struct hwrm_ring_alloc_output *resp; 6419 struct hwrm_ring_alloc_input *req; 6420 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 6421 struct bnxt_ring_grp_info *grp_info; 6422 int rc, err = 0; 6423 u16 ring_id; 6424 6425 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 6426 if (rc) 6427 goto exit; 6428 6429 req->enables = 0; 6430 if (rmem->nr_pages > 1) { 6431 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 6432 /* Page size is in log2 units */ 6433 req->page_size = BNXT_PAGE_SHIFT; 6434 req->page_tbl_depth = 1; 6435 } else { 6436 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 6437 } 6438 req->fbo = 0; 6439 /* Association of ring index with doorbell index and MSIX number */ 6440 req->logical_id = cpu_to_le16(map_index); 6441 6442 switch (ring_type) { 6443 case HWRM_RING_ALLOC_TX: { 6444 struct bnxt_tx_ring_info *txr; 6445 6446 txr = container_of(ring, struct bnxt_tx_ring_info, 6447 tx_ring_struct); 6448 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 6449 /* Association of transmit ring with completion ring */ 6450 grp_info = &bp->grp_info[ring->grp_idx]; 6451 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 6452 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 6453 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6454 req->queue_id = cpu_to_le16(ring->queue_id); 6455 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 6456 req->cmpl_coal_cnt = 6457 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 6458 break; 6459 } 6460 case HWRM_RING_ALLOC_RX: 6461 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6462 req->length = cpu_to_le32(bp->rx_ring_mask + 1); 6463 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6464 u16 flags = 0; 6465 6466 /* Association of rx ring with stats context */ 6467 grp_info = &bp->grp_info[ring->grp_idx]; 6468 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6469 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6470 req->enables |= cpu_to_le32( 6471 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6472 if (NET_IP_ALIGN == 2) 6473 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 6474 req->flags = cpu_to_le16(flags); 6475 } 6476 break; 6477 case HWRM_RING_ALLOC_AGG: 6478 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6479 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6480 /* Association of agg ring with rx ring */ 6481 grp_info = &bp->grp_info[ring->grp_idx]; 6482 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6483 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6484 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6485 req->enables |= cpu_to_le32( 6486 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 6487 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6488 } else { 6489 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6490 } 6491 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 6492 break; 6493 case HWRM_RING_ALLOC_CMPL: 6494 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 6495 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6496 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6497 /* Association of cp ring with nq */ 6498 grp_info = &bp->grp_info[map_index]; 6499 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6500 req->cq_handle = cpu_to_le64(ring->handle); 6501 req->enables |= cpu_to_le32( 6502 
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 6503 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 6504 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6505 } 6506 break; 6507 case HWRM_RING_ALLOC_NQ: 6508 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 6509 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6510 if (bp->flags & BNXT_FLAG_USING_MSIX) 6511 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6512 break; 6513 default: 6514 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 6515 ring_type); 6516 return -1; 6517 } 6518 6519 resp = hwrm_req_hold(bp, req); 6520 rc = hwrm_req_send(bp, req); 6521 err = le16_to_cpu(resp->error_code); 6522 ring_id = le16_to_cpu(resp->ring_id); 6523 hwrm_req_drop(bp, req); 6524 6525 exit: 6526 if (rc || err) { 6527 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 6528 ring_type, rc, err); 6529 return -EIO; 6530 } 6531 ring->fw_ring_id = ring_id; 6532 return rc; 6533 } 6534 6535 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 6536 { 6537 int rc; 6538 6539 if (BNXT_PF(bp)) { 6540 struct hwrm_func_cfg_input *req; 6541 6542 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 6543 if (rc) 6544 return rc; 6545 6546 req->fid = cpu_to_le16(0xffff); 6547 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6548 req->async_event_cr = cpu_to_le16(idx); 6549 return hwrm_req_send(bp, req); 6550 } else { 6551 struct hwrm_func_vf_cfg_input *req; 6552 6553 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 6554 if (rc) 6555 return rc; 6556 6557 req->enables = 6558 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6559 req->async_event_cr = cpu_to_le16(idx); 6560 return hwrm_req_send(bp, req); 6561 } 6562 } 6563 6564 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 6565 u32 ring_type) 6566 { 6567 switch (ring_type) { 6568 case HWRM_RING_ALLOC_TX: 6569 db->db_ring_mask = bp->tx_ring_mask; 6570 break; 6571 case HWRM_RING_ALLOC_RX: 6572 db->db_ring_mask = bp->rx_ring_mask; 6573 break; 6574 case HWRM_RING_ALLOC_AGG: 6575 db->db_ring_mask = bp->rx_agg_ring_mask; 6576 break; 6577 case HWRM_RING_ALLOC_CMPL: 6578 case HWRM_RING_ALLOC_NQ: 6579 db->db_ring_mask = bp->cp_ring_mask; 6580 break; 6581 } 6582 if (bp->flags & BNXT_FLAG_CHIP_P7) { 6583 db->db_epoch_mask = db->db_ring_mask + 1; 6584 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 6585 } 6586 } 6587 6588 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 6589 u32 map_idx, u32 xid) 6590 { 6591 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6592 switch (ring_type) { 6593 case HWRM_RING_ALLOC_TX: 6594 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 6595 break; 6596 case HWRM_RING_ALLOC_RX: 6597 case HWRM_RING_ALLOC_AGG: 6598 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 6599 break; 6600 case HWRM_RING_ALLOC_CMPL: 6601 db->db_key64 = DBR_PATH_L2; 6602 break; 6603 case HWRM_RING_ALLOC_NQ: 6604 db->db_key64 = DBR_PATH_L2; 6605 break; 6606 } 6607 db->db_key64 |= (u64)xid << DBR_XID_SFT; 6608 6609 if (bp->flags & BNXT_FLAG_CHIP_P7) 6610 db->db_key64 |= DBR_VALID; 6611 6612 db->doorbell = bp->bar1 + bp->db_offset; 6613 } else { 6614 db->doorbell = bp->bar1 + map_idx * 0x80; 6615 switch (ring_type) { 6616 case HWRM_RING_ALLOC_TX: 6617 db->db_key32 = DB_KEY_TX; 6618 break; 6619 case HWRM_RING_ALLOC_RX: 6620 case HWRM_RING_ALLOC_AGG: 6621 db->db_key32 = DB_KEY_RX; 6622 break; 6623 case HWRM_RING_ALLOC_CMPL: 6624 db->db_key32 = DB_KEY_CP; 6625 break; 6626 } 6627 } 6628 bnxt_set_db_mask(bp, db, ring_type); 6629 } 6630 6631 static int 
bnxt_hwrm_ring_alloc(struct bnxt *bp) 6632 { 6633 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 6634 int i, rc = 0; 6635 u32 type; 6636 6637 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6638 type = HWRM_RING_ALLOC_NQ; 6639 else 6640 type = HWRM_RING_ALLOC_CMPL; 6641 for (i = 0; i < bp->cp_nr_rings; i++) { 6642 struct bnxt_napi *bnapi = bp->bnapi[i]; 6643 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6644 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 6645 u32 map_idx = ring->map_idx; 6646 unsigned int vector; 6647 6648 vector = bp->irq_tbl[map_idx].vector; 6649 disable_irq_nosync(vector); 6650 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6651 if (rc) { 6652 enable_irq(vector); 6653 goto err_out; 6654 } 6655 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 6656 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 6657 enable_irq(vector); 6658 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 6659 6660 if (!i) { 6661 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 6662 if (rc) 6663 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 6664 } 6665 } 6666 6667 type = HWRM_RING_ALLOC_TX; 6668 for (i = 0; i < bp->tx_nr_rings; i++) { 6669 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6670 struct bnxt_ring_struct *ring; 6671 u32 map_idx; 6672 6673 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6674 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; 6675 struct bnxt_napi *bnapi = txr->bnapi; 6676 u32 type2 = HWRM_RING_ALLOC_CMPL; 6677 6678 ring = &cpr2->cp_ring_struct; 6679 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6680 map_idx = bnapi->index; 6681 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6682 if (rc) 6683 goto err_out; 6684 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6685 ring->fw_ring_id); 6686 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6687 } 6688 ring = &txr->tx_ring_struct; 6689 map_idx = i; 6690 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6691 if (rc) 6692 goto err_out; 6693 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 6694 } 6695 6696 type = HWRM_RING_ALLOC_RX; 6697 for (i = 0; i < bp->rx_nr_rings; i++) { 6698 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6699 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6700 struct bnxt_napi *bnapi = rxr->bnapi; 6701 u32 map_idx = bnapi->index; 6702 6703 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6704 if (rc) 6705 goto err_out; 6706 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 6707 /* If we have agg rings, post agg buffers first. 
*/ 6708 if (!agg_rings) 6709 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6710 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 6711 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6712 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; 6713 u32 type2 = HWRM_RING_ALLOC_CMPL; 6714 6715 ring = &cpr2->cp_ring_struct; 6716 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6717 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6718 if (rc) 6719 goto err_out; 6720 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6721 ring->fw_ring_id); 6722 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6723 } 6724 } 6725 6726 if (agg_rings) { 6727 type = HWRM_RING_ALLOC_AGG; 6728 for (i = 0; i < bp->rx_nr_rings; i++) { 6729 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6730 struct bnxt_ring_struct *ring = 6731 &rxr->rx_agg_ring_struct; 6732 u32 grp_idx = ring->grp_idx; 6733 u32 map_idx = grp_idx + bp->rx_nr_rings; 6734 6735 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6736 if (rc) 6737 goto err_out; 6738 6739 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 6740 ring->fw_ring_id); 6741 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 6742 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6743 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 6744 } 6745 } 6746 err_out: 6747 return rc; 6748 } 6749 6750 static int hwrm_ring_free_send_msg(struct bnxt *bp, 6751 struct bnxt_ring_struct *ring, 6752 u32 ring_type, int cmpl_ring_id) 6753 { 6754 struct hwrm_ring_free_output *resp; 6755 struct hwrm_ring_free_input *req; 6756 u16 error_code = 0; 6757 int rc; 6758 6759 if (BNXT_NO_FW_ACCESS(bp)) 6760 return 0; 6761 6762 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 6763 if (rc) 6764 goto exit; 6765 6766 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 6767 req->ring_type = ring_type; 6768 req->ring_id = cpu_to_le16(ring->fw_ring_id); 6769 6770 resp = hwrm_req_hold(bp, req); 6771 rc = hwrm_req_send(bp, req); 6772 error_code = le16_to_cpu(resp->error_code); 6773 hwrm_req_drop(bp, req); 6774 exit: 6775 if (rc || error_code) { 6776 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 6777 ring_type, rc, error_code); 6778 return -EIO; 6779 } 6780 return 0; 6781 } 6782 6783 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 6784 { 6785 u32 type; 6786 int i; 6787 6788 if (!bp->bnapi) 6789 return; 6790 6791 for (i = 0; i < bp->tx_nr_rings; i++) { 6792 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6793 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 6794 6795 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6796 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 6797 6798 hwrm_ring_free_send_msg(bp, ring, 6799 RING_FREE_REQ_RING_TYPE_TX, 6800 close_path ? cmpl_ring_id : 6801 INVALID_HW_RING_ID); 6802 ring->fw_ring_id = INVALID_HW_RING_ID; 6803 } 6804 } 6805 6806 for (i = 0; i < bp->rx_nr_rings; i++) { 6807 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6808 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6809 u32 grp_idx = rxr->bnapi->index; 6810 6811 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6812 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6813 6814 hwrm_ring_free_send_msg(bp, ring, 6815 RING_FREE_REQ_RING_TYPE_RX, 6816 close_path ? 
cmpl_ring_id : 6817 INVALID_HW_RING_ID); 6818 ring->fw_ring_id = INVALID_HW_RING_ID; 6819 bp->grp_info[grp_idx].rx_fw_ring_id = 6820 INVALID_HW_RING_ID; 6821 } 6822 } 6823 6824 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6825 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 6826 else 6827 type = RING_FREE_REQ_RING_TYPE_RX; 6828 for (i = 0; i < bp->rx_nr_rings; i++) { 6829 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6830 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 6831 u32 grp_idx = rxr->bnapi->index; 6832 6833 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6834 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6835 6836 hwrm_ring_free_send_msg(bp, ring, type, 6837 close_path ? cmpl_ring_id : 6838 INVALID_HW_RING_ID); 6839 ring->fw_ring_id = INVALID_HW_RING_ID; 6840 bp->grp_info[grp_idx].agg_fw_ring_id = 6841 INVALID_HW_RING_ID; 6842 } 6843 } 6844 6845 /* The completion rings are about to be freed. After that the 6846 * IRQ doorbell will not work anymore. So we need to disable 6847 * IRQ here. 6848 */ 6849 bnxt_disable_int_sync(bp); 6850 6851 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6852 type = RING_FREE_REQ_RING_TYPE_NQ; 6853 else 6854 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 6855 for (i = 0; i < bp->cp_nr_rings; i++) { 6856 struct bnxt_napi *bnapi = bp->bnapi[i]; 6857 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6858 struct bnxt_ring_struct *ring; 6859 int j; 6860 6861 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { 6862 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 6863 6864 ring = &cpr2->cp_ring_struct; 6865 if (ring->fw_ring_id == INVALID_HW_RING_ID) 6866 continue; 6867 hwrm_ring_free_send_msg(bp, ring, 6868 RING_FREE_REQ_RING_TYPE_L2_CMPL, 6869 INVALID_HW_RING_ID); 6870 ring->fw_ring_id = INVALID_HW_RING_ID; 6871 } 6872 ring = &cpr->cp_ring_struct; 6873 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6874 hwrm_ring_free_send_msg(bp, ring, type, 6875 INVALID_HW_RING_ID); 6876 ring->fw_ring_id = INVALID_HW_RING_ID; 6877 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 6878 } 6879 } 6880 } 6881 6882 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 6883 bool shared); 6884 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 6885 bool shared); 6886 6887 static int bnxt_hwrm_get_rings(struct bnxt *bp) 6888 { 6889 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6890 struct hwrm_func_qcfg_output *resp; 6891 struct hwrm_func_qcfg_input *req; 6892 int rc; 6893 6894 if (bp->hwrm_spec_code < 0x10601) 6895 return 0; 6896 6897 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6898 if (rc) 6899 return rc; 6900 6901 req->fid = cpu_to_le16(0xffff); 6902 resp = hwrm_req_hold(bp, req); 6903 rc = hwrm_req_send(bp, req); 6904 if (rc) { 6905 hwrm_req_drop(bp, req); 6906 return rc; 6907 } 6908 6909 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6910 if (BNXT_NEW_RM(bp)) { 6911 u16 cp, stats; 6912 6913 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 6914 hw_resc->resv_hw_ring_grps = 6915 le32_to_cpu(resp->alloc_hw_ring_grps); 6916 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 6917 cp = le16_to_cpu(resp->alloc_cmpl_rings); 6918 stats = le16_to_cpu(resp->alloc_stat_ctx); 6919 hw_resc->resv_irqs = cp; 6920 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6921 int rx = hw_resc->resv_rx_rings; 6922 int tx = hw_resc->resv_tx_rings; 6923 6924 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6925 rx >>= 1; 6926 if (cp < (rx + tx)) { 6927 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 6928 if (rc) 6929 return rc; 6930 if (bp->flags & 
BNXT_FLAG_AGG_RINGS) 6931 rx <<= 1; 6932 hw_resc->resv_rx_rings = rx; 6933 hw_resc->resv_tx_rings = tx; 6934 } 6935 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 6936 hw_resc->resv_hw_ring_grps = rx; 6937 } 6938 hw_resc->resv_cp_rings = cp; 6939 hw_resc->resv_stat_ctxs = stats; 6940 } 6941 hwrm_req_drop(bp, req); 6942 return 0; 6943 } 6944 6945 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 6946 { 6947 struct hwrm_func_qcfg_output *resp; 6948 struct hwrm_func_qcfg_input *req; 6949 int rc; 6950 6951 if (bp->hwrm_spec_code < 0x10601) 6952 return 0; 6953 6954 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6955 if (rc) 6956 return rc; 6957 6958 req->fid = cpu_to_le16(fid); 6959 resp = hwrm_req_hold(bp, req); 6960 rc = hwrm_req_send(bp, req); 6961 if (!rc) 6962 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6963 6964 hwrm_req_drop(bp, req); 6965 return rc; 6966 } 6967 6968 static bool bnxt_rfs_supported(struct bnxt *bp); 6969 6970 static struct hwrm_func_cfg_input * 6971 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6972 int ring_grps, int cp_rings, int stats, int vnics) 6973 { 6974 struct hwrm_func_cfg_input *req; 6975 u32 enables = 0; 6976 6977 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 6978 return NULL; 6979 6980 req->fid = cpu_to_le16(0xffff); 6981 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6982 req->num_tx_rings = cpu_to_le16(tx_rings); 6983 if (BNXT_NEW_RM(bp)) { 6984 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 6985 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6986 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6987 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 6988 enables |= tx_rings + ring_grps ? 6989 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6990 enables |= rx_rings ? 6991 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6992 } else { 6993 enables |= cp_rings ? 6994 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6995 enables |= ring_grps ? 6996 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 6997 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6998 } 6999 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7000 7001 req->num_rx_rings = cpu_to_le16(rx_rings); 7002 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7003 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7004 req->num_msix = cpu_to_le16(cp_rings); 7005 req->num_rsscos_ctxs = 7006 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7007 } else { 7008 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7009 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 7010 req->num_rsscos_ctxs = cpu_to_le16(1); 7011 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 7012 bnxt_rfs_supported(bp)) 7013 req->num_rsscos_ctxs = 7014 cpu_to_le16(ring_grps + 1); 7015 } 7016 req->num_stat_ctxs = cpu_to_le16(stats); 7017 req->num_vnics = cpu_to_le16(vnics); 7018 } 7019 req->enables = cpu_to_le32(enables); 7020 return req; 7021 } 7022 7023 static struct hwrm_func_vf_cfg_input * 7024 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7025 int ring_grps, int cp_rings, int stats, int vnics) 7026 { 7027 struct hwrm_func_vf_cfg_input *req; 7028 u32 enables = 0; 7029 7030 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7031 return NULL; 7032 7033 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7034 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7035 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7036 enables |= stats ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7037 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7038 enables |= tx_rings + ring_grps ? 7039 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7040 } else { 7041 enables |= cp_rings ? 7042 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7043 enables |= ring_grps ? 7044 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7045 } 7046 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7047 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7048 7049 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7050 req->num_tx_rings = cpu_to_le16(tx_rings); 7051 req->num_rx_rings = cpu_to_le16(rx_rings); 7052 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7053 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7054 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7055 } else { 7056 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7057 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 7058 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 7059 } 7060 req->num_stat_ctxs = cpu_to_le16(stats); 7061 req->num_vnics = cpu_to_le16(vnics); 7062 7063 req->enables = cpu_to_le32(enables); 7064 return req; 7065 } 7066 7067 static int 7068 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7069 int ring_grps, int cp_rings, int stats, int vnics) 7070 { 7071 struct hwrm_func_cfg_input *req; 7072 int rc; 7073 7074 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 7075 cp_rings, stats, vnics); 7076 if (!req) 7077 return -ENOMEM; 7078 7079 if (!req->enables) { 7080 hwrm_req_drop(bp, req); 7081 return 0; 7082 } 7083 7084 rc = hwrm_req_send(bp, req); 7085 if (rc) 7086 return rc; 7087 7088 if (bp->hwrm_spec_code < 0x10601) 7089 bp->hw_resc.resv_tx_rings = tx_rings; 7090 7091 return bnxt_hwrm_get_rings(bp); 7092 } 7093 7094 static int 7095 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7096 int ring_grps, int cp_rings, int stats, int vnics) 7097 { 7098 struct hwrm_func_vf_cfg_input *req; 7099 int rc; 7100 7101 if (!BNXT_NEW_RM(bp)) { 7102 bp->hw_resc.resv_tx_rings = tx_rings; 7103 return 0; 7104 } 7105 7106 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7107 cp_rings, stats, vnics); 7108 if (!req) 7109 return -ENOMEM; 7110 7111 rc = hwrm_req_send(bp, req); 7112 if (rc) 7113 return rc; 7114 7115 return bnxt_hwrm_get_rings(bp); 7116 } 7117 7118 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 7119 int cp, int stat, int vnic) 7120 { 7121 if (BNXT_PF(bp)) 7122 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 7123 vnic); 7124 else 7125 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 7126 vnic); 7127 } 7128 7129 int bnxt_nq_rings_in_use(struct bnxt *bp) 7130 { 7131 int cp = bp->cp_nr_rings; 7132 int ulp_msix, ulp_base; 7133 7134 ulp_msix = bnxt_get_ulp_msix_num(bp); 7135 if (ulp_msix) { 7136 ulp_base = bnxt_get_ulp_msix_base(bp); 7137 cp += ulp_msix; 7138 if ((ulp_base + ulp_msix) > cp) 7139 cp = ulp_base + ulp_msix; 7140 } 7141 return cp; 7142 } 7143 7144 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7145 { 7146 int cp; 7147 7148 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7149 return bnxt_nq_rings_in_use(bp); 7150 7151 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7152 return cp; 7153 } 7154 7155 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7156 { 7157 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 7158 int cp = bp->cp_nr_rings; 7159 7160 if (!ulp_stat) 7161 return cp; 7162 7163 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 
7164 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 7165 7166 return cp + ulp_stat; 7167 } 7168 7169 /* Check if a default RSS map needs to be setup. This function is only 7170 * used on older firmware that does not require reserving RX rings. 7171 */ 7172 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7173 { 7174 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7175 7176 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7177 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7178 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7179 if (!netif_is_rxfh_configured(bp->dev)) 7180 bnxt_set_dflt_rss_indir_tbl(bp); 7181 } 7182 } 7183 7184 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7185 { 7186 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7187 int cp = bnxt_cp_rings_in_use(bp); 7188 int nq = bnxt_nq_rings_in_use(bp); 7189 int rx = bp->rx_nr_rings, stat; 7190 int vnic = 1, grp = rx; 7191 7192 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7193 bp->hwrm_spec_code >= 0x10601) 7194 return true; 7195 7196 /* Old firmware does not need RX ring reservations but we still 7197 * need to setup a default RSS map when needed. With new firmware 7198 * we go through RX ring reservations first and then set up the 7199 * RSS map for the successfully reserved RX rings when needed. 7200 */ 7201 if (!BNXT_NEW_RM(bp)) { 7202 bnxt_check_rss_tbl_no_rmgr(bp); 7203 return false; 7204 } 7205 if ((bp->flags & BNXT_FLAG_RFS) && 7206 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7207 vnic = rx + 1; 7208 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7209 rx <<= 1; 7210 stat = bnxt_get_func_stat_ctxs(bp); 7211 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7212 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7213 (hw_resc->resv_hw_ring_grps != grp && 7214 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7215 return true; 7216 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7217 hw_resc->resv_irqs != nq) 7218 return true; 7219 return false; 7220 } 7221 7222 static int __bnxt_reserve_rings(struct bnxt *bp) 7223 { 7224 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7225 int cp = bnxt_nq_rings_in_use(bp); 7226 int tx = bp->tx_nr_rings; 7227 int rx = bp->rx_nr_rings; 7228 int grp, rx_rings, rc; 7229 int vnic = 1, stat; 7230 bool sh = false; 7231 int tx_cp; 7232 7233 if (!bnxt_need_reserve_rings(bp)) 7234 return 0; 7235 7236 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7237 sh = true; 7238 if ((bp->flags & BNXT_FLAG_RFS) && 7239 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7240 vnic = rx + 1; 7241 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7242 rx <<= 1; 7243 grp = bp->rx_nr_rings; 7244 stat = bnxt_get_func_stat_ctxs(bp); 7245 7246 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 7247 if (rc) 7248 return rc; 7249 7250 tx = hw_resc->resv_tx_rings; 7251 if (BNXT_NEW_RM(bp)) { 7252 rx = hw_resc->resv_rx_rings; 7253 cp = hw_resc->resv_irqs; 7254 grp = hw_resc->resv_hw_ring_grps; 7255 vnic = hw_resc->resv_vnics; 7256 stat = hw_resc->resv_stat_ctxs; 7257 } 7258 7259 rx_rings = rx; 7260 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7261 if (rx >= 2) { 7262 rx_rings = rx >> 1; 7263 } else { 7264 if (netif_running(bp->dev)) 7265 return -ENOMEM; 7266 7267 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7268 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7269 bp->dev->hw_features &= ~NETIF_F_LRO; 7270 bp->dev->features &= ~NETIF_F_LRO; 7271 bnxt_set_ring_params(bp); 7272 } 7273 } 7274 rx_rings = min_t(int, rx_rings, grp); 7275 cp = min_t(int, cp, bp->cp_nr_rings); 7276 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 7277 stat -= 
bnxt_get_ulp_stat_ctxs(bp); 7278 cp = min_t(int, cp, stat); 7279 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 7280 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7281 rx = rx_rings << 1; 7282 tx_cp = bnxt_num_tx_to_cp(bp, tx); 7283 cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7284 bp->tx_nr_rings = tx; 7285 7286 /* If we cannot reserve all the RX rings, reset the RSS map only 7287 * if absolutely necessary 7288 */ 7289 if (rx_rings != bp->rx_nr_rings) { 7290 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 7291 rx_rings, bp->rx_nr_rings); 7292 if (netif_is_rxfh_configured(bp->dev) && 7293 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 7294 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 7295 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 7296 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 7297 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 7298 } 7299 } 7300 bp->rx_nr_rings = rx_rings; 7301 bp->cp_nr_rings = cp; 7302 7303 if (!tx || !rx || !cp || !grp || !vnic || !stat) 7304 return -ENOMEM; 7305 7306 if (!netif_is_rxfh_configured(bp->dev)) 7307 bnxt_set_dflt_rss_indir_tbl(bp); 7308 7309 return rc; 7310 } 7311 7312 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7313 int ring_grps, int cp_rings, int stats, 7314 int vnics) 7315 { 7316 struct hwrm_func_vf_cfg_input *req; 7317 u32 flags; 7318 7319 if (!BNXT_NEW_RM(bp)) 7320 return 0; 7321 7322 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7323 cp_rings, stats, vnics); 7324 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 7325 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7326 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7327 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7328 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 7329 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 7330 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7331 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7332 7333 req->flags = cpu_to_le32(flags); 7334 return hwrm_req_send_silent(bp, req); 7335 } 7336 7337 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7338 int ring_grps, int cp_rings, int stats, 7339 int vnics) 7340 { 7341 struct hwrm_func_cfg_input *req; 7342 u32 flags; 7343 7344 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 7345 cp_rings, stats, vnics); 7346 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 7347 if (BNXT_NEW_RM(bp)) { 7348 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7349 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 7350 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 7351 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 7352 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7353 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 7354 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 7355 else 7356 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 7357 } 7358 7359 req->flags = cpu_to_le32(flags); 7360 return hwrm_req_send_silent(bp, req); 7361 } 7362 7363 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7364 int ring_grps, int cp_rings, int stats, 7365 int vnics) 7366 { 7367 if (bp->hwrm_spec_code < 0x10801) 7368 return 0; 7369 7370 if (BNXT_PF(bp)) 7371 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 7372 ring_grps, cp_rings, stats, 7373 vnics); 7374 7375 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7376 cp_rings, stats, vnics); 7377 } 7378 7379 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 7380 { 7381 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7382 struct hwrm_ring_aggint_qcaps_output *resp; 7383 
struct hwrm_ring_aggint_qcaps_input *req; 7384 int rc; 7385 7386 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 7387 coal_cap->num_cmpl_dma_aggr_max = 63; 7388 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 7389 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 7390 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 7391 coal_cap->int_lat_tmr_min_max = 65535; 7392 coal_cap->int_lat_tmr_max_max = 65535; 7393 coal_cap->num_cmpl_aggr_int_max = 65535; 7394 coal_cap->timer_units = 80; 7395 7396 if (bp->hwrm_spec_code < 0x10902) 7397 return; 7398 7399 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 7400 return; 7401 7402 resp = hwrm_req_hold(bp, req); 7403 rc = hwrm_req_send_silent(bp, req); 7404 if (!rc) { 7405 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 7406 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 7407 coal_cap->num_cmpl_dma_aggr_max = 7408 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 7409 coal_cap->num_cmpl_dma_aggr_during_int_max = 7410 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 7411 coal_cap->cmpl_aggr_dma_tmr_max = 7412 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 7413 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 7414 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 7415 coal_cap->int_lat_tmr_min_max = 7416 le16_to_cpu(resp->int_lat_tmr_min_max); 7417 coal_cap->int_lat_tmr_max_max = 7418 le16_to_cpu(resp->int_lat_tmr_max_max); 7419 coal_cap->num_cmpl_aggr_int_max = 7420 le16_to_cpu(resp->num_cmpl_aggr_int_max); 7421 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 7422 } 7423 hwrm_req_drop(bp, req); 7424 } 7425 7426 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 7427 { 7428 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7429 7430 return usec * 1000 / coal_cap->timer_units; 7431 } 7432 7433 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 7434 struct bnxt_coal *hw_coal, 7435 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7436 { 7437 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7438 u16 val, tmr, max, flags = hw_coal->flags; 7439 u32 cmpl_params = coal_cap->cmpl_params; 7440 7441 max = hw_coal->bufs_per_record * 128; 7442 if (hw_coal->budget) 7443 max = hw_coal->bufs_per_record * hw_coal->budget; 7444 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 7445 7446 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 7447 req->num_cmpl_aggr_int = cpu_to_le16(val); 7448 7449 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 7450 req->num_cmpl_dma_aggr = cpu_to_le16(val); 7451 7452 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 7453 coal_cap->num_cmpl_dma_aggr_during_int_max); 7454 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 7455 7456 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 7457 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 7458 req->int_lat_tmr_max = cpu_to_le16(tmr); 7459 7460 /* min timer set to 1/2 of interrupt timer */ 7461 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 7462 val = tmr / 2; 7463 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 7464 req->int_lat_tmr_min = cpu_to_le16(val); 7465 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7466 } 7467 7468 /* buf timer set to 1/4 of interrupt timer */ 7469 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 7470 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 7471 7472 if (cmpl_params & 7473 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 7474 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 7475 val = clamp_t(u16, tmr, 1, 7476 
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;
	int rc;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req->flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req->int_lat_tmr_min = cpu_to_le16(tmr);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return hwrm_req_send(bp, req);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;
	int rc;

	/* Tick values are in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
7526 */ 7527 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 7528 7529 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 7530 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 7531 7532 if (!bnapi->rx_ring) 7533 return -ENODEV; 7534 7535 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7536 if (rc) 7537 return rc; 7538 7539 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 7540 7541 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 7542 7543 return hwrm_req_send(bp, req_rx); 7544 } 7545 7546 static int 7547 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7548 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7549 { 7550 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 7551 7552 req->ring_id = cpu_to_le16(ring_id); 7553 return hwrm_req_send(bp, req); 7554 } 7555 7556 static int 7557 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7558 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7559 { 7560 struct bnxt_tx_ring_info *txr; 7561 int i, rc; 7562 7563 bnxt_for_each_napi_tx(i, bnapi, txr) { 7564 u16 ring_id; 7565 7566 ring_id = bnxt_cp_ring_for_tx(bp, txr); 7567 req->ring_id = cpu_to_le16(ring_id); 7568 rc = hwrm_req_send(bp, req); 7569 if (rc) 7570 return rc; 7571 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7572 return 0; 7573 } 7574 return 0; 7575 } 7576 7577 int bnxt_hwrm_set_coal(struct bnxt *bp) 7578 { 7579 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 7580 int i, rc; 7581 7582 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7583 if (rc) 7584 return rc; 7585 7586 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7587 if (rc) { 7588 hwrm_req_drop(bp, req_rx); 7589 return rc; 7590 } 7591 7592 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 7593 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 7594 7595 hwrm_req_hold(bp, req_rx); 7596 hwrm_req_hold(bp, req_tx); 7597 for (i = 0; i < bp->cp_nr_rings; i++) { 7598 struct bnxt_napi *bnapi = bp->bnapi[i]; 7599 struct bnxt_coal *hw_coal; 7600 7601 if (!bnapi->rx_ring) 7602 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7603 else 7604 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 7605 if (rc) 7606 break; 7607 7608 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7609 continue; 7610 7611 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 7612 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7613 if (rc) 7614 break; 7615 } 7616 if (bnapi->rx_ring) 7617 hw_coal = &bp->rx_coal; 7618 else 7619 hw_coal = &bp->tx_coal; 7620 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 7621 } 7622 hwrm_req_drop(bp, req_rx); 7623 hwrm_req_drop(bp, req_tx); 7624 return rc; 7625 } 7626 7627 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 7628 { 7629 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 7630 struct hwrm_stat_ctx_free_input *req; 7631 int i; 7632 7633 if (!bp->bnapi) 7634 return; 7635 7636 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7637 return; 7638 7639 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 7640 return; 7641 if (BNXT_FW_MAJ(bp) <= 20) { 7642 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 7643 hwrm_req_drop(bp, req); 7644 return; 7645 } 7646 hwrm_req_hold(bp, req0); 7647 } 7648 hwrm_req_hold(bp, req); 7649 for (i = 0; i < bp->cp_nr_rings; i++) { 7650 struct bnxt_napi *bnapi = bp->bnapi[i]; 7651 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7652 7653 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 7654 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 7655 if (req0) { 7656 
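				/* Older firmware (major version <= 20, per the
				 * BNXT_FW_MAJ check above) is assumed not to
				 * zero the counters when a stats context is
				 * freed, so an explicit HWRM_STAT_CTX_CLR_STATS
				 * is sent for the context first.
				 */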
req0->stat_ctx_id = req->stat_ctx_id; 7657 hwrm_req_send(bp, req0); 7658 } 7659 hwrm_req_send(bp, req); 7660 7661 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 7662 } 7663 } 7664 hwrm_req_drop(bp, req); 7665 if (req0) 7666 hwrm_req_drop(bp, req0); 7667 } 7668 7669 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 7670 { 7671 struct hwrm_stat_ctx_alloc_output *resp; 7672 struct hwrm_stat_ctx_alloc_input *req; 7673 int rc, i; 7674 7675 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7676 return 0; 7677 7678 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 7679 if (rc) 7680 return rc; 7681 7682 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 7683 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 7684 7685 resp = hwrm_req_hold(bp, req); 7686 for (i = 0; i < bp->cp_nr_rings; i++) { 7687 struct bnxt_napi *bnapi = bp->bnapi[i]; 7688 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7689 7690 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 7691 7692 rc = hwrm_req_send(bp, req); 7693 if (rc) 7694 break; 7695 7696 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 7697 7698 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 7699 } 7700 hwrm_req_drop(bp, req); 7701 return rc; 7702 } 7703 7704 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 7705 { 7706 struct hwrm_func_qcfg_output *resp; 7707 struct hwrm_func_qcfg_input *req; 7708 u16 flags; 7709 int rc; 7710 7711 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7712 if (rc) 7713 return rc; 7714 7715 req->fid = cpu_to_le16(0xffff); 7716 resp = hwrm_req_hold(bp, req); 7717 rc = hwrm_req_send(bp, req); 7718 if (rc) 7719 goto func_qcfg_exit; 7720 7721 #ifdef CONFIG_BNXT_SRIOV 7722 if (BNXT_VF(bp)) { 7723 struct bnxt_vf_info *vf = &bp->vf; 7724 7725 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 7726 } else { 7727 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 7728 } 7729 #endif 7730 flags = le16_to_cpu(resp->flags); 7731 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 7732 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 7733 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 7734 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 7735 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 7736 } 7737 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 7738 bp->flags |= BNXT_FLAG_MULTI_HOST; 7739 7740 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 7741 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 7742 7743 switch (resp->port_partition_type) { 7744 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 7745 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 7746 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 7747 bp->port_partition_type = resp->port_partition_type; 7748 break; 7749 } 7750 if (bp->hwrm_spec_code < 0x10707 || 7751 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 7752 bp->br_mode = BRIDGE_MODE_VEB; 7753 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 7754 bp->br_mode = BRIDGE_MODE_VEPA; 7755 else 7756 bp->br_mode = BRIDGE_MODE_UNDEF; 7757 7758 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 7759 if (!bp->max_mtu) 7760 bp->max_mtu = BNXT_MAX_MTU; 7761 7762 if (bp->db_size) 7763 goto func_qcfg_exit; 7764 7765 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 7766 if (BNXT_CHIP_P5(bp)) { 7767 if (BNXT_PF(bp)) 7768 bp->db_offset = DB_PF_OFFSET_P5; 7769 else 7770 bp->db_offset = DB_VF_OFFSET_P5; 7771 } 7772 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 7773 1024); 7774 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 7775 bp->db_size <= 
bp->db_offset) 7776 bp->db_size = pci_resource_len(bp->pdev, 2); 7777 7778 func_qcfg_exit: 7779 hwrm_req_drop(bp, req); 7780 return rc; 7781 } 7782 7783 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 7784 u8 init_val, u8 init_offset, 7785 bool init_mask_set) 7786 { 7787 ctxm->init_value = init_val; 7788 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 7789 if (init_mask_set) 7790 ctxm->init_offset = init_offset * 4; 7791 else 7792 ctxm->init_value = 0; 7793 } 7794 7795 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 7796 { 7797 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7798 u16 type; 7799 7800 for (type = 0; type < ctx_max; type++) { 7801 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7802 int n = 1; 7803 7804 if (!ctxm->max_entries) 7805 continue; 7806 7807 if (ctxm->instance_bmap) 7808 n = hweight32(ctxm->instance_bmap); 7809 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 7810 if (!ctxm->pg_info) 7811 return -ENOMEM; 7812 } 7813 return 0; 7814 } 7815 7816 #define BNXT_CTX_INIT_VALID(flags) \ 7817 (!!((flags) & \ 7818 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 7819 7820 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 7821 { 7822 struct hwrm_func_backing_store_qcaps_v2_output *resp; 7823 struct hwrm_func_backing_store_qcaps_v2_input *req; 7824 struct bnxt_ctx_mem_info *ctx; 7825 u16 type; 7826 int rc; 7827 7828 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 7829 if (rc) 7830 return rc; 7831 7832 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7833 if (!ctx) 7834 return -ENOMEM; 7835 bp->ctx = ctx; 7836 7837 resp = hwrm_req_hold(bp, req); 7838 7839 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 7840 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7841 u8 init_val, init_off, i; 7842 __le32 *p; 7843 u32 flags; 7844 7845 req->type = cpu_to_le16(type); 7846 rc = hwrm_req_send(bp, req); 7847 if (rc) 7848 goto ctx_done; 7849 flags = le32_to_cpu(resp->flags); 7850 type = le16_to_cpu(resp->next_valid_type); 7851 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) 7852 continue; 7853 7854 ctxm->type = le16_to_cpu(resp->type); 7855 ctxm->entry_size = le16_to_cpu(resp->entry_size); 7856 ctxm->flags = flags; 7857 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 7858 ctxm->entry_multiple = resp->entry_multiple; 7859 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); 7860 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 7861 init_val = resp->ctx_init_value; 7862 init_off = resp->ctx_init_offset; 7863 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 7864 BNXT_CTX_INIT_VALID(flags)); 7865 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 7866 BNXT_MAX_SPLIT_ENTRY); 7867 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 7868 i++, p++) 7869 ctxm->split[i] = le32_to_cpu(*p); 7870 } 7871 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 7872 7873 ctx_done: 7874 hwrm_req_drop(bp, req); 7875 return rc; 7876 } 7877 7878 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 7879 { 7880 struct hwrm_func_backing_store_qcaps_output *resp; 7881 struct hwrm_func_backing_store_qcaps_input *req; 7882 int rc; 7883 7884 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 7885 return 0; 7886 7887 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 7888 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 7889 7890 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 7891 if (rc) 7892 return rc; 7893 7894 resp = hwrm_req_hold(bp, 
req); 7895 rc = hwrm_req_send_silent(bp, req); 7896 if (!rc) { 7897 struct bnxt_ctx_mem_type *ctxm; 7898 struct bnxt_ctx_mem_info *ctx; 7899 u8 init_val, init_idx = 0; 7900 u16 init_mask; 7901 7902 ctx = bp->ctx; 7903 if (!ctx) { 7904 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7905 if (!ctx) { 7906 rc = -ENOMEM; 7907 goto ctx_err; 7908 } 7909 bp->ctx = ctx; 7910 } 7911 init_val = resp->ctx_kind_initializer; 7912 init_mask = le16_to_cpu(resp->ctx_init_mask); 7913 7914 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7915 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 7916 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 7917 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 7918 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 7919 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 7920 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 7921 (init_mask & (1 << init_idx++)) != 0); 7922 7923 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7924 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 7925 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 7926 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 7927 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 7928 (init_mask & (1 << init_idx++)) != 0); 7929 7930 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7931 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 7932 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 7933 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 7934 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 7935 (init_mask & (1 << init_idx++)) != 0); 7936 7937 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7938 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 7939 ctxm->max_entries = ctxm->vnic_entries + 7940 le16_to_cpu(resp->vnic_max_ring_table_entries); 7941 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 7942 bnxt_init_ctx_initializer(ctxm, init_val, 7943 resp->vnic_init_offset, 7944 (init_mask & (1 << init_idx++)) != 0); 7945 7946 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7947 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 7948 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 7949 bnxt_init_ctx_initializer(ctxm, init_val, 7950 resp->stat_init_offset, 7951 (init_mask & (1 << init_idx++)) != 0); 7952 7953 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7954 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 7955 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 7956 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 7957 ctxm->entry_multiple = resp->tqm_entries_multiple; 7958 if (!ctxm->entry_multiple) 7959 ctxm->entry_multiple = 1; 7960 7961 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 7962 7963 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7964 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 7965 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 7966 ctxm->mrav_num_entries_units = 7967 le16_to_cpu(resp->mrav_num_entries_units); 7968 bnxt_init_ctx_initializer(ctxm, init_val, 7969 resp->mrav_init_offset, 7970 (init_mask & (1 << init_idx++)) != 0); 7971 7972 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7973 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 7974 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 7975 7976 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 7977 if (!ctx->tqm_fp_rings_count) 7978 ctx->tqm_fp_rings_count = bp->max_q; 7979 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 7980 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 7981 
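/* The fast-path TQM type reuses the slow-path TQM parameters, with
 * one instance per fast-path TQM ring.
 */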
7982 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 7983 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 7984 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 7985 7986 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 7987 } else { 7988 rc = 0; 7989 } 7990 ctx_err: 7991 hwrm_req_drop(bp, req); 7992 return rc; 7993 } 7994 7995 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 7996 __le64 *pg_dir) 7997 { 7998 if (!rmem->nr_pages) 7999 return; 8000 8001 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8002 if (rmem->depth >= 1) { 8003 if (rmem->depth == 2) 8004 *pg_attr |= 2; 8005 else 8006 *pg_attr |= 1; 8007 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8008 } else { 8009 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8010 } 8011 } 8012 8013 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8014 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8015 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8016 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8017 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8018 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8019 8020 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8021 { 8022 struct hwrm_func_backing_store_cfg_input *req; 8023 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8024 struct bnxt_ctx_pg_info *ctx_pg; 8025 struct bnxt_ctx_mem_type *ctxm; 8026 void **__req = (void **)&req; 8027 u32 req_len = sizeof(*req); 8028 __le32 *num_entries; 8029 __le64 *pg_dir; 8030 u32 flags = 0; 8031 u8 *pg_attr; 8032 u32 ena; 8033 int rc; 8034 int i; 8035 8036 if (!ctx) 8037 return 0; 8038 8039 if (req_len > bp->hwrm_max_ext_req_len) 8040 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8041 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8042 if (rc) 8043 return rc; 8044 8045 req->enables = cpu_to_le32(enables); 8046 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8047 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8048 ctx_pg = ctxm->pg_info; 8049 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8050 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8051 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8052 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8053 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8054 &req->qpc_pg_size_qpc_lvl, 8055 &req->qpc_page_dir); 8056 8057 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8058 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8059 } 8060 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8061 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8062 ctx_pg = ctxm->pg_info; 8063 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8064 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 8065 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8066 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8067 &req->srq_pg_size_srq_lvl, 8068 &req->srq_page_dir); 8069 } 8070 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8071 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8072 ctx_pg = ctxm->pg_info; 8073 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8074 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8075 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8076 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8077 &req->cq_pg_size_cq_lvl, 8078 &req->cq_page_dir); 8079 } 8080 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8081 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8082 ctx_pg = ctxm->pg_info; 8083 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8084 req->vnic_num_ring_table_entries = 8085 cpu_to_le16(ctxm->max_entries - 
ctxm->vnic_entries); 8086 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8087 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8088 &req->vnic_pg_size_vnic_lvl, 8089 &req->vnic_page_dir); 8090 } 8091 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8092 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8093 ctx_pg = ctxm->pg_info; 8094 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8095 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8096 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8097 &req->stat_pg_size_stat_lvl, 8098 &req->stat_page_dir); 8099 } 8100 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8101 u32 units; 8102 8103 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8104 ctx_pg = ctxm->pg_info; 8105 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8106 units = ctxm->mrav_num_entries_units; 8107 if (units) { 8108 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8109 u32 entries; 8110 8111 num_mr = ctx_pg->entries - num_ah; 8112 entries = ((num_mr / units) << 16) | (num_ah / units); 8113 req->mrav_num_entries = cpu_to_le32(entries); 8114 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8115 } 8116 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8117 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8118 &req->mrav_pg_size_mrav_lvl, 8119 &req->mrav_page_dir); 8120 } 8121 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8122 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8123 ctx_pg = ctxm->pg_info; 8124 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8125 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8126 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8127 &req->tim_pg_size_tim_lvl, 8128 &req->tim_page_dir); 8129 } 8130 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8131 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8132 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8133 pg_dir = &req->tqm_sp_page_dir, 8134 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8135 ctx_pg = ctxm->pg_info; 8136 i < BNXT_MAX_TQM_RINGS; 8137 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8138 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8139 if (!(enables & ena)) 8140 continue; 8141 8142 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8143 *num_entries = cpu_to_le32(ctx_pg->entries); 8144 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8145 } 8146 req->flags = cpu_to_le32(flags); 8147 return hwrm_req_send(bp, req); 8148 } 8149 8150 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8151 struct bnxt_ctx_pg_info *ctx_pg) 8152 { 8153 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8154 8155 rmem->page_size = BNXT_PAGE_SIZE; 8156 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8157 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8158 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 8159 if (rmem->depth >= 1) 8160 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8161 return bnxt_alloc_ring(bp, rmem); 8162 } 8163 8164 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8165 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8166 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8167 { 8168 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8169 int rc; 8170 8171 if (!mem_size) 8172 return -EINVAL; 8173 8174 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8175 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8176 ctx_pg->nr_pages = 0; 8177 return -EINVAL; 8178 } 8179 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8180 int nr_tbls, i; 8181 8182 rmem->depth = 2; 8183 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8184 GFP_KERNEL); 8185 if (!ctx_pg->ctx_pg_tbl) 8186 return -ENOMEM; 8187 nr_tbls 
= DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8188 rmem->nr_pages = nr_tbls; 8189 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8190 if (rc) 8191 return rc; 8192 for (i = 0; i < nr_tbls; i++) { 8193 struct bnxt_ctx_pg_info *pg_tbl; 8194 8195 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8196 if (!pg_tbl) 8197 return -ENOMEM; 8198 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8199 rmem = &pg_tbl->ring_mem; 8200 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8201 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8202 rmem->depth = 1; 8203 rmem->nr_pages = MAX_CTX_PAGES; 8204 rmem->ctx_mem = ctxm; 8205 if (i == (nr_tbls - 1)) { 8206 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8207 8208 if (rem) 8209 rmem->nr_pages = rem; 8210 } 8211 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8212 if (rc) 8213 break; 8214 } 8215 } else { 8216 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8217 if (rmem->nr_pages > 1 || depth) 8218 rmem->depth = 1; 8219 rmem->ctx_mem = ctxm; 8220 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8221 } 8222 return rc; 8223 } 8224 8225 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 8226 struct bnxt_ctx_pg_info *ctx_pg) 8227 { 8228 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8229 8230 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 8231 ctx_pg->ctx_pg_tbl) { 8232 int i, nr_tbls = rmem->nr_pages; 8233 8234 for (i = 0; i < nr_tbls; i++) { 8235 struct bnxt_ctx_pg_info *pg_tbl; 8236 struct bnxt_ring_mem_info *rmem2; 8237 8238 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8239 if (!pg_tbl) 8240 continue; 8241 rmem2 = &pg_tbl->ring_mem; 8242 bnxt_free_ring(bp, rmem2); 8243 ctx_pg->ctx_pg_arr[i] = NULL; 8244 kfree(pg_tbl); 8245 ctx_pg->ctx_pg_tbl[i] = NULL; 8246 } 8247 kfree(ctx_pg->ctx_pg_tbl); 8248 ctx_pg->ctx_pg_tbl = NULL; 8249 } 8250 bnxt_free_ring(bp, rmem); 8251 ctx_pg->nr_pages = 0; 8252 } 8253 8254 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 8255 struct bnxt_ctx_mem_type *ctxm, u32 entries, 8256 u8 pg_lvl) 8257 { 8258 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8259 int i, rc = 0, n = 1; 8260 u32 mem_size; 8261 8262 if (!ctxm->entry_size || !ctx_pg) 8263 return -EINVAL; 8264 if (ctxm->instance_bmap) 8265 n = hweight32(ctxm->instance_bmap); 8266 if (ctxm->entry_multiple) 8267 entries = roundup(entries, ctxm->entry_multiple); 8268 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 8269 mem_size = entries * ctxm->entry_size; 8270 for (i = 0; i < n && !rc; i++) { 8271 ctx_pg[i].entries = entries; 8272 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 8273 ctxm->init_value ? 
ctxm : NULL); 8274 } 8275 return rc; 8276 } 8277 8278 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 8279 struct bnxt_ctx_mem_type *ctxm, 8280 bool last) 8281 { 8282 struct hwrm_func_backing_store_cfg_v2_input *req; 8283 u32 instance_bmap = ctxm->instance_bmap; 8284 int i, j, rc = 0, n = 1; 8285 __le32 *p; 8286 8287 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 8288 return 0; 8289 8290 if (instance_bmap) 8291 n = hweight32(ctxm->instance_bmap); 8292 else 8293 instance_bmap = 1; 8294 8295 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 8296 if (rc) 8297 return rc; 8298 hwrm_req_hold(bp, req); 8299 req->type = cpu_to_le16(ctxm->type); 8300 req->entry_size = cpu_to_le16(ctxm->entry_size); 8301 req->subtype_valid_cnt = ctxm->split_entry_cnt; 8302 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 8303 p[i] = cpu_to_le32(ctxm->split[i]); 8304 for (i = 0, j = 0; j < n && !rc; i++) { 8305 struct bnxt_ctx_pg_info *ctx_pg; 8306 8307 if (!(instance_bmap & (1 << i))) 8308 continue; 8309 req->instance = cpu_to_le16(i); 8310 ctx_pg = &ctxm->pg_info[j++]; 8311 if (!ctx_pg->entries) 8312 continue; 8313 req->num_entries = cpu_to_le32(ctx_pg->entries); 8314 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8315 &req->page_size_pbl_level, 8316 &req->page_dir); 8317 if (last && j == n) 8318 req->flags = 8319 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 8320 rc = hwrm_req_send(bp, req); 8321 } 8322 hwrm_req_drop(bp, req); 8323 return rc; 8324 } 8325 8326 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 8327 { 8328 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8329 struct bnxt_ctx_mem_type *ctxm; 8330 u16 last_type; 8331 int rc = 0; 8332 u16 type; 8333 8334 if (!ena) 8335 return 0; 8336 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 8337 last_type = BNXT_CTX_MAX - 1; 8338 else 8339 last_type = BNXT_CTX_L2_MAX - 1; 8340 ctx->ctx_arr[last_type].last = 1; 8341 8342 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 8343 ctxm = &ctx->ctx_arr[type]; 8344 8345 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 8346 if (rc) 8347 return rc; 8348 } 8349 return 0; 8350 } 8351 8352 void bnxt_free_ctx_mem(struct bnxt *bp) 8353 { 8354 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8355 u16 type; 8356 8357 if (!ctx) 8358 return; 8359 8360 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { 8361 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8362 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 8363 int i, n = 1; 8364 8365 if (!ctx_pg) 8366 continue; 8367 if (ctxm->instance_bmap) 8368 n = hweight32(ctxm->instance_bmap); 8369 for (i = 0; i < n; i++) 8370 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 8371 8372 kfree(ctx_pg); 8373 ctxm->pg_info = NULL; 8374 } 8375 8376 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 8377 kfree(ctx); 8378 bp->ctx = NULL; 8379 } 8380 8381 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 8382 { 8383 struct bnxt_ctx_mem_type *ctxm; 8384 struct bnxt_ctx_mem_info *ctx; 8385 u32 l2_qps, qp1_qps, max_qps; 8386 u32 ena, entries_sp, entries; 8387 u32 srqs, max_srqs, min; 8388 u32 num_mr, num_ah; 8389 u32 extra_srqs = 0; 8390 u32 extra_qps = 0; 8391 u32 fast_qpmd_qps; 8392 u8 pg_lvl = 1; 8393 int i, rc; 8394 8395 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 8396 if (rc) { 8397 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 8398 rc); 8399 return rc; 8400 } 8401 ctx = bp->ctx; 8402 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 8403 return 0; 8404 8405 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8406 
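/* Size the QP and SRQ backing store from the L2 entry counts returned
 * by firmware; extra entries for RoCE are added below only when the
 * RoCE capability is present and this is not a kdump kernel.
 */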
l2_qps = ctxm->qp_l2_entries; 8407 qp1_qps = ctxm->qp_qp1_entries; 8408 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; 8409 max_qps = ctxm->max_entries; 8410 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8411 srqs = ctxm->srq_l2_entries; 8412 max_srqs = ctxm->max_entries; 8413 ena = 0; 8414 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 8415 pg_lvl = 2; 8416 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); 8417 /* allocate extra qps if fw supports RoCE fast qp destroy feature */ 8418 extra_qps += fast_qpmd_qps; 8419 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 8420 if (fast_qpmd_qps) 8421 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 8422 } 8423 8424 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8425 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 8426 pg_lvl); 8427 if (rc) 8428 return rc; 8429 8430 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8431 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 8432 if (rc) 8433 return rc; 8434 8435 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8436 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 8437 extra_qps * 2, pg_lvl); 8438 if (rc) 8439 return rc; 8440 8441 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8442 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8443 if (rc) 8444 return rc; 8445 8446 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8447 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8448 if (rc) 8449 return rc; 8450 8451 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 8452 goto skip_rdma; 8453 8454 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8455 /* 128K extra is needed to accommodate static AH context 8456 * allocation by f/w. 8457 */ 8458 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 8459 num_ah = min_t(u32, num_mr, 1024 * 128); 8460 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 8461 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 8462 ctxm->mrav_av_entries = num_ah; 8463 8464 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 8465 if (rc) 8466 return rc; 8467 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 8468 8469 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8470 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 8471 if (rc) 8472 return rc; 8473 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 8474 8475 skip_rdma: 8476 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8477 min = ctxm->min_entries; 8478 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 8479 2 * (extra_qps + qp1_qps) + min; 8480 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 8481 if (rc) 8482 return rc; 8483 8484 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8485 entries = l2_qps + 2 * (extra_qps + qp1_qps); 8486 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 8487 if (rc) 8488 return rc; 8489 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 8490 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 8491 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 8492 8493 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8494 rc = bnxt_backing_store_cfg_v2(bp, ena); 8495 else 8496 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 8497 if (rc) { 8498 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 8499 rc); 8500 return rc; 8501 } 8502 ctx->flags |= BNXT_CTX_FLAG_INITED; 8503 return 0; 8504 } 8505 8506 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 8507 { 8508 struct hwrm_func_resource_qcaps_output *resp; 8509 struct hwrm_func_resource_qcaps_input *req; 8510 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8511 int rc; 8512 8513 rc = hwrm_req_init(bp, req, 
HWRM_FUNC_RESOURCE_QCAPS); 8514 if (rc) 8515 return rc; 8516 8517 req->fid = cpu_to_le16(0xffff); 8518 resp = hwrm_req_hold(bp, req); 8519 rc = hwrm_req_send_silent(bp, req); 8520 if (rc) 8521 goto hwrm_func_resc_qcaps_exit; 8522 8523 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 8524 if (!all) 8525 goto hwrm_func_resc_qcaps_exit; 8526 8527 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 8528 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8529 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 8530 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8531 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 8532 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8533 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 8534 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8535 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 8536 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 8537 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 8538 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8539 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 8540 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8541 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 8542 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8543 8544 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8545 u16 max_msix = le16_to_cpu(resp->max_msix); 8546 8547 hw_resc->max_nqs = max_msix; 8548 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 8549 } 8550 8551 if (BNXT_PF(bp)) { 8552 struct bnxt_pf_info *pf = &bp->pf; 8553 8554 pf->vf_resv_strategy = 8555 le16_to_cpu(resp->vf_reservation_strategy); 8556 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 8557 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 8558 } 8559 hwrm_func_resc_qcaps_exit: 8560 hwrm_req_drop(bp, req); 8561 return rc; 8562 } 8563 8564 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 8565 { 8566 struct hwrm_port_mac_ptp_qcfg_output *resp; 8567 struct hwrm_port_mac_ptp_qcfg_input *req; 8568 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 8569 bool phc_cfg; 8570 u8 flags; 8571 int rc; 8572 8573 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { 8574 rc = -ENODEV; 8575 goto no_ptp; 8576 } 8577 8578 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 8579 if (rc) 8580 goto no_ptp; 8581 8582 req->port_id = cpu_to_le16(bp->pf.port_id); 8583 resp = hwrm_req_hold(bp, req); 8584 rc = hwrm_req_send(bp, req); 8585 if (rc) 8586 goto exit; 8587 8588 flags = resp->flags; 8589 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 8590 rc = -ENODEV; 8591 goto exit; 8592 } 8593 if (!ptp) { 8594 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 8595 if (!ptp) { 8596 rc = -ENOMEM; 8597 goto exit; 8598 } 8599 ptp->bp = bp; 8600 bp->ptp_cfg = ptp; 8601 } 8602 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { 8603 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); 8604 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 8605 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8606 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 8607 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 8608 } else { 8609 rc = -ENODEV; 8610 goto exit; 8611 } 8612 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 8613 rc = bnxt_ptp_init(bp, phc_cfg); 8614 if (rc) 8615 netdev_warn(bp->dev, "PTP initialization failed.\n"); 8616 exit: 8617 hwrm_req_drop(bp, req); 8618 if 
(!rc) 8619 return 0; 8620 8621 no_ptp: 8622 bnxt_ptp_clear(bp); 8623 kfree(ptp); 8624 bp->ptp_cfg = NULL; 8625 return rc; 8626 } 8627 8628 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 8629 { 8630 struct hwrm_func_qcaps_output *resp; 8631 struct hwrm_func_qcaps_input *req; 8632 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8633 u32 flags, flags_ext, flags_ext2; 8634 int rc; 8635 8636 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 8637 if (rc) 8638 return rc; 8639 8640 req->fid = cpu_to_le16(0xffff); 8641 resp = hwrm_req_hold(bp, req); 8642 rc = hwrm_req_send(bp, req); 8643 if (rc) 8644 goto hwrm_func_qcaps_exit; 8645 8646 flags = le32_to_cpu(resp->flags); 8647 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 8648 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 8649 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 8650 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 8651 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 8652 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 8653 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 8654 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 8655 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 8656 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 8657 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 8658 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 8659 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 8660 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 8661 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 8662 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 8663 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 8664 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 8665 8666 flags_ext = le32_to_cpu(resp->flags_ext); 8667 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 8668 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 8669 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 8670 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 8671 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 8672 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 8673 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 8674 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 8675 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 8676 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 8677 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 8678 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 8679 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 8680 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 8681 8682 flags_ext2 = le32_to_cpu(resp->flags_ext2); 8683 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 8684 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 8685 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 8686 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 8687 8688 bp->tx_push_thresh = 0; 8689 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 8690 BNXT_FW_MAJ(bp) > 217) 8691 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 8692 8693 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8694 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8695 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8696 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8697 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 8698 if (!hw_resc->max_hw_ring_grps) 8699 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 8700 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8701 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8702 
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8703 8704 if (BNXT_PF(bp)) { 8705 struct bnxt_pf_info *pf = &bp->pf; 8706 8707 pf->fw_fid = le16_to_cpu(resp->fid); 8708 pf->port_id = le16_to_cpu(resp->port_id); 8709 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 8710 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 8711 pf->max_vfs = le16_to_cpu(resp->max_vfs); 8712 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 8713 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 8714 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 8715 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 8716 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 8717 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 8718 bp->flags &= ~BNXT_FLAG_WOL_CAP; 8719 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 8720 bp->flags |= BNXT_FLAG_WOL_CAP; 8721 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 8722 bp->fw_cap |= BNXT_FW_CAP_PTP; 8723 } else { 8724 bnxt_ptp_clear(bp); 8725 kfree(bp->ptp_cfg); 8726 bp->ptp_cfg = NULL; 8727 } 8728 } else { 8729 #ifdef CONFIG_BNXT_SRIOV 8730 struct bnxt_vf_info *vf = &bp->vf; 8731 8732 vf->fw_fid = le16_to_cpu(resp->fid); 8733 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 8734 #endif 8735 } 8736 8737 hwrm_func_qcaps_exit: 8738 hwrm_req_drop(bp, req); 8739 return rc; 8740 } 8741 8742 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 8743 { 8744 struct hwrm_dbg_qcaps_output *resp; 8745 struct hwrm_dbg_qcaps_input *req; 8746 int rc; 8747 8748 bp->fw_dbg_cap = 0; 8749 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 8750 return; 8751 8752 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 8753 if (rc) 8754 return; 8755 8756 req->fid = cpu_to_le16(0xffff); 8757 resp = hwrm_req_hold(bp, req); 8758 rc = hwrm_req_send(bp, req); 8759 if (rc) 8760 goto hwrm_dbg_qcaps_exit; 8761 8762 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 8763 8764 hwrm_dbg_qcaps_exit: 8765 hwrm_req_drop(bp, req); 8766 } 8767 8768 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 8769 8770 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 8771 { 8772 int rc; 8773 8774 rc = __bnxt_hwrm_func_qcaps(bp); 8775 if (rc) 8776 return rc; 8777 8778 bnxt_hwrm_dbg_qcaps(bp); 8779 8780 rc = bnxt_hwrm_queue_qportcfg(bp); 8781 if (rc) { 8782 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 8783 return rc; 8784 } 8785 if (bp->hwrm_spec_code >= 0x10803) { 8786 rc = bnxt_alloc_ctx_mem(bp); 8787 if (rc) 8788 return rc; 8789 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8790 if (!rc) 8791 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 8792 } 8793 return 0; 8794 } 8795 8796 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 8797 { 8798 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 8799 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 8800 u32 flags; 8801 int rc; 8802 8803 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 8804 return 0; 8805 8806 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 8807 if (rc) 8808 return rc; 8809 8810 resp = hwrm_req_hold(bp, req); 8811 rc = hwrm_req_send(bp, req); 8812 if (rc) 8813 goto hwrm_cfa_adv_qcaps_exit; 8814 8815 flags = le32_to_cpu(resp->flags); 8816 if (flags & 8817 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 8818 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 8819 8820 hwrm_cfa_adv_qcaps_exit: 8821 hwrm_req_drop(bp, req); 8822 return rc; 8823 } 8824 8825 static int __bnxt_alloc_fw_health(struct bnxt *bp) 8826 { 8827 if (bp->fw_health) 8828 return 0; 8829 8830 bp->fw_health = 
kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 8831 if (!bp->fw_health) 8832 return -ENOMEM; 8833 8834 mutex_init(&bp->fw_health->lock); 8835 return 0; 8836 } 8837 8838 static int bnxt_alloc_fw_health(struct bnxt *bp) 8839 { 8840 int rc; 8841 8842 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 8843 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8844 return 0; 8845 8846 rc = __bnxt_alloc_fw_health(bp); 8847 if (rc) { 8848 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 8849 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 8850 return rc; 8851 } 8852 8853 return 0; 8854 } 8855 8856 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 8857 { 8858 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 8859 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8860 BNXT_FW_HEALTH_WIN_MAP_OFF); 8861 } 8862 8863 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 8864 { 8865 struct bnxt_fw_health *fw_health = bp->fw_health; 8866 u32 reg_type; 8867 8868 if (!fw_health) 8869 return; 8870 8871 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 8872 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8873 fw_health->status_reliable = false; 8874 8875 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 8876 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8877 fw_health->resets_reliable = false; 8878 } 8879 8880 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 8881 { 8882 void __iomem *hs; 8883 u32 status_loc; 8884 u32 reg_type; 8885 u32 sig; 8886 8887 if (bp->fw_health) 8888 bp->fw_health->status_reliable = false; 8889 8890 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 8891 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 8892 8893 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 8894 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 8895 if (!bp->chip_num) { 8896 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 8897 bp->chip_num = readl(bp->bar0 + 8898 BNXT_FW_HEALTH_WIN_BASE + 8899 BNXT_GRC_REG_CHIP_NUM); 8900 } 8901 if (!BNXT_CHIP_P5(bp)) 8902 return; 8903 8904 status_loc = BNXT_GRC_REG_STATUS_P5 | 8905 BNXT_FW_HEALTH_REG_TYPE_BAR0; 8906 } else { 8907 status_loc = readl(hs + offsetof(struct hcomm_status, 8908 fw_status_loc)); 8909 } 8910 8911 if (__bnxt_alloc_fw_health(bp)) { 8912 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 8913 return; 8914 } 8915 8916 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 8917 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 8918 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 8919 __bnxt_map_fw_health_reg(bp, status_loc); 8920 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 8921 BNXT_FW_HEALTH_WIN_OFF(status_loc); 8922 } 8923 8924 bp->fw_health->status_reliable = true; 8925 } 8926 8927 static int bnxt_map_fw_health_regs(struct bnxt *bp) 8928 { 8929 struct bnxt_fw_health *fw_health = bp->fw_health; 8930 u32 reg_base = 0xffffffff; 8931 int i; 8932 8933 bp->fw_health->status_reliable = false; 8934 bp->fw_health->resets_reliable = false; 8935 /* Only pre-map the monitoring GRC registers using window 3 */ 8936 for (i = 0; i < 4; i++) { 8937 u32 reg = fw_health->regs[i]; 8938 8939 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 8940 continue; 8941 if (reg_base == 0xffffffff) 8942 reg_base = reg & BNXT_GRC_BASE_MASK; 8943 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 8944 return -ERANGE; 8945 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 8946 } 8947 bp->fw_health->status_reliable = true; 8948 bp->fw_health->resets_reliable = true; 8949 if (reg_base == 0xffffffff) 8950 
return 0; 8951 8952 __bnxt_map_fw_health_reg(bp, reg_base); 8953 return 0; 8954 } 8955 8956 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 8957 { 8958 if (!bp->fw_health) 8959 return; 8960 8961 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 8962 bp->fw_health->status_reliable = true; 8963 bp->fw_health->resets_reliable = true; 8964 } else { 8965 bnxt_try_map_fw_health_reg(bp); 8966 } 8967 } 8968 8969 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 8970 { 8971 struct bnxt_fw_health *fw_health = bp->fw_health; 8972 struct hwrm_error_recovery_qcfg_output *resp; 8973 struct hwrm_error_recovery_qcfg_input *req; 8974 int rc, i; 8975 8976 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8977 return 0; 8978 8979 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 8980 if (rc) 8981 return rc; 8982 8983 resp = hwrm_req_hold(bp, req); 8984 rc = hwrm_req_send(bp, req); 8985 if (rc) 8986 goto err_recovery_out; 8987 fw_health->flags = le32_to_cpu(resp->flags); 8988 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 8989 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 8990 rc = -EINVAL; 8991 goto err_recovery_out; 8992 } 8993 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 8994 fw_health->master_func_wait_dsecs = 8995 le32_to_cpu(resp->master_func_wait_period); 8996 fw_health->normal_func_wait_dsecs = 8997 le32_to_cpu(resp->normal_func_wait_period); 8998 fw_health->post_reset_wait_dsecs = 8999 le32_to_cpu(resp->master_func_wait_period_after_reset); 9000 fw_health->post_reset_max_wait_dsecs = 9001 le32_to_cpu(resp->max_bailout_time_after_reset); 9002 fw_health->regs[BNXT_FW_HEALTH_REG] = 9003 le32_to_cpu(resp->fw_health_status_reg); 9004 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 9005 le32_to_cpu(resp->fw_heartbeat_reg); 9006 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 9007 le32_to_cpu(resp->fw_reset_cnt_reg); 9008 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 9009 le32_to_cpu(resp->reset_inprogress_reg); 9010 fw_health->fw_reset_inprog_reg_mask = 9011 le32_to_cpu(resp->reset_inprogress_reg_mask); 9012 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 9013 if (fw_health->fw_reset_seq_cnt >= 16) { 9014 rc = -EINVAL; 9015 goto err_recovery_out; 9016 } 9017 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 9018 fw_health->fw_reset_seq_regs[i] = 9019 le32_to_cpu(resp->reset_reg[i]); 9020 fw_health->fw_reset_seq_vals[i] = 9021 le32_to_cpu(resp->reset_reg_val[i]); 9022 fw_health->fw_reset_seq_delay_msec[i] = 9023 resp->delay_after_reset[i]; 9024 } 9025 err_recovery_out: 9026 hwrm_req_drop(bp, req); 9027 if (!rc) 9028 rc = bnxt_map_fw_health_regs(bp); 9029 if (rc) 9030 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9031 return rc; 9032 } 9033 9034 static int bnxt_hwrm_func_reset(struct bnxt *bp) 9035 { 9036 struct hwrm_func_reset_input *req; 9037 int rc; 9038 9039 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 9040 if (rc) 9041 return rc; 9042 9043 req->enables = 0; 9044 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 9045 return hwrm_req_send(bp, req); 9046 } 9047 9048 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 9049 { 9050 struct hwrm_nvm_get_dev_info_output nvm_info; 9051 9052 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 9053 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 9054 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 9055 nvm_info.nvm_cfg_ver_upd); 9056 } 9057 9058 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 9059 { 9060 struct hwrm_queue_qportcfg_output *resp; 9061 struct hwrm_queue_qportcfg_input *req; 9062 u8 i, j, *qptr; 9063 bool 
no_rdma; 9064 int rc = 0; 9065 9066 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 9067 if (rc) 9068 return rc; 9069 9070 resp = hwrm_req_hold(bp, req); 9071 rc = hwrm_req_send(bp, req); 9072 if (rc) 9073 goto qportcfg_exit; 9074 9075 if (!resp->max_configurable_queues) { 9076 rc = -EINVAL; 9077 goto qportcfg_exit; 9078 } 9079 bp->max_tc = resp->max_configurable_queues; 9080 bp->max_lltc = resp->max_configurable_lossless_queues; 9081 if (bp->max_tc > BNXT_MAX_QUEUE) 9082 bp->max_tc = BNXT_MAX_QUEUE; 9083 9084 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 9085 qptr = &resp->queue_id0; 9086 for (i = 0, j = 0; i < bp->max_tc; i++) { 9087 bp->q_info[j].queue_id = *qptr; 9088 bp->q_ids[i] = *qptr++; 9089 bp->q_info[j].queue_profile = *qptr++; 9090 bp->tc_to_qidx[j] = j; 9091 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 9092 (no_rdma && BNXT_PF(bp))) 9093 j++; 9094 } 9095 bp->max_q = bp->max_tc; 9096 bp->max_tc = max_t(u8, j, 1); 9097 9098 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 9099 bp->max_tc = 1; 9100 9101 if (bp->max_lltc > bp->max_tc) 9102 bp->max_lltc = bp->max_tc; 9103 9104 qportcfg_exit: 9105 hwrm_req_drop(bp, req); 9106 return rc; 9107 } 9108 9109 static int bnxt_hwrm_poll(struct bnxt *bp) 9110 { 9111 struct hwrm_ver_get_input *req; 9112 int rc; 9113 9114 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9115 if (rc) 9116 return rc; 9117 9118 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9119 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9120 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9121 9122 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 9123 rc = hwrm_req_send(bp, req); 9124 return rc; 9125 } 9126 9127 static int bnxt_hwrm_ver_get(struct bnxt *bp) 9128 { 9129 struct hwrm_ver_get_output *resp; 9130 struct hwrm_ver_get_input *req; 9131 u16 fw_maj, fw_min, fw_bld, fw_rsv; 9132 u32 dev_caps_cfg, hwrm_ver; 9133 int rc, len; 9134 9135 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 9136 if (rc) 9137 return rc; 9138 9139 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 9140 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 9141 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 9142 req->hwrm_intf_min = HWRM_VERSION_MINOR; 9143 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 9144 9145 resp = hwrm_req_hold(bp, req); 9146 rc = hwrm_req_send(bp, req); 9147 if (rc) 9148 goto hwrm_ver_get_exit; 9149 9150 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 9151 9152 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 9153 resp->hwrm_intf_min_8b << 8 | 9154 resp->hwrm_intf_upd_8b; 9155 if (resp->hwrm_intf_maj_8b < 1) { 9156 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 9157 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9158 resp->hwrm_intf_upd_8b); 9159 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 9160 } 9161 9162 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 9163 HWRM_VERSION_UPDATE; 9164 9165 if (bp->hwrm_spec_code > hwrm_ver) 9166 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9167 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 9168 HWRM_VERSION_UPDATE); 9169 else 9170 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 9171 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 9172 resp->hwrm_intf_upd_8b); 9173 9174 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 9175 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 9176 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 9177 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 9178 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 9179 len = FW_VER_STR_LEN; 9180 } else 
{ 9181 fw_maj = resp->hwrm_fw_maj_8b; 9182 fw_min = resp->hwrm_fw_min_8b; 9183 fw_bld = resp->hwrm_fw_bld_8b; 9184 fw_rsv = resp->hwrm_fw_rsvd_8b; 9185 len = BC_HWRM_STR_LEN; 9186 } 9187 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 9188 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 9189 fw_rsv); 9190 9191 if (strlen(resp->active_pkg_name)) { 9192 int fw_ver_len = strlen(bp->fw_ver_str); 9193 9194 snprintf(bp->fw_ver_str + fw_ver_len, 9195 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 9196 resp->active_pkg_name); 9197 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 9198 } 9199 9200 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 9201 if (!bp->hwrm_cmd_timeout) 9202 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 9203 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 9204 if (!bp->hwrm_cmd_max_timeout) 9205 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 9206 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 9207 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 9208 bp->hwrm_cmd_max_timeout / 1000); 9209 9210 if (resp->hwrm_intf_maj_8b >= 1) { 9211 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 9212 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 9213 } 9214 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 9215 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 9216 9217 bp->chip_num = le16_to_cpu(resp->chip_num); 9218 bp->chip_rev = resp->chip_rev; 9219 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 9220 !resp->chip_metal) 9221 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 9222 9223 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 9224 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 9225 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 9226 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 9227 9228 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 9229 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 9230 9231 if (dev_caps_cfg & 9232 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 9233 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 9234 9235 if (dev_caps_cfg & 9236 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 9237 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 9238 9239 if (dev_caps_cfg & 9240 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 9241 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 9242 9243 hwrm_ver_get_exit: 9244 hwrm_req_drop(bp, req); 9245 return rc; 9246 } 9247 9248 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 9249 { 9250 struct hwrm_fw_set_time_input *req; 9251 struct tm tm; 9252 time64_t now = ktime_get_real_seconds(); 9253 int rc; 9254 9255 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 9256 bp->hwrm_spec_code < 0x10400) 9257 return -EOPNOTSUPP; 9258 9259 time64_to_tm(now, 0, &tm); 9260 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 9261 if (rc) 9262 return rc; 9263 9264 req->year = cpu_to_le16(1900 + tm.tm_year); 9265 req->month = 1 + tm.tm_mon; 9266 req->day = tm.tm_mday; 9267 req->hour = tm.tm_hour; 9268 req->minute = tm.tm_min; 9269 req->second = tm.tm_sec; 9270 return hwrm_req_send(bp, req); 9271 } 9272 9273 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 9274 { 9275 u64 sw_tmp; 9276 9277 hw &= mask; 9278 sw_tmp = (*sw & ~mask) | hw; 9279 if (hw < (*sw & mask)) 9280 sw_tmp += mask + 1; 9281 WRITE_ONCE(*sw, sw_tmp); 9282 } 9283 9284 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 9285 int count, bool ignore_zero) 9286 { 9287 int i; 9288 9289 for (i = 0; 
i < count; i++) { 9290 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 9291 9292 if (ignore_zero && !hw) 9293 continue; 9294 9295 if (masks[i] == -1ULL) 9296 sw_stats[i] = hw; 9297 else 9298 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 9299 } 9300 } 9301 9302 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 9303 { 9304 if (!stats->hw_stats) 9305 return; 9306 9307 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9308 stats->hw_masks, stats->len / 8, false); 9309 } 9310 9311 static void bnxt_accumulate_all_stats(struct bnxt *bp) 9312 { 9313 struct bnxt_stats_mem *ring0_stats; 9314 bool ignore_zero = false; 9315 int i; 9316 9317 /* Chip bug. Counter intermittently becomes 0. */ 9318 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9319 ignore_zero = true; 9320 9321 for (i = 0; i < bp->cp_nr_rings; i++) { 9322 struct bnxt_napi *bnapi = bp->bnapi[i]; 9323 struct bnxt_cp_ring_info *cpr; 9324 struct bnxt_stats_mem *stats; 9325 9326 cpr = &bnapi->cp_ring; 9327 stats = &cpr->stats; 9328 if (!i) 9329 ring0_stats = stats; 9330 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 9331 ring0_stats->hw_masks, 9332 ring0_stats->len / 8, ignore_zero); 9333 } 9334 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9335 struct bnxt_stats_mem *stats = &bp->port_stats; 9336 __le64 *hw_stats = stats->hw_stats; 9337 u64 *sw_stats = stats->sw_stats; 9338 u64 *masks = stats->hw_masks; 9339 int cnt; 9340 9341 cnt = sizeof(struct rx_port_stats) / 8; 9342 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9343 9344 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9345 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9346 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9347 cnt = sizeof(struct tx_port_stats) / 8; 9348 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 9349 } 9350 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 9351 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 9352 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 9353 } 9354 } 9355 9356 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 9357 { 9358 struct hwrm_port_qstats_input *req; 9359 struct bnxt_pf_info *pf = &bp->pf; 9360 int rc; 9361 9362 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 9363 return 0; 9364 9365 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9366 return -EOPNOTSUPP; 9367 9368 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 9369 if (rc) 9370 return rc; 9371 9372 req->flags = flags; 9373 req->port_id = cpu_to_le16(pf->port_id); 9374 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 9375 BNXT_TX_PORT_STATS_BYTE_OFFSET); 9376 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 9377 return hwrm_req_send(bp, req); 9378 } 9379 9380 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 9381 { 9382 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 9383 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 9384 struct hwrm_port_qstats_ext_output *resp_qs; 9385 struct hwrm_port_qstats_ext_input *req_qs; 9386 struct bnxt_pf_info *pf = &bp->pf; 9387 u32 tx_stat_size; 9388 int rc; 9389 9390 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 9391 return 0; 9392 9393 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 9394 return -EOPNOTSUPP; 9395 9396 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 9397 if (rc) 9398 return rc; 9399 9400 req_qs->flags = flags; 9401 req_qs->port_id = cpu_to_le16(pf->port_id); 9402 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 9403 req_qs->rx_stat_host_addr = 
cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 9404 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 9405 sizeof(struct tx_port_stats_ext) : 0; 9406 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 9407 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 9408 resp_qs = hwrm_req_hold(bp, req_qs); 9409 rc = hwrm_req_send(bp, req_qs); 9410 if (!rc) { 9411 bp->fw_rx_stats_ext_size = 9412 le16_to_cpu(resp_qs->rx_stat_size) / 8; 9413 if (BNXT_FW_MAJ(bp) < 220 && 9414 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 9415 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 9416 9417 bp->fw_tx_stats_ext_size = tx_stat_size ? 9418 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 9419 } else { 9420 bp->fw_rx_stats_ext_size = 0; 9421 bp->fw_tx_stats_ext_size = 0; 9422 } 9423 hwrm_req_drop(bp, req_qs); 9424 9425 if (flags) 9426 return rc; 9427 9428 if (bp->fw_tx_stats_ext_size <= 9429 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 9430 bp->pri2cos_valid = 0; 9431 return rc; 9432 } 9433 9434 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 9435 if (rc) 9436 return rc; 9437 9438 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 9439 9440 resp_qc = hwrm_req_hold(bp, req_qc); 9441 rc = hwrm_req_send(bp, req_qc); 9442 if (!rc) { 9443 u8 *pri2cos; 9444 int i, j; 9445 9446 pri2cos = &resp_qc->pri0_cos_queue_id; 9447 for (i = 0; i < 8; i++) { 9448 u8 queue_id = pri2cos[i]; 9449 u8 queue_idx; 9450 9451 /* Per port queue IDs start from 0, 10, 20, etc */ 9452 queue_idx = queue_id % 10; 9453 if (queue_idx > BNXT_MAX_QUEUE) { 9454 bp->pri2cos_valid = false; 9455 hwrm_req_drop(bp, req_qc); 9456 return rc; 9457 } 9458 for (j = 0; j < bp->max_q; j++) { 9459 if (bp->q_ids[j] == queue_id) 9460 bp->pri2cos_idx[i] = queue_idx; 9461 } 9462 } 9463 bp->pri2cos_valid = true; 9464 } 9465 hwrm_req_drop(bp, req_qc); 9466 9467 return rc; 9468 } 9469 9470 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 9471 { 9472 bnxt_hwrm_tunnel_dst_port_free(bp, 9473 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 9474 bnxt_hwrm_tunnel_dst_port_free(bp, 9475 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 9476 } 9477 9478 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 9479 { 9480 int rc, i; 9481 u32 tpa_flags = 0; 9482 9483 if (set_tpa) 9484 tpa_flags = bp->flags & BNXT_FLAG_TPA; 9485 else if (BNXT_NO_FW_ACCESS(bp)) 9486 return 0; 9487 for (i = 0; i < bp->nr_vnics; i++) { 9488 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 9489 if (rc) { 9490 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 9491 i, rc); 9492 return rc; 9493 } 9494 } 9495 return 0; 9496 } 9497 9498 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 9499 { 9500 int i; 9501 9502 for (i = 0; i < bp->nr_vnics; i++) 9503 bnxt_hwrm_vnic_set_rss(bp, i, false); 9504 } 9505 9506 static void bnxt_clear_vnic(struct bnxt *bp) 9507 { 9508 if (!bp->vnic_info) 9509 return; 9510 9511 bnxt_hwrm_clear_vnic_filter(bp); 9512 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 9513 /* clear all RSS setting before free vnic ctx */ 9514 bnxt_hwrm_clear_vnic_rss(bp); 9515 bnxt_hwrm_vnic_ctx_free(bp); 9516 } 9517 /* before free the vnic, undo the vnic tpa settings */ 9518 if (bp->flags & BNXT_FLAG_TPA) 9519 bnxt_set_tpa(bp, false); 9520 bnxt_hwrm_vnic_free(bp); 9521 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9522 bnxt_hwrm_vnic_ctx_free(bp); 9523 } 9524 9525 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 9526 bool irq_re_init) 9527 { 9528 bnxt_clear_vnic(bp); 9529 
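/* VNIC state (filters, RSS contexts, TPA) has been cleared above; now
 * free the rings and ring groups it referenced.  Stats contexts and
 * tunnel ports are only released on a full IRQ re-init.
 */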
bnxt_hwrm_ring_free(bp, close_path); 9530 bnxt_hwrm_ring_grp_free(bp); 9531 if (irq_re_init) { 9532 bnxt_hwrm_stat_ctx_free(bp); 9533 bnxt_hwrm_free_tunnel_ports(bp); 9534 } 9535 } 9536 9537 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 9538 { 9539 struct hwrm_func_cfg_input *req; 9540 u8 evb_mode; 9541 int rc; 9542 9543 if (br_mode == BRIDGE_MODE_VEB) 9544 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 9545 else if (br_mode == BRIDGE_MODE_VEPA) 9546 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 9547 else 9548 return -EINVAL; 9549 9550 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9551 if (rc) 9552 return rc; 9553 9554 req->fid = cpu_to_le16(0xffff); 9555 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 9556 req->evb_mode = evb_mode; 9557 return hwrm_req_send(bp, req); 9558 } 9559 9560 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 9561 { 9562 struct hwrm_func_cfg_input *req; 9563 int rc; 9564 9565 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 9566 return 0; 9567 9568 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9569 if (rc) 9570 return rc; 9571 9572 req->fid = cpu_to_le16(0xffff); 9573 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 9574 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 9575 if (size == 128) 9576 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 9577 9578 return hwrm_req_send(bp, req); 9579 } 9580 9581 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 9582 { 9583 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 9584 int rc; 9585 9586 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 9587 goto skip_rss_ctx; 9588 9589 /* allocate context for vnic */ 9590 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 9591 if (rc) { 9592 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9593 vnic_id, rc); 9594 goto vnic_setup_err; 9595 } 9596 bp->rsscos_nr_ctxs++; 9597 9598 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9599 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 9600 if (rc) { 9601 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 9602 vnic_id, rc); 9603 goto vnic_setup_err; 9604 } 9605 bp->rsscos_nr_ctxs++; 9606 } 9607 9608 skip_rss_ctx: 9609 /* configure default vnic, ring grp */ 9610 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 9611 if (rc) { 9612 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9613 vnic_id, rc); 9614 goto vnic_setup_err; 9615 } 9616 9617 /* Enable RSS hashing on vnic */ 9618 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 9619 if (rc) { 9620 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 9621 vnic_id, rc); 9622 goto vnic_setup_err; 9623 } 9624 9625 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9626 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9627 if (rc) { 9628 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9629 vnic_id, rc); 9630 } 9631 } 9632 9633 vnic_setup_err: 9634 return rc; 9635 } 9636 9637 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 9638 { 9639 int rc, i, nr_ctxs; 9640 9641 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 9642 for (i = 0; i < nr_ctxs; i++) { 9643 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 9644 if (rc) { 9645 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 9646 vnic_id, i, rc); 9647 break; 9648 } 9649 bp->rsscos_nr_ctxs++; 9650 } 9651 if (i < nr_ctxs) 9652 return -ENOMEM; 9653 9654 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 9655 if (rc) { 9656 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 9657 vnic_id, rc); 9658 return rc; 9659 } 9660 rc = 
bnxt_hwrm_vnic_cfg(bp, vnic_id); 9661 if (rc) { 9662 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9663 vnic_id, rc); 9664 return rc; 9665 } 9666 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9667 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9668 if (rc) { 9669 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9670 vnic_id, rc); 9671 } 9672 } 9673 return rc; 9674 } 9675 9676 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 9677 { 9678 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9679 return __bnxt_setup_vnic_p5(bp, vnic_id); 9680 else 9681 return __bnxt_setup_vnic(bp, vnic_id); 9682 } 9683 9684 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 9685 { 9686 int i, rc = 0; 9687 9688 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9689 return 0; 9690 9691 for (i = 0; i < bp->rx_nr_rings; i++) { 9692 struct bnxt_vnic_info *vnic; 9693 u16 vnic_id = i + 1; 9694 u16 ring_id = i; 9695 9696 if (vnic_id >= bp->nr_vnics) 9697 break; 9698 9699 vnic = &bp->vnic_info[vnic_id]; 9700 vnic->flags |= BNXT_VNIC_RFS_FLAG; 9701 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 9702 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 9703 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 9704 if (rc) { 9705 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9706 vnic_id, rc); 9707 break; 9708 } 9709 rc = bnxt_setup_vnic(bp, vnic_id); 9710 if (rc) 9711 break; 9712 } 9713 return rc; 9714 } 9715 9716 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 9717 static bool bnxt_promisc_ok(struct bnxt *bp) 9718 { 9719 #ifdef CONFIG_BNXT_SRIOV 9720 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 9721 return false; 9722 #endif 9723 return true; 9724 } 9725 9726 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 9727 { 9728 unsigned int rc = 0; 9729 9730 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 9731 if (rc) { 9732 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9733 rc); 9734 return rc; 9735 } 9736 9737 rc = bnxt_hwrm_vnic_cfg(bp, 1); 9738 if (rc) { 9739 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9740 rc); 9741 return rc; 9742 } 9743 return rc; 9744 } 9745 9746 static int bnxt_cfg_rx_mode(struct bnxt *); 9747 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 9748 9749 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 9750 { 9751 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9752 int rc = 0; 9753 unsigned int rx_nr_rings = bp->rx_nr_rings; 9754 9755 if (irq_re_init) { 9756 rc = bnxt_hwrm_stat_ctx_alloc(bp); 9757 if (rc) { 9758 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 9759 rc); 9760 goto err_out; 9761 } 9762 } 9763 9764 rc = bnxt_hwrm_ring_alloc(bp); 9765 if (rc) { 9766 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 9767 goto err_out; 9768 } 9769 9770 rc = bnxt_hwrm_ring_grp_alloc(bp); 9771 if (rc) { 9772 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 9773 goto err_out; 9774 } 9775 9776 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 9777 rx_nr_rings--; 9778 9779 /* default vnic 0 */ 9780 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 9781 if (rc) { 9782 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 9783 goto err_out; 9784 } 9785 9786 if (BNXT_VF(bp)) 9787 bnxt_hwrm_func_qcfg(bp); 9788 9789 rc = bnxt_setup_vnic(bp, 0); 9790 if (rc) 9791 goto err_out; 9792 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 9793 bnxt_hwrm_update_rss_hash_cfg(bp); 9794 9795 if (bp->flags & BNXT_FLAG_RFS) { 9796 rc = bnxt_alloc_rfs_vnics(bp); 9797 if (rc) 9798 
goto err_out; 9799 } 9800 9801 if (bp->flags & BNXT_FLAG_TPA) { 9802 rc = bnxt_set_tpa(bp, true); 9803 if (rc) 9804 goto err_out; 9805 } 9806 9807 if (BNXT_VF(bp)) 9808 bnxt_update_vf_mac(bp); 9809 9810 /* Filter for default vnic 0 */ 9811 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 9812 if (rc) { 9813 if (BNXT_VF(bp) && rc == -ENODEV) 9814 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 9815 else 9816 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 9817 goto err_out; 9818 } 9819 vnic->uc_filter_count = 1; 9820 9821 vnic->rx_mask = 0; 9822 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 9823 goto skip_rx_mask; 9824 9825 if (bp->dev->flags & IFF_BROADCAST) 9826 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9827 9828 if (bp->dev->flags & IFF_PROMISC) 9829 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9830 9831 if (bp->dev->flags & IFF_ALLMULTI) { 9832 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9833 vnic->mc_list_count = 0; 9834 } else if (bp->dev->flags & IFF_MULTICAST) { 9835 u32 mask = 0; 9836 9837 bnxt_mc_list_updated(bp, &mask); 9838 vnic->rx_mask |= mask; 9839 } 9840 9841 rc = bnxt_cfg_rx_mode(bp); 9842 if (rc) 9843 goto err_out; 9844 9845 skip_rx_mask: 9846 rc = bnxt_hwrm_set_coal(bp); 9847 if (rc) 9848 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 9849 rc); 9850 9851 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9852 rc = bnxt_setup_nitroa0_vnic(bp); 9853 if (rc) 9854 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 9855 rc); 9856 } 9857 9858 if (BNXT_VF(bp)) { 9859 bnxt_hwrm_func_qcfg(bp); 9860 netdev_update_features(bp->dev); 9861 } 9862 9863 return 0; 9864 9865 err_out: 9866 bnxt_hwrm_resource_free(bp, 0, true); 9867 9868 return rc; 9869 } 9870 9871 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 9872 { 9873 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 9874 return 0; 9875 } 9876 9877 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 9878 { 9879 bnxt_init_cp_rings(bp); 9880 bnxt_init_rx_rings(bp); 9881 bnxt_init_tx_rings(bp); 9882 bnxt_init_ring_grps(bp, irq_re_init); 9883 bnxt_init_vnics(bp); 9884 9885 return bnxt_init_chip(bp, irq_re_init); 9886 } 9887 9888 static int bnxt_set_real_num_queues(struct bnxt *bp) 9889 { 9890 int rc; 9891 struct net_device *dev = bp->dev; 9892 9893 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 9894 bp->tx_nr_rings_xdp); 9895 if (rc) 9896 return rc; 9897 9898 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 9899 if (rc) 9900 return rc; 9901 9902 #ifdef CONFIG_RFS_ACCEL 9903 if (bp->flags & BNXT_FLAG_RFS) 9904 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 9905 #endif 9906 9907 return rc; 9908 } 9909 9910 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9911 bool shared) 9912 { 9913 int _rx = *rx, _tx = *tx; 9914 9915 if (shared) { 9916 *rx = min_t(int, _rx, max); 9917 *tx = min_t(int, _tx, max); 9918 } else { 9919 if (max < 2) 9920 return -ENOMEM; 9921 9922 while (_rx + _tx > max) { 9923 if (_rx > _tx && _rx > 1) 9924 _rx--; 9925 else if (_tx > 1) 9926 _tx--; 9927 } 9928 *rx = _rx; 9929 *tx = _tx; 9930 } 9931 return 0; 9932 } 9933 9934 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 9935 { 9936 return (tx - tx_xdp) / tx_sets + tx_xdp; 9937 } 9938 9939 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 9940 { 9941 int tcs = netdev_get_num_tc(bp->dev); 9942 9943 if (!tcs) 9944 tcs = 1; 9945 return __bnxt_num_tx_to_cp(bp, tx, tcs, 
bp->tx_nr_rings_xdp); 9946 } 9947 9948 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 9949 { 9950 int tcs = netdev_get_num_tc(bp->dev); 9951 9952 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 9953 bp->tx_nr_rings_xdp; 9954 } 9955 9956 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9957 bool sh) 9958 { 9959 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 9960 9961 if (tx_cp != *tx) { 9962 int tx_saved = tx_cp, rc; 9963 9964 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 9965 if (rc) 9966 return rc; 9967 if (tx_cp != tx_saved) 9968 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 9969 return 0; 9970 } 9971 return __bnxt_trim_rings(bp, rx, tx, max, sh); 9972 } 9973 9974 static void bnxt_setup_msix(struct bnxt *bp) 9975 { 9976 const int len = sizeof(bp->irq_tbl[0].name); 9977 struct net_device *dev = bp->dev; 9978 int tcs, i; 9979 9980 tcs = netdev_get_num_tc(dev); 9981 if (tcs) { 9982 int i, off, count; 9983 9984 for (i = 0; i < tcs; i++) { 9985 count = bp->tx_nr_rings_per_tc; 9986 off = BNXT_TC_TO_RING_BASE(bp, i); 9987 netdev_set_tc_queue(dev, i, count, off); 9988 } 9989 } 9990 9991 for (i = 0; i < bp->cp_nr_rings; i++) { 9992 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9993 char *attr; 9994 9995 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 9996 attr = "TxRx"; 9997 else if (i < bp->rx_nr_rings) 9998 attr = "rx"; 9999 else 10000 attr = "tx"; 10001 10002 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 10003 attr, i); 10004 bp->irq_tbl[map_idx].handler = bnxt_msix; 10005 } 10006 } 10007 10008 static void bnxt_setup_inta(struct bnxt *bp) 10009 { 10010 const int len = sizeof(bp->irq_tbl[0].name); 10011 10012 if (netdev_get_num_tc(bp->dev)) 10013 netdev_reset_tc(bp->dev); 10014 10015 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 10016 0); 10017 bp->irq_tbl[0].handler = bnxt_inta; 10018 } 10019 10020 static int bnxt_init_int_mode(struct bnxt *bp); 10021 10022 static int bnxt_setup_int_mode(struct bnxt *bp) 10023 { 10024 int rc; 10025 10026 if (!bp->irq_tbl) { 10027 rc = bnxt_init_int_mode(bp); 10028 if (rc || !bp->irq_tbl) 10029 return rc ?: -ENODEV; 10030 } 10031 10032 if (bp->flags & BNXT_FLAG_USING_MSIX) 10033 bnxt_setup_msix(bp); 10034 else 10035 bnxt_setup_inta(bp); 10036 10037 rc = bnxt_set_real_num_queues(bp); 10038 return rc; 10039 } 10040 10041 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 10042 { 10043 return bp->hw_resc.max_rsscos_ctxs; 10044 } 10045 10046 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 10047 { 10048 return bp->hw_resc.max_vnics; 10049 } 10050 10051 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 10052 { 10053 return bp->hw_resc.max_stat_ctxs; 10054 } 10055 10056 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 10057 { 10058 return bp->hw_resc.max_cp_rings; 10059 } 10060 10061 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 10062 { 10063 unsigned int cp = bp->hw_resc.max_cp_rings; 10064 10065 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 10066 cp -= bnxt_get_ulp_msix_num(bp); 10067 10068 return cp; 10069 } 10070 10071 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 10072 { 10073 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10074 10075 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10076 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 10077 10078 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 10079 } 10080 10081 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 10082 { 10083 
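	/* Record the function's IRQ ceiling; bnxt_get_max_func_irqs()
	 * above clamps against this value.  As an illustrative example
	 * (numbers are hypothetical, not taken from real hardware): a P5+
	 * chip reporting max_irqs = 128 but max_nqs = 64 yields an
	 * effective limit of 64 for bnxt_init_msix().
	 */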
bp->hw_resc.max_irqs = max_irqs; 10084 } 10085 10086 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 10087 { 10088 unsigned int cp; 10089 10090 cp = bnxt_get_max_func_cp_rings_for_en(bp); 10091 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10092 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 10093 else 10094 return cp - bp->cp_nr_rings; 10095 } 10096 10097 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 10098 { 10099 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 10100 } 10101 10102 int bnxt_get_avail_msix(struct bnxt *bp, int num) 10103 { 10104 int max_cp = bnxt_get_max_func_cp_rings(bp); 10105 int max_irq = bnxt_get_max_func_irqs(bp); 10106 int total_req = bp->cp_nr_rings + num; 10107 int max_idx, avail_msix; 10108 10109 max_idx = bp->total_irqs; 10110 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 10111 max_idx = min_t(int, bp->total_irqs, max_cp); 10112 avail_msix = max_idx - bp->cp_nr_rings; 10113 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 10114 return avail_msix; 10115 10116 if (max_irq < total_req) { 10117 num = max_irq - bp->cp_nr_rings; 10118 if (num <= 0) 10119 return 0; 10120 } 10121 return num; 10122 } 10123 10124 static int bnxt_get_num_msix(struct bnxt *bp) 10125 { 10126 if (!BNXT_NEW_RM(bp)) 10127 return bnxt_get_max_func_irqs(bp); 10128 10129 return bnxt_nq_rings_in_use(bp); 10130 } 10131 10132 static int bnxt_init_msix(struct bnxt *bp) 10133 { 10134 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; 10135 struct msix_entry *msix_ent; 10136 10137 total_vecs = bnxt_get_num_msix(bp); 10138 max = bnxt_get_max_func_irqs(bp); 10139 if (total_vecs > max) 10140 total_vecs = max; 10141 10142 if (!total_vecs) 10143 return 0; 10144 10145 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 10146 if (!msix_ent) 10147 return -ENOMEM; 10148 10149 for (i = 0; i < total_vecs; i++) { 10150 msix_ent[i].entry = i; 10151 msix_ent[i].vector = 0; 10152 } 10153 10154 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 10155 min = 2; 10156 10157 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 10158 ulp_msix = bnxt_get_ulp_msix_num(bp); 10159 if (total_vecs < 0 || total_vecs < ulp_msix) { 10160 rc = -ENODEV; 10161 goto msix_setup_exit; 10162 } 10163 10164 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 10165 if (bp->irq_tbl) { 10166 for (i = 0; i < total_vecs; i++) 10167 bp->irq_tbl[i].vector = msix_ent[i].vector; 10168 10169 bp->total_irqs = total_vecs; 10170 /* Trim rings based upon num of vectors allocated */ 10171 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 10172 total_vecs - ulp_msix, min == 1); 10173 if (rc) 10174 goto msix_setup_exit; 10175 10176 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 10177 bp->cp_nr_rings = (min == 1) ? 
10178 max_t(int, tx_cp, bp->rx_nr_rings) : 10179 tx_cp + bp->rx_nr_rings; 10180 10181 } else { 10182 rc = -ENOMEM; 10183 goto msix_setup_exit; 10184 } 10185 bp->flags |= BNXT_FLAG_USING_MSIX; 10186 kfree(msix_ent); 10187 return 0; 10188 10189 msix_setup_exit: 10190 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 10191 kfree(bp->irq_tbl); 10192 bp->irq_tbl = NULL; 10193 pci_disable_msix(bp->pdev); 10194 kfree(msix_ent); 10195 return rc; 10196 } 10197 10198 static int bnxt_init_inta(struct bnxt *bp) 10199 { 10200 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); 10201 if (!bp->irq_tbl) 10202 return -ENOMEM; 10203 10204 bp->total_irqs = 1; 10205 bp->rx_nr_rings = 1; 10206 bp->tx_nr_rings = 1; 10207 bp->cp_nr_rings = 1; 10208 bp->flags |= BNXT_FLAG_SHARED_RINGS; 10209 bp->irq_tbl[0].vector = bp->pdev->irq; 10210 return 0; 10211 } 10212 10213 static int bnxt_init_int_mode(struct bnxt *bp) 10214 { 10215 int rc = -ENODEV; 10216 10217 if (bp->flags & BNXT_FLAG_MSIX_CAP) 10218 rc = bnxt_init_msix(bp); 10219 10220 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 10221 /* fallback to INTA */ 10222 rc = bnxt_init_inta(bp); 10223 } 10224 return rc; 10225 } 10226 10227 static void bnxt_clear_int_mode(struct bnxt *bp) 10228 { 10229 if (bp->flags & BNXT_FLAG_USING_MSIX) 10230 pci_disable_msix(bp->pdev); 10231 10232 kfree(bp->irq_tbl); 10233 bp->irq_tbl = NULL; 10234 bp->flags &= ~BNXT_FLAG_USING_MSIX; 10235 } 10236 10237 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 10238 { 10239 int tcs = netdev_get_num_tc(bp->dev); 10240 bool irq_cleared = false; 10241 int rc; 10242 10243 if (!bnxt_need_reserve_rings(bp)) 10244 return 0; 10245 10246 if (irq_re_init && BNXT_NEW_RM(bp) && 10247 bnxt_get_num_msix(bp) != bp->total_irqs) { 10248 bnxt_ulp_irq_stop(bp); 10249 bnxt_clear_int_mode(bp); 10250 irq_cleared = true; 10251 } 10252 rc = __bnxt_reserve_rings(bp); 10253 if (irq_cleared) { 10254 if (!rc) 10255 rc = bnxt_init_int_mode(bp); 10256 bnxt_ulp_irq_restart(bp, rc); 10257 } 10258 if (rc) { 10259 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 10260 return rc; 10261 } 10262 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 10263 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 10264 netdev_err(bp->dev, "tx ring reservation failure\n"); 10265 netdev_reset_tc(bp->dev); 10266 if (bp->tx_nr_rings_xdp) 10267 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 10268 else 10269 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 10270 return -ENOMEM; 10271 } 10272 return 0; 10273 } 10274 10275 static void bnxt_free_irq(struct bnxt *bp) 10276 { 10277 struct bnxt_irq *irq; 10278 int i; 10279 10280 #ifdef CONFIG_RFS_ACCEL 10281 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 10282 bp->dev->rx_cpu_rmap = NULL; 10283 #endif 10284 if (!bp->irq_tbl || !bp->bnapi) 10285 return; 10286 10287 for (i = 0; i < bp->cp_nr_rings; i++) { 10288 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10289 10290 irq = &bp->irq_tbl[map_idx]; 10291 if (irq->requested) { 10292 if (irq->have_cpumask) { 10293 irq_set_affinity_hint(irq->vector, NULL); 10294 free_cpumask_var(irq->cpu_mask); 10295 irq->have_cpumask = 0; 10296 } 10297 free_irq(irq->vector, bp->bnapi[i]); 10298 } 10299 10300 irq->requested = 0; 10301 } 10302 } 10303 10304 static int bnxt_request_irq(struct bnxt *bp) 10305 { 10306 int i, j, rc = 0; 10307 unsigned long flags = 0; 10308 #ifdef CONFIG_RFS_ACCEL 10309 struct cpu_rmap *rmap; 10310 #endif 10311 10312 rc = bnxt_setup_int_mode(bp); 10313 if (rc) { 10314 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 10315 
rc); 10316 return rc; 10317 } 10318 #ifdef CONFIG_RFS_ACCEL 10319 rmap = bp->dev->rx_cpu_rmap; 10320 #endif 10321 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 10322 flags = IRQF_SHARED; 10323 10324 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 10325 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 10326 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 10327 10328 #ifdef CONFIG_RFS_ACCEL 10329 if (rmap && bp->bnapi[i]->rx_ring) { 10330 rc = irq_cpu_rmap_add(rmap, irq->vector); 10331 if (rc) 10332 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 10333 j); 10334 j++; 10335 } 10336 #endif 10337 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 10338 bp->bnapi[i]); 10339 if (rc) 10340 break; 10341 10342 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); 10343 irq->requested = 1; 10344 10345 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 10346 int numa_node = dev_to_node(&bp->pdev->dev); 10347 10348 irq->have_cpumask = 1; 10349 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 10350 irq->cpu_mask); 10351 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 10352 if (rc) { 10353 netdev_warn(bp->dev, 10354 "Set affinity failed, IRQ = %d\n", 10355 irq->vector); 10356 break; 10357 } 10358 } 10359 } 10360 return rc; 10361 } 10362 10363 static void bnxt_del_napi(struct bnxt *bp) 10364 { 10365 int i; 10366 10367 if (!bp->bnapi) 10368 return; 10369 10370 for (i = 0; i < bp->rx_nr_rings; i++) 10371 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 10372 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 10373 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 10374 10375 for (i = 0; i < bp->cp_nr_rings; i++) { 10376 struct bnxt_napi *bnapi = bp->bnapi[i]; 10377 10378 __netif_napi_del(&bnapi->napi); 10379 } 10380 /* We called __netif_napi_del(), we need 10381 * to respect an RCU grace period before freeing napi structures. 
10382 */ 10383 synchronize_net(); 10384 } 10385 10386 static void bnxt_init_napi(struct bnxt *bp) 10387 { 10388 int i; 10389 unsigned int cp_nr_rings = bp->cp_nr_rings; 10390 struct bnxt_napi *bnapi; 10391 10392 if (bp->flags & BNXT_FLAG_USING_MSIX) { 10393 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 10394 10395 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10396 poll_fn = bnxt_poll_p5; 10397 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10398 cp_nr_rings--; 10399 for (i = 0; i < cp_nr_rings; i++) { 10400 bnapi = bp->bnapi[i]; 10401 netif_napi_add(bp->dev, &bnapi->napi, poll_fn); 10402 } 10403 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10404 bnapi = bp->bnapi[cp_nr_rings]; 10405 netif_napi_add(bp->dev, &bnapi->napi, 10406 bnxt_poll_nitroa0); 10407 } 10408 } else { 10409 bnapi = bp->bnapi[0]; 10410 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); 10411 } 10412 } 10413 10414 static void bnxt_disable_napi(struct bnxt *bp) 10415 { 10416 int i; 10417 10418 if (!bp->bnapi || 10419 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 10420 return; 10421 10422 for (i = 0; i < bp->cp_nr_rings; i++) { 10423 struct bnxt_napi *bnapi = bp->bnapi[i]; 10424 struct bnxt_cp_ring_info *cpr; 10425 10426 cpr = &bnapi->cp_ring; 10427 if (bnapi->tx_fault) 10428 cpr->sw_stats.tx.tx_resets++; 10429 if (bnapi->in_reset) 10430 cpr->sw_stats.rx.rx_resets++; 10431 napi_disable(&bnapi->napi); 10432 if (bnapi->rx_ring) 10433 cancel_work_sync(&cpr->dim.work); 10434 } 10435 } 10436 10437 static void bnxt_enable_napi(struct bnxt *bp) 10438 { 10439 int i; 10440 10441 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 10442 for (i = 0; i < bp->cp_nr_rings; i++) { 10443 struct bnxt_napi *bnapi = bp->bnapi[i]; 10444 struct bnxt_cp_ring_info *cpr; 10445 10446 bnapi->tx_fault = 0; 10447 10448 cpr = &bnapi->cp_ring; 10449 bnapi->in_reset = false; 10450 10451 if (bnapi->rx_ring) { 10452 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 10453 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 10454 } 10455 napi_enable(&bnapi->napi); 10456 } 10457 } 10458 10459 void bnxt_tx_disable(struct bnxt *bp) 10460 { 10461 int i; 10462 struct bnxt_tx_ring_info *txr; 10463 10464 if (bp->tx_ring) { 10465 for (i = 0; i < bp->tx_nr_rings; i++) { 10466 txr = &bp->tx_ring[i]; 10467 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 10468 } 10469 } 10470 /* Make sure napi polls see @dev_state change */ 10471 synchronize_net(); 10472 /* Drop carrier first to prevent TX timeout */ 10473 netif_carrier_off(bp->dev); 10474 /* Stop all TX queues */ 10475 netif_tx_disable(bp->dev); 10476 } 10477 10478 void bnxt_tx_enable(struct bnxt *bp) 10479 { 10480 int i; 10481 struct bnxt_tx_ring_info *txr; 10482 10483 for (i = 0; i < bp->tx_nr_rings; i++) { 10484 txr = &bp->tx_ring[i]; 10485 WRITE_ONCE(txr->dev_state, 0); 10486 } 10487 /* Make sure napi polls see @dev_state change */ 10488 synchronize_net(); 10489 netif_tx_wake_all_queues(bp->dev); 10490 if (BNXT_LINK_IS_UP(bp)) 10491 netif_carrier_on(bp->dev); 10492 } 10493 10494 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 10495 { 10496 u8 active_fec = link_info->active_fec_sig_mode & 10497 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 10498 10499 switch (active_fec) { 10500 default: 10501 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 10502 return "None"; 10503 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 10504 return "Clause 74 BaseR"; 10505 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 10506 return "Clause 91 RS(528,514)"; 10507 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 10508 
return "Clause 91 RS544_1XN"; 10509 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 10510 return "Clause 91 RS(544,514)"; 10511 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 10512 return "Clause 91 RS272_1XN"; 10513 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 10514 return "Clause 91 RS(272,257)"; 10515 } 10516 } 10517 10518 void bnxt_report_link(struct bnxt *bp) 10519 { 10520 if (BNXT_LINK_IS_UP(bp)) { 10521 const char *signal = ""; 10522 const char *flow_ctrl; 10523 const char *duplex; 10524 u32 speed; 10525 u16 fec; 10526 10527 netif_carrier_on(bp->dev); 10528 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 10529 if (speed == SPEED_UNKNOWN) { 10530 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 10531 return; 10532 } 10533 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 10534 duplex = "full"; 10535 else 10536 duplex = "half"; 10537 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 10538 flow_ctrl = "ON - receive & transmit"; 10539 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 10540 flow_ctrl = "ON - transmit"; 10541 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 10542 flow_ctrl = "ON - receive"; 10543 else 10544 flow_ctrl = "none"; 10545 if (bp->link_info.phy_qcfg_resp.option_flags & 10546 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 10547 u8 sig_mode = bp->link_info.active_fec_sig_mode & 10548 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 10549 switch (sig_mode) { 10550 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 10551 signal = "(NRZ) "; 10552 break; 10553 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 10554 signal = "(PAM4 56Gbps) "; 10555 break; 10556 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 10557 signal = "(PAM4 112Gbps) "; 10558 break; 10559 default: 10560 break; 10561 } 10562 } 10563 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 10564 speed, signal, duplex, flow_ctrl); 10565 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 10566 netdev_info(bp->dev, "EEE is %s\n", 10567 bp->eee.eee_active ? "active" : 10568 "not active"); 10569 fec = bp->link_info.fec_cfg; 10570 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 10571 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 10572 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 10573 bnxt_report_fec(&bp->link_info)); 10574 } else { 10575 netif_carrier_off(bp->dev); 10576 netdev_err(bp->dev, "NIC Link is Down\n"); 10577 } 10578 } 10579 10580 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 10581 { 10582 if (!resp->supported_speeds_auto_mode && 10583 !resp->supported_speeds_force_mode && 10584 !resp->supported_pam4_speeds_auto_mode && 10585 !resp->supported_pam4_speeds_force_mode && 10586 !resp->supported_speeds2_auto_mode && 10587 !resp->supported_speeds2_force_mode) 10588 return true; 10589 return false; 10590 } 10591 10592 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 10593 { 10594 struct bnxt_link_info *link_info = &bp->link_info; 10595 struct hwrm_port_phy_qcaps_output *resp; 10596 struct hwrm_port_phy_qcaps_input *req; 10597 int rc = 0; 10598 10599 if (bp->hwrm_spec_code < 0x10201) 10600 return 0; 10601 10602 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 10603 if (rc) 10604 return rc; 10605 10606 resp = hwrm_req_hold(bp, req); 10607 rc = hwrm_req_send(bp, req); 10608 if (rc) 10609 goto hwrm_phy_qcaps_exit; 10610 10611 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 10612 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 10613 struct ethtool_eee *eee = &bp->eee; 10614 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 10615 10616 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10617 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 10618 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 10619 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 10620 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 10621 } 10622 10623 if (bp->hwrm_spec_code >= 0x10a01) { 10624 if (bnxt_phy_qcaps_no_speed(resp)) { 10625 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 10626 netdev_warn(bp->dev, "Ethernet link disabled\n"); 10627 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 10628 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 10629 netdev_info(bp->dev, "Ethernet link enabled\n"); 10630 /* Phy re-enabled, reprobe the speeds */ 10631 link_info->support_auto_speeds = 0; 10632 link_info->support_pam4_auto_speeds = 0; 10633 link_info->support_auto_speeds2 = 0; 10634 } 10635 } 10636 if (resp->supported_speeds_auto_mode) 10637 link_info->support_auto_speeds = 10638 le16_to_cpu(resp->supported_speeds_auto_mode); 10639 if (resp->supported_pam4_speeds_auto_mode) 10640 link_info->support_pam4_auto_speeds = 10641 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 10642 if (resp->supported_speeds2_auto_mode) 10643 link_info->support_auto_speeds2 = 10644 le16_to_cpu(resp->supported_speeds2_auto_mode); 10645 10646 bp->port_count = resp->port_cnt; 10647 10648 hwrm_phy_qcaps_exit: 10649 hwrm_req_drop(bp, req); 10650 return rc; 10651 } 10652 10653 static bool bnxt_support_dropped(u16 advertising, u16 supported) 10654 { 10655 u16 diff = advertising ^ supported; 10656 10657 return ((supported | diff) != supported); 10658 } 10659 10660 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) 10661 { 10662 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 10663 10664 /* Check if any advertised speeds are no longer supported. The caller 10665 * holds the link_lock mutex, so we can modify link_info settings. 
10666 */ 10667 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10668 if (bnxt_support_dropped(link_info->advertising, 10669 link_info->support_auto_speeds2)) { 10670 link_info->advertising = link_info->support_auto_speeds2; 10671 return true; 10672 } 10673 return false; 10674 } 10675 if (bnxt_support_dropped(link_info->advertising, 10676 link_info->support_auto_speeds)) { 10677 link_info->advertising = link_info->support_auto_speeds; 10678 return true; 10679 } 10680 if (bnxt_support_dropped(link_info->advertising_pam4, 10681 link_info->support_pam4_auto_speeds)) { 10682 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 10683 return true; 10684 } 10685 return false; 10686 } 10687 10688 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 10689 { 10690 struct bnxt_link_info *link_info = &bp->link_info; 10691 struct hwrm_port_phy_qcfg_output *resp; 10692 struct hwrm_port_phy_qcfg_input *req; 10693 u8 link_state = link_info->link_state; 10694 bool support_changed; 10695 int rc; 10696 10697 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 10698 if (rc) 10699 return rc; 10700 10701 resp = hwrm_req_hold(bp, req); 10702 rc = hwrm_req_send(bp, req); 10703 if (rc) { 10704 hwrm_req_drop(bp, req); 10705 if (BNXT_VF(bp) && rc == -ENODEV) { 10706 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 10707 rc = 0; 10708 } 10709 return rc; 10710 } 10711 10712 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 10713 link_info->phy_link_status = resp->link; 10714 link_info->duplex = resp->duplex_cfg; 10715 if (bp->hwrm_spec_code >= 0x10800) 10716 link_info->duplex = resp->duplex_state; 10717 link_info->pause = resp->pause; 10718 link_info->auto_mode = resp->auto_mode; 10719 link_info->auto_pause_setting = resp->auto_pause; 10720 link_info->lp_pause = resp->link_partner_adv_pause; 10721 link_info->force_pause_setting = resp->force_pause; 10722 link_info->duplex_setting = resp->duplex_cfg; 10723 if (link_info->phy_link_status == BNXT_LINK_LINK) { 10724 link_info->link_speed = le16_to_cpu(resp->link_speed); 10725 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 10726 link_info->active_lanes = resp->active_lanes; 10727 } else { 10728 link_info->link_speed = 0; 10729 link_info->active_lanes = 0; 10730 } 10731 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 10732 link_info->force_pam4_link_speed = 10733 le16_to_cpu(resp->force_pam4_link_speed); 10734 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 10735 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 10736 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 10737 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 10738 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 10739 link_info->auto_pam4_link_speeds = 10740 le16_to_cpu(resp->auto_pam4_link_speed_mask); 10741 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 10742 link_info->lp_auto_link_speeds = 10743 le16_to_cpu(resp->link_partner_adv_speeds); 10744 link_info->lp_auto_pam4_link_speeds = 10745 resp->link_partner_pam4_adv_speeds; 10746 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 10747 link_info->phy_ver[0] = resp->phy_maj; 10748 link_info->phy_ver[1] = resp->phy_min; 10749 link_info->phy_ver[2] = resp->phy_bld; 10750 link_info->media_type = resp->media_type; 10751 link_info->phy_type = resp->phy_type; 10752 link_info->transceiver = resp->xcvr_pkg_type; 10753 link_info->phy_addr = resp->eee_config_phy_addr & 10754 
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 10755 link_info->module_status = resp->module_status; 10756 10757 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 10758 struct ethtool_eee *eee = &bp->eee; 10759 u16 fw_speeds; 10760 10761 eee->eee_active = 0; 10762 if (resp->eee_config_phy_addr & 10763 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 10764 eee->eee_active = 1; 10765 fw_speeds = le16_to_cpu( 10766 resp->link_partner_adv_eee_link_speed_mask); 10767 eee->lp_advertised = 10768 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10769 } 10770 10771 /* Pull initial EEE config */ 10772 if (!chng_link_state) { 10773 if (resp->eee_config_phy_addr & 10774 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 10775 eee->eee_enabled = 1; 10776 10777 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 10778 eee->advertised = 10779 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10780 10781 if (resp->eee_config_phy_addr & 10782 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 10783 __le32 tmr; 10784 10785 eee->tx_lpi_enabled = 1; 10786 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 10787 eee->tx_lpi_timer = le32_to_cpu(tmr) & 10788 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 10789 } 10790 } 10791 } 10792 10793 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 10794 if (bp->hwrm_spec_code >= 0x10504) { 10795 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 10796 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 10797 } 10798 /* TODO: need to add more logic to report VF link */ 10799 if (chng_link_state) { 10800 if (link_info->phy_link_status == BNXT_LINK_LINK) 10801 link_info->link_state = BNXT_LINK_STATE_UP; 10802 else 10803 link_info->link_state = BNXT_LINK_STATE_DOWN; 10804 if (link_state != link_info->link_state) 10805 bnxt_report_link(bp); 10806 } else { 10807 /* always link down if not require to update link state */ 10808 link_info->link_state = BNXT_LINK_STATE_DOWN; 10809 } 10810 hwrm_req_drop(bp, req); 10811 10812 if (!BNXT_PHY_CFG_ABLE(bp)) 10813 return 0; 10814 10815 support_changed = bnxt_support_speed_dropped(link_info); 10816 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 10817 bnxt_hwrm_set_link_setting(bp, true, false); 10818 return 0; 10819 } 10820 10821 static void bnxt_get_port_module_status(struct bnxt *bp) 10822 { 10823 struct bnxt_link_info *link_info = &bp->link_info; 10824 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 10825 u8 module_status; 10826 10827 if (bnxt_update_link(bp, true)) 10828 return; 10829 10830 module_status = link_info->module_status; 10831 switch (module_status) { 10832 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 10833 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 10834 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 10835 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 10836 bp->pf.port_id); 10837 if (bp->hwrm_spec_code >= 0x10201) { 10838 netdev_warn(bp->dev, "Module part number %s\n", 10839 resp->phy_vendor_partnumber); 10840 } 10841 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 10842 netdev_warn(bp->dev, "TX is disabled\n"); 10843 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 10844 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 10845 } 10846 } 10847 10848 static void 10849 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 10850 { 10851 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 10852 if (bp->hwrm_spec_code >= 0x10201) 10853 req->auto_pause = 10854 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 10855 if 
(bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 10856 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 10857 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 10858 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 10859 req->enables |= 10860 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 10861 } else { 10862 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 10863 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 10864 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 10865 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 10866 req->enables |= 10867 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 10868 if (bp->hwrm_spec_code >= 0x10201) { 10869 req->auto_pause = req->force_pause; 10870 req->enables |= cpu_to_le32( 10871 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 10872 } 10873 } 10874 } 10875 10876 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 10877 { 10878 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 10879 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 10880 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10881 req->enables |= 10882 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 10883 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 10884 } else if (bp->link_info.advertising) { 10885 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 10886 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 10887 } 10888 if (bp->link_info.advertising_pam4) { 10889 req->enables |= 10890 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 10891 req->auto_link_pam4_speed_mask = 10892 cpu_to_le16(bp->link_info.advertising_pam4); 10893 } 10894 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 10895 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 10896 } else { 10897 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 10898 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10899 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 10900 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 10901 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 10902 (u32)bp->link_info.req_link_speed); 10903 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 10904 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10905 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 10906 } else { 10907 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10908 } 10909 } 10910 10911 /* tell chimp that the setting takes effect immediately */ 10912 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 10913 } 10914 10915 int bnxt_hwrm_set_pause(struct bnxt *bp) 10916 { 10917 struct hwrm_port_phy_cfg_input *req; 10918 int rc; 10919 10920 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10921 if (rc) 10922 return rc; 10923 10924 bnxt_hwrm_set_pause_common(bp, req); 10925 10926 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 10927 bp->link_info.force_link_chng) 10928 bnxt_hwrm_set_link_common(bp, req); 10929 10930 rc = hwrm_req_send(bp, req); 10931 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 10932 /* since changing of pause setting doesn't trigger any link 10933 * change event, the driver needs to update the current pause 10934 * result upon successfully return of the phy_cfg command 10935 */ 10936 bp->link_info.pause = 10937 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 10938 
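		/* Pause is forced here (autoneg flow control is off), so the
		 * stale autoneg pause result is cleared below and, unless a
		 * full link reconfiguration is still pending, the link is
		 * reported so the new pause state shows up right away.
		 */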
bp->link_info.auto_pause_setting = 0; 10939 if (!bp->link_info.force_link_chng) 10940 bnxt_report_link(bp); 10941 } 10942 bp->link_info.force_link_chng = false; 10943 return rc; 10944 } 10945 10946 static void bnxt_hwrm_set_eee(struct bnxt *bp, 10947 struct hwrm_port_phy_cfg_input *req) 10948 { 10949 struct ethtool_eee *eee = &bp->eee; 10950 10951 if (eee->eee_enabled) { 10952 u16 eee_speeds; 10953 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 10954 10955 if (eee->tx_lpi_enabled) 10956 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 10957 else 10958 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 10959 10960 req->flags |= cpu_to_le32(flags); 10961 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 10962 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 10963 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 10964 } else { 10965 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 10966 } 10967 } 10968 10969 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 10970 { 10971 struct hwrm_port_phy_cfg_input *req; 10972 int rc; 10973 10974 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10975 if (rc) 10976 return rc; 10977 10978 if (set_pause) 10979 bnxt_hwrm_set_pause_common(bp, req); 10980 10981 bnxt_hwrm_set_link_common(bp, req); 10982 10983 if (set_eee) 10984 bnxt_hwrm_set_eee(bp, req); 10985 return hwrm_req_send(bp, req); 10986 } 10987 10988 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 10989 { 10990 struct hwrm_port_phy_cfg_input *req; 10991 int rc; 10992 10993 if (!BNXT_SINGLE_PF(bp)) 10994 return 0; 10995 10996 if (pci_num_vf(bp->pdev) && 10997 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 10998 return 0; 10999 11000 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 11001 if (rc) 11002 return rc; 11003 11004 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 11005 rc = hwrm_req_send(bp, req); 11006 if (!rc) { 11007 mutex_lock(&bp->link_lock); 11008 /* Device is not obliged link down in certain scenarios, even 11009 * when forced. Setting the state unknown is consistent with 11010 * driver startup and will force link state to be reported 11011 * during subsequent open based on PORT_PHY_QCFG. 
11012 */ 11013 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 11014 mutex_unlock(&bp->link_lock); 11015 } 11016 return rc; 11017 } 11018 11019 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 11020 { 11021 #ifdef CONFIG_TEE_BNXT_FW 11022 int rc = tee_bnxt_fw_load(); 11023 11024 if (rc) 11025 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 11026 11027 return rc; 11028 #else 11029 netdev_err(bp->dev, "OP-TEE not supported\n"); 11030 return -ENODEV; 11031 #endif 11032 } 11033 11034 static int bnxt_try_recover_fw(struct bnxt *bp) 11035 { 11036 if (bp->fw_health && bp->fw_health->status_reliable) { 11037 int retry = 0, rc; 11038 u32 sts; 11039 11040 do { 11041 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11042 rc = bnxt_hwrm_poll(bp); 11043 if (!BNXT_FW_IS_BOOTING(sts) && 11044 !BNXT_FW_IS_RECOVERING(sts)) 11045 break; 11046 retry++; 11047 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 11048 11049 if (!BNXT_FW_IS_HEALTHY(sts)) { 11050 netdev_err(bp->dev, 11051 "Firmware not responding, status: 0x%x\n", 11052 sts); 11053 rc = -ENODEV; 11054 } 11055 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 11056 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 11057 return bnxt_fw_reset_via_optee(bp); 11058 } 11059 return rc; 11060 } 11061 11062 return -ENODEV; 11063 } 11064 11065 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 11066 { 11067 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11068 11069 if (!BNXT_NEW_RM(bp)) 11070 return; /* no resource reservations required */ 11071 11072 hw_resc->resv_cp_rings = 0; 11073 hw_resc->resv_stat_ctxs = 0; 11074 hw_resc->resv_irqs = 0; 11075 hw_resc->resv_tx_rings = 0; 11076 hw_resc->resv_rx_rings = 0; 11077 hw_resc->resv_hw_ring_grps = 0; 11078 hw_resc->resv_vnics = 0; 11079 if (!fw_reset) { 11080 bp->tx_nr_rings = 0; 11081 bp->rx_nr_rings = 0; 11082 } 11083 } 11084 11085 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 11086 { 11087 int rc; 11088 11089 if (!BNXT_NEW_RM(bp)) 11090 return 0; /* no resource reservations required */ 11091 11092 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 11093 if (rc) 11094 netdev_err(bp->dev, "resc_qcaps failed\n"); 11095 11096 bnxt_clear_reservations(bp, fw_reset); 11097 11098 return rc; 11099 } 11100 11101 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 11102 { 11103 struct hwrm_func_drv_if_change_output *resp; 11104 struct hwrm_func_drv_if_change_input *req; 11105 bool fw_reset = !bp->irq_tbl; 11106 bool resc_reinit = false; 11107 int rc, retry = 0; 11108 u32 flags = 0; 11109 11110 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 11111 return 0; 11112 11113 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 11114 if (rc) 11115 return rc; 11116 11117 if (up) 11118 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 11119 resp = hwrm_req_hold(bp, req); 11120 11121 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 11122 while (retry < BNXT_FW_IF_RETRY) { 11123 rc = hwrm_req_send(bp, req); 11124 if (rc != -EAGAIN) 11125 break; 11126 11127 msleep(50); 11128 retry++; 11129 } 11130 11131 if (rc == -EAGAIN) { 11132 hwrm_req_drop(bp, req); 11133 return rc; 11134 } else if (!rc) { 11135 flags = le32_to_cpu(resp->flags); 11136 } else if (up) { 11137 rc = bnxt_try_recover_fw(bp); 11138 fw_reset = true; 11139 } 11140 hwrm_req_drop(bp, req); 11141 if (rc) 11142 return rc; 11143 11144 if (!up) { 11145 bnxt_inv_fw_health_reg(bp); 11146 return 0; 11147 } 11148 11149 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 11150 resc_reinit = true; 11151 if (flags & 
FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 11152 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 11153 fw_reset = true; 11154 else 11155 bnxt_remap_fw_health_regs(bp); 11156 11157 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 11158 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 11159 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11160 return -ENODEV; 11161 } 11162 if (resc_reinit || fw_reset) { 11163 if (fw_reset) { 11164 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11165 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11166 bnxt_ulp_stop(bp); 11167 bnxt_free_ctx_mem(bp); 11168 bnxt_dcb_free(bp); 11169 rc = bnxt_fw_init_one(bp); 11170 if (rc) { 11171 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11172 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11173 return rc; 11174 } 11175 bnxt_clear_int_mode(bp); 11176 rc = bnxt_init_int_mode(bp); 11177 if (rc) { 11178 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11179 netdev_err(bp->dev, "init int mode failed\n"); 11180 return rc; 11181 } 11182 } 11183 rc = bnxt_cancel_reservations(bp, fw_reset); 11184 } 11185 return rc; 11186 } 11187 11188 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 11189 { 11190 struct hwrm_port_led_qcaps_output *resp; 11191 struct hwrm_port_led_qcaps_input *req; 11192 struct bnxt_pf_info *pf = &bp->pf; 11193 int rc; 11194 11195 bp->num_leds = 0; 11196 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 11197 return 0; 11198 11199 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 11200 if (rc) 11201 return rc; 11202 11203 req->port_id = cpu_to_le16(pf->port_id); 11204 resp = hwrm_req_hold(bp, req); 11205 rc = hwrm_req_send(bp, req); 11206 if (rc) { 11207 hwrm_req_drop(bp, req); 11208 return rc; 11209 } 11210 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 11211 int i; 11212 11213 bp->num_leds = resp->num_leds; 11214 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 11215 bp->num_leds); 11216 for (i = 0; i < bp->num_leds; i++) { 11217 struct bnxt_led_info *led = &bp->leds[i]; 11218 __le16 caps = led->led_state_caps; 11219 11220 if (!led->led_group_id || 11221 !BNXT_LED_ALT_BLINK_CAP(caps)) { 11222 bp->num_leds = 0; 11223 break; 11224 } 11225 } 11226 } 11227 hwrm_req_drop(bp, req); 11228 return 0; 11229 } 11230 11231 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 11232 { 11233 struct hwrm_wol_filter_alloc_output *resp; 11234 struct hwrm_wol_filter_alloc_input *req; 11235 int rc; 11236 11237 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 11238 if (rc) 11239 return rc; 11240 11241 req->port_id = cpu_to_le16(bp->pf.port_id); 11242 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 11243 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 11244 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 11245 11246 resp = hwrm_req_hold(bp, req); 11247 rc = hwrm_req_send(bp, req); 11248 if (!rc) 11249 bp->wol_filter_id = resp->wol_filter_id; 11250 hwrm_req_drop(bp, req); 11251 return rc; 11252 } 11253 11254 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 11255 { 11256 struct hwrm_wol_filter_free_input *req; 11257 int rc; 11258 11259 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 11260 if (rc) 11261 return rc; 11262 11263 req->port_id = cpu_to_le16(bp->pf.port_id); 11264 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 11265 req->wol_filter_id = bp->wol_filter_id; 11266 11267 return hwrm_req_send(bp, req); 11268 } 11269 11270 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 11271 { 11272 struct 
hwrm_wol_filter_qcfg_output *resp; 11273 struct hwrm_wol_filter_qcfg_input *req; 11274 u16 next_handle = 0; 11275 int rc; 11276 11277 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 11278 if (rc) 11279 return rc; 11280 11281 req->port_id = cpu_to_le16(bp->pf.port_id); 11282 req->handle = cpu_to_le16(handle); 11283 resp = hwrm_req_hold(bp, req); 11284 rc = hwrm_req_send(bp, req); 11285 if (!rc) { 11286 next_handle = le16_to_cpu(resp->next_handle); 11287 if (next_handle != 0) { 11288 if (resp->wol_type == 11289 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 11290 bp->wol = 1; 11291 bp->wol_filter_id = resp->wol_filter_id; 11292 } 11293 } 11294 } 11295 hwrm_req_drop(bp, req); 11296 return next_handle; 11297 } 11298 11299 static void bnxt_get_wol_settings(struct bnxt *bp) 11300 { 11301 u16 handle = 0; 11302 11303 bp->wol = 0; 11304 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 11305 return; 11306 11307 do { 11308 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 11309 } while (handle && handle != 0xffff); 11310 } 11311 11312 static bool bnxt_eee_config_ok(struct bnxt *bp) 11313 { 11314 struct ethtool_eee *eee = &bp->eee; 11315 struct bnxt_link_info *link_info = &bp->link_info; 11316 11317 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 11318 return true; 11319 11320 if (eee->eee_enabled) { 11321 u32 advertising = 11322 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 11323 11324 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11325 eee->eee_enabled = 0; 11326 return false; 11327 } 11328 if (eee->advertised & ~advertising) { 11329 eee->advertised = advertising & eee->supported; 11330 return false; 11331 } 11332 } 11333 return true; 11334 } 11335 11336 static int bnxt_update_phy_setting(struct bnxt *bp) 11337 { 11338 int rc; 11339 bool update_link = false; 11340 bool update_pause = false; 11341 bool update_eee = false; 11342 struct bnxt_link_info *link_info = &bp->link_info; 11343 11344 rc = bnxt_update_link(bp, true); 11345 if (rc) { 11346 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 11347 rc); 11348 return rc; 11349 } 11350 if (!BNXT_SINGLE_PF(bp)) 11351 return 0; 11352 11353 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11354 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 11355 link_info->req_flow_ctrl) 11356 update_pause = true; 11357 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 11358 link_info->force_pause_setting != link_info->req_flow_ctrl) 11359 update_pause = true; 11360 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 11361 if (BNXT_AUTO_MODE(link_info->auto_mode)) 11362 update_link = true; 11363 if (bnxt_force_speed_updated(link_info)) 11364 update_link = true; 11365 if (link_info->req_duplex != link_info->duplex_setting) 11366 update_link = true; 11367 } else { 11368 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 11369 update_link = true; 11370 if (bnxt_auto_speed_updated(link_info)) 11371 update_link = true; 11372 } 11373 11374 /* The last close may have shutdown the link, so need to call 11375 * PHY_CFG to bring it back up. 
11376 */ 11377 if (!BNXT_LINK_IS_UP(bp)) 11378 update_link = true; 11379 11380 if (!bnxt_eee_config_ok(bp)) 11381 update_eee = true; 11382 11383 if (update_link) 11384 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 11385 else if (update_pause) 11386 rc = bnxt_hwrm_set_pause(bp); 11387 if (rc) { 11388 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 11389 rc); 11390 return rc; 11391 } 11392 11393 return rc; 11394 } 11395 11396 /* Common routine to pre-map certain register block to different GRC window. 11397 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 11398 * in PF and 3 windows in VF that can be customized to map in different 11399 * register blocks. 11400 */ 11401 static void bnxt_preset_reg_win(struct bnxt *bp) 11402 { 11403 if (BNXT_PF(bp)) { 11404 /* CAG registers map to GRC window #4 */ 11405 writel(BNXT_CAG_REG_BASE, 11406 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 11407 } 11408 } 11409 11410 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 11411 11412 static int bnxt_reinit_after_abort(struct bnxt *bp) 11413 { 11414 int rc; 11415 11416 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11417 return -EBUSY; 11418 11419 if (bp->dev->reg_state == NETREG_UNREGISTERED) 11420 return -ENODEV; 11421 11422 rc = bnxt_fw_init_one(bp); 11423 if (!rc) { 11424 bnxt_clear_int_mode(bp); 11425 rc = bnxt_init_int_mode(bp); 11426 if (!rc) { 11427 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11428 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11429 } 11430 } 11431 return rc; 11432 } 11433 11434 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11435 { 11436 int rc = 0; 11437 11438 bnxt_preset_reg_win(bp); 11439 netif_carrier_off(bp->dev); 11440 if (irq_re_init) { 11441 /* Reserve rings now if none were reserved at driver probe. 
*/ 11442 rc = bnxt_init_dflt_ring_mode(bp); 11443 if (rc) { 11444 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 11445 return rc; 11446 } 11447 } 11448 rc = bnxt_reserve_rings(bp, irq_re_init); 11449 if (rc) 11450 return rc; 11451 if ((bp->flags & BNXT_FLAG_RFS) && 11452 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 11453 /* disable RFS if falling back to INTA */ 11454 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 11455 bp->flags &= ~BNXT_FLAG_RFS; 11456 } 11457 11458 rc = bnxt_alloc_mem(bp, irq_re_init); 11459 if (rc) { 11460 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11461 goto open_err_free_mem; 11462 } 11463 11464 if (irq_re_init) { 11465 bnxt_init_napi(bp); 11466 rc = bnxt_request_irq(bp); 11467 if (rc) { 11468 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 11469 goto open_err_irq; 11470 } 11471 } 11472 11473 rc = bnxt_init_nic(bp, irq_re_init); 11474 if (rc) { 11475 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11476 goto open_err_irq; 11477 } 11478 11479 bnxt_enable_napi(bp); 11480 bnxt_debug_dev_init(bp); 11481 11482 if (link_re_init) { 11483 mutex_lock(&bp->link_lock); 11484 rc = bnxt_update_phy_setting(bp); 11485 mutex_unlock(&bp->link_lock); 11486 if (rc) { 11487 netdev_warn(bp->dev, "failed to update phy settings\n"); 11488 if (BNXT_SINGLE_PF(bp)) { 11489 bp->link_info.phy_retry = true; 11490 bp->link_info.phy_retry_expires = 11491 jiffies + 5 * HZ; 11492 } 11493 } 11494 } 11495 11496 if (irq_re_init) 11497 udp_tunnel_nic_reset_ntf(bp->dev); 11498 11499 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 11500 if (!static_key_enabled(&bnxt_xdp_locking_key)) 11501 static_branch_enable(&bnxt_xdp_locking_key); 11502 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 11503 static_branch_disable(&bnxt_xdp_locking_key); 11504 } 11505 set_bit(BNXT_STATE_OPEN, &bp->state); 11506 bnxt_enable_int(bp); 11507 /* Enable TX queues */ 11508 bnxt_tx_enable(bp); 11509 mod_timer(&bp->timer, jiffies + bp->current_interval); 11510 /* Poll link status and check for SFP+ module status */ 11511 mutex_lock(&bp->link_lock); 11512 bnxt_get_port_module_status(bp); 11513 mutex_unlock(&bp->link_lock); 11514 11515 /* VF-reps may need to be re-opened after the PF is re-opened */ 11516 if (BNXT_PF(bp)) 11517 bnxt_vf_reps_open(bp); 11518 bnxt_ptp_init_rtc(bp, true); 11519 bnxt_ptp_cfg_tstamp_filters(bp); 11520 return 0; 11521 11522 open_err_irq: 11523 bnxt_del_napi(bp); 11524 11525 open_err_free_mem: 11526 bnxt_free_skbs(bp); 11527 bnxt_free_irq(bp); 11528 bnxt_free_mem(bp, true); 11529 return rc; 11530 } 11531 11532 /* rtnl_lock held */ 11533 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11534 { 11535 int rc = 0; 11536 11537 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 11538 rc = -EIO; 11539 if (!rc) 11540 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 11541 if (rc) { 11542 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 11543 dev_close(bp->dev); 11544 } 11545 return rc; 11546 } 11547 11548 /* rtnl_lock held, open the NIC half way by allocating all resources, but 11549 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 11550 * self tests. 
11551 */ 11552 int bnxt_half_open_nic(struct bnxt *bp) 11553 { 11554 int rc = 0; 11555 11556 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 11557 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 11558 rc = -ENODEV; 11559 goto half_open_err; 11560 } 11561 11562 rc = bnxt_alloc_mem(bp, true); 11563 if (rc) { 11564 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11565 goto half_open_err; 11566 } 11567 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11568 rc = bnxt_init_nic(bp, true); 11569 if (rc) { 11570 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11571 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11572 goto half_open_err; 11573 } 11574 return 0; 11575 11576 half_open_err: 11577 bnxt_free_skbs(bp); 11578 bnxt_free_mem(bp, true); 11579 dev_close(bp->dev); 11580 return rc; 11581 } 11582 11583 /* rtnl_lock held, this call can only be made after a previous successful 11584 * call to bnxt_half_open_nic(). 11585 */ 11586 void bnxt_half_close_nic(struct bnxt *bp) 11587 { 11588 bnxt_hwrm_resource_free(bp, false, true); 11589 bnxt_free_skbs(bp); 11590 bnxt_free_mem(bp, true); 11591 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11592 } 11593 11594 void bnxt_reenable_sriov(struct bnxt *bp) 11595 { 11596 if (BNXT_PF(bp)) { 11597 struct bnxt_pf_info *pf = &bp->pf; 11598 int n = pf->active_vfs; 11599 11600 if (n) 11601 bnxt_cfg_hw_sriov(bp, &n, true); 11602 } 11603 } 11604 11605 static int bnxt_open(struct net_device *dev) 11606 { 11607 struct bnxt *bp = netdev_priv(dev); 11608 int rc; 11609 11610 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 11611 rc = bnxt_reinit_after_abort(bp); 11612 if (rc) { 11613 if (rc == -EBUSY) 11614 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 11615 else 11616 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 11617 return -ENODEV; 11618 } 11619 } 11620 11621 rc = bnxt_hwrm_if_change(bp, true); 11622 if (rc) 11623 return rc; 11624 11625 rc = __bnxt_open_nic(bp, true, true); 11626 if (rc) { 11627 bnxt_hwrm_if_change(bp, false); 11628 } else { 11629 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 11630 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11631 bnxt_ulp_start(bp, 0); 11632 bnxt_reenable_sriov(bp); 11633 } 11634 } 11635 } 11636 11637 return rc; 11638 } 11639 11640 static bool bnxt_drv_busy(struct bnxt *bp) 11641 { 11642 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 11643 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 11644 } 11645 11646 static void bnxt_get_ring_stats(struct bnxt *bp, 11647 struct rtnl_link_stats64 *stats); 11648 11649 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 11650 bool link_re_init) 11651 { 11652 /* Close the VF-reps before closing PF */ 11653 if (BNXT_PF(bp)) 11654 bnxt_vf_reps_close(bp); 11655 11656 /* Change device state to avoid TX queue wake up's */ 11657 bnxt_tx_disable(bp); 11658 11659 clear_bit(BNXT_STATE_OPEN, &bp->state); 11660 smp_mb__after_atomic(); 11661 while (bnxt_drv_busy(bp)) 11662 msleep(20); 11663 11664 /* Flush rings and disable interrupts */ 11665 bnxt_shutdown_nic(bp, irq_re_init); 11666 11667 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 11668 11669 bnxt_debug_dev_exit(bp); 11670 bnxt_disable_napi(bp); 11671 del_timer_sync(&bp->timer); 11672 bnxt_free_skbs(bp); 11673 11674 /* Save ring stats before shutdown */ 11675 if (bp->bnapi && irq_re_init) { 11676 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 11677 bnxt_get_ring_err_stats(bp, 
&bp->ring_err_stats_prev); 11678 } 11679 if (irq_re_init) { 11680 bnxt_free_irq(bp); 11681 bnxt_del_napi(bp); 11682 } 11683 bnxt_free_mem(bp, irq_re_init); 11684 } 11685 11686 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11687 { 11688 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11689 /* If we get here, it means firmware reset is in progress 11690 * while we are trying to close. We can safely proceed with 11691 * the close because we are holding rtnl_lock(). Some firmware 11692 * messages may fail as we proceed to close. We set the 11693 * ABORT_ERR flag here so that the FW reset thread will later 11694 * abort when it gets the rtnl_lock() and sees the flag. 11695 */ 11696 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 11697 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11698 } 11699 11700 #ifdef CONFIG_BNXT_SRIOV 11701 if (bp->sriov_cfg) { 11702 int rc; 11703 11704 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 11705 !bp->sriov_cfg, 11706 BNXT_SRIOV_CFG_WAIT_TMO); 11707 if (!rc) 11708 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); 11709 else if (rc < 0) 11710 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); 11711 } 11712 #endif 11713 __bnxt_close_nic(bp, irq_re_init, link_re_init); 11714 } 11715 11716 static int bnxt_close(struct net_device *dev) 11717 { 11718 struct bnxt *bp = netdev_priv(dev); 11719 11720 bnxt_close_nic(bp, true, true); 11721 bnxt_hwrm_shutdown_link(bp); 11722 bnxt_hwrm_if_change(bp, false); 11723 return 0; 11724 } 11725 11726 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 11727 u16 *val) 11728 { 11729 struct hwrm_port_phy_mdio_read_output *resp; 11730 struct hwrm_port_phy_mdio_read_input *req; 11731 int rc; 11732 11733 if (bp->hwrm_spec_code < 0x10a00) 11734 return -EOPNOTSUPP; 11735 11736 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 11737 if (rc) 11738 return rc; 11739 11740 req->port_id = cpu_to_le16(bp->pf.port_id); 11741 req->phy_addr = phy_addr; 11742 req->reg_addr = cpu_to_le16(reg & 0x1f); 11743 if (mdio_phy_id_is_c45(phy_addr)) { 11744 req->cl45_mdio = 1; 11745 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11746 req->dev_addr = mdio_phy_id_devad(phy_addr); 11747 req->reg_addr = cpu_to_le16(reg); 11748 } 11749 11750 resp = hwrm_req_hold(bp, req); 11751 rc = hwrm_req_send(bp, req); 11752 if (!rc) 11753 *val = le16_to_cpu(resp->reg_data); 11754 hwrm_req_drop(bp, req); 11755 return rc; 11756 } 11757 11758 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 11759 u16 val) 11760 { 11761 struct hwrm_port_phy_mdio_write_input *req; 11762 int rc; 11763 11764 if (bp->hwrm_spec_code < 0x10a00) 11765 return -EOPNOTSUPP; 11766 11767 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 11768 if (rc) 11769 return rc; 11770 11771 req->port_id = cpu_to_le16(bp->pf.port_id); 11772 req->phy_addr = phy_addr; 11773 req->reg_addr = cpu_to_le16(reg & 0x1f); 11774 if (mdio_phy_id_is_c45(phy_addr)) { 11775 req->cl45_mdio = 1; 11776 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11777 req->dev_addr = mdio_phy_id_devad(phy_addr); 11778 req->reg_addr = cpu_to_le16(reg); 11779 } 11780 req->reg_data = cpu_to_le16(val); 11781 11782 return hwrm_req_send(bp, req); 11783 } 11784 11785 /* rtnl_lock held */ 11786 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11787 { 11788 struct mii_ioctl_data *mdio = if_mii(ifr); 11789 struct bnxt *bp = 
netdev_priv(dev); 11790 int rc; 11791 11792 switch (cmd) { 11793 case SIOCGMIIPHY: 11794 mdio->phy_id = bp->link_info.phy_addr; 11795 11796 fallthrough; 11797 case SIOCGMIIREG: { 11798 u16 mii_regval = 0; 11799 11800 if (!netif_running(dev)) 11801 return -EAGAIN; 11802 11803 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 11804 &mii_regval); 11805 mdio->val_out = mii_regval; 11806 return rc; 11807 } 11808 11809 case SIOCSMIIREG: 11810 if (!netif_running(dev)) 11811 return -EAGAIN; 11812 11813 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 11814 mdio->val_in); 11815 11816 case SIOCSHWTSTAMP: 11817 return bnxt_hwtstamp_set(dev, ifr); 11818 11819 case SIOCGHWTSTAMP: 11820 return bnxt_hwtstamp_get(dev, ifr); 11821 11822 default: 11823 /* do nothing */ 11824 break; 11825 } 11826 return -EOPNOTSUPP; 11827 } 11828 11829 static void bnxt_get_ring_stats(struct bnxt *bp, 11830 struct rtnl_link_stats64 *stats) 11831 { 11832 int i; 11833 11834 for (i = 0; i < bp->cp_nr_rings; i++) { 11835 struct bnxt_napi *bnapi = bp->bnapi[i]; 11836 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 11837 u64 *sw = cpr->stats.sw_stats; 11838 11839 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 11840 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11841 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 11842 11843 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 11844 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 11845 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 11846 11847 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 11848 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 11849 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 11850 11851 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 11852 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 11853 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 11854 11855 stats->rx_missed_errors += 11856 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 11857 11858 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11859 11860 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 11861 11862 stats->rx_dropped += 11863 cpr->sw_stats.rx.rx_netpoll_discards + 11864 cpr->sw_stats.rx.rx_oom_discards; 11865 } 11866 } 11867 11868 static void bnxt_add_prev_stats(struct bnxt *bp, 11869 struct rtnl_link_stats64 *stats) 11870 { 11871 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 11872 11873 stats->rx_packets += prev_stats->rx_packets; 11874 stats->tx_packets += prev_stats->tx_packets; 11875 stats->rx_bytes += prev_stats->rx_bytes; 11876 stats->tx_bytes += prev_stats->tx_bytes; 11877 stats->rx_missed_errors += prev_stats->rx_missed_errors; 11878 stats->multicast += prev_stats->multicast; 11879 stats->rx_dropped += prev_stats->rx_dropped; 11880 stats->tx_dropped += prev_stats->tx_dropped; 11881 } 11882 11883 static void 11884 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 11885 { 11886 struct bnxt *bp = netdev_priv(dev); 11887 11888 set_bit(BNXT_STATE_READ_STATS, &bp->state); 11889 /* Make sure bnxt_close_nic() sees that we are reading stats before 11890 * we check the BNXT_STATE_OPEN flag. 
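	 * This pairs with the barrier in __bnxt_close_nic(), which clears
	 * BNXT_STATE_OPEN and then waits for BNXT_STATE_READ_STATS to clear
	 * before shutting down the rings.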
11891 */ 11892 smp_mb__after_atomic(); 11893 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11894 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11895 *stats = bp->net_stats_prev; 11896 return; 11897 } 11898 11899 bnxt_get_ring_stats(bp, stats); 11900 bnxt_add_prev_stats(bp, stats); 11901 11902 if (bp->flags & BNXT_FLAG_PORT_STATS) { 11903 u64 *rx = bp->port_stats.sw_stats; 11904 u64 *tx = bp->port_stats.sw_stats + 11905 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 11906 11907 stats->rx_crc_errors = 11908 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 11909 stats->rx_frame_errors = 11910 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 11911 stats->rx_length_errors = 11912 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 11913 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 11914 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 11915 stats->rx_errors = 11916 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 11917 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 11918 stats->collisions = 11919 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 11920 stats->tx_fifo_errors = 11921 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 11922 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 11923 } 11924 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11925 } 11926 11927 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 11928 struct bnxt_total_ring_err_stats *stats, 11929 struct bnxt_cp_ring_info *cpr) 11930 { 11931 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; 11932 u64 *hw_stats = cpr->stats.sw_stats; 11933 11934 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 11935 stats->rx_total_resets += sw_stats->rx.rx_resets; 11936 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 11937 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 11938 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 11939 stats->rx_total_ring_discards += 11940 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 11941 stats->tx_total_resets += sw_stats->tx.tx_resets; 11942 stats->tx_total_ring_discards += 11943 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 11944 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 11945 } 11946 11947 void bnxt_get_ring_err_stats(struct bnxt *bp, 11948 struct bnxt_total_ring_err_stats *stats) 11949 { 11950 int i; 11951 11952 for (i = 0; i < bp->cp_nr_rings; i++) 11953 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 11954 } 11955 11956 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 11957 { 11958 struct net_device *dev = bp->dev; 11959 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11960 struct netdev_hw_addr *ha; 11961 u8 *haddr; 11962 int mc_count = 0; 11963 bool update = false; 11964 int off = 0; 11965 11966 netdev_for_each_mc_addr(ha, dev) { 11967 if (mc_count >= BNXT_MAX_MC_ADDRS) { 11968 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11969 vnic->mc_list_count = 0; 11970 return false; 11971 } 11972 haddr = ha->addr; 11973 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 11974 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 11975 update = true; 11976 } 11977 off += ETH_ALEN; 11978 mc_count++; 11979 } 11980 if (mc_count) 11981 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 11982 11983 if (mc_count != vnic->mc_list_count) { 11984 vnic->mc_list_count = mc_count; 11985 update = true; 11986 } 11987 return update; 11988 } 11989 11990 static bool bnxt_uc_list_updated(struct bnxt *bp) 11991 { 11992 struct net_device *dev = bp->dev; 11993 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11994 struct 
netdev_hw_addr *ha; 11995 int off = 0; 11996 11997 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 11998 return true; 11999 12000 netdev_for_each_uc_addr(ha, dev) { 12001 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 12002 return true; 12003 12004 off += ETH_ALEN; 12005 } 12006 return false; 12007 } 12008 12009 static void bnxt_set_rx_mode(struct net_device *dev) 12010 { 12011 struct bnxt *bp = netdev_priv(dev); 12012 struct bnxt_vnic_info *vnic; 12013 bool mc_update = false; 12014 bool uc_update; 12015 u32 mask; 12016 12017 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 12018 return; 12019 12020 vnic = &bp->vnic_info[0]; 12021 mask = vnic->rx_mask; 12022 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 12023 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 12024 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 12025 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 12026 12027 if (dev->flags & IFF_PROMISC) 12028 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12029 12030 uc_update = bnxt_uc_list_updated(bp); 12031 12032 if (dev->flags & IFF_BROADCAST) 12033 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 12034 if (dev->flags & IFF_ALLMULTI) { 12035 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 12036 vnic->mc_list_count = 0; 12037 } else if (dev->flags & IFF_MULTICAST) { 12038 mc_update = bnxt_mc_list_updated(bp, &mask); 12039 } 12040 12041 if (mask != vnic->rx_mask || uc_update || mc_update) { 12042 vnic->rx_mask = mask; 12043 12044 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12045 } 12046 } 12047 12048 static int bnxt_cfg_rx_mode(struct bnxt *bp) 12049 { 12050 struct net_device *dev = bp->dev; 12051 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 12052 struct netdev_hw_addr *ha; 12053 int i, off = 0, rc; 12054 bool uc_update; 12055 12056 netif_addr_lock_bh(dev); 12057 uc_update = bnxt_uc_list_updated(bp); 12058 netif_addr_unlock_bh(dev); 12059 12060 if (!uc_update) 12061 goto skip_uc; 12062 12063 for (i = 1; i < vnic->uc_filter_count; i++) { 12064 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 12065 12066 bnxt_hwrm_l2_filter_free(bp, fltr); 12067 bnxt_del_l2_filter(bp, fltr); 12068 } 12069 12070 vnic->uc_filter_count = 1; 12071 12072 netif_addr_lock_bh(dev); 12073 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 12074 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12075 } else { 12076 netdev_for_each_uc_addr(ha, dev) { 12077 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 12078 off += ETH_ALEN; 12079 vnic->uc_filter_count++; 12080 } 12081 } 12082 netif_addr_unlock_bh(dev); 12083 12084 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 12085 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 12086 if (rc) { 12087 if (BNXT_VF(bp) && rc == -ENODEV) { 12088 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12089 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 12090 else 12091 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); 12092 rc = 0; 12093 } else { 12094 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 12095 } 12096 vnic->uc_filter_count = i; 12097 return rc; 12098 } 12099 } 12100 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12101 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 12102 12103 skip_uc: 12104 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 12105 !bnxt_promisc_ok(bp)) 12106 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 12107 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 12108 
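	/* If the rx mask update fails while a multicast list is programmed,
	 * drop the MC list, fall back to accepting all multicast and retry.
	 */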
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
	}
	if (rc)
		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}

static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		/* No minimum rings were provisioned by the PF. Don't
		 * reserve rings by default when device is down.
		 */
		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
			return true;

		if (!netif_running(bp->dev))
			return false;
	}
#endif
	return true;
}

/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
			return true;
		return false;
	}
	/* 212 firmware is broken for aRFS */
	if (BNXT_FW_MAJ(bp) == 212)
		return false;
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
		return true;
	return false;
}

/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
	int vnics, max_vnics, max_rss_ctxs;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return bnxt_rfs_supported(bp);
	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
		return false;

	vnics = 1 + bp->rx_nr_rings;
	max_vnics = bnxt_get_max_func_vnics(bp);
	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

	/* RSS contexts not a limiting factor */
	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
		max_rss_ctxs = max_vnics;
	if (vnics > max_vnics || vnics > max_rss_ctxs) {
		if (bp->rx_nr_rings > 1)
			netdev_warn(bp->dev,
				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
				    min(max_rss_ctxs - 1, max_vnics - 1));
		return false;
	}

	if (!BNXT_NEW_RM(bp))
		return true;

	if (vnics == bp->hw_resc.resv_vnics)
		return true;

	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
	if (vnics <= bp->hw_resc.resv_vnics)
		return true;

	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
	return false;
}

static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);
	netdev_features_t vlan_features;

	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
		features &= ~NETIF_F_NTUPLE;

	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);

	if (!(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
12219 * turned on or off together. 12220 */ 12221 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 12222 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 12223 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12224 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12225 else if (vlan_features) 12226 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 12227 } 12228 #ifdef CONFIG_BNXT_SRIOV 12229 if (BNXT_VF(bp) && bp->vf.vlan) 12230 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 12231 #endif 12232 return features; 12233 } 12234 12235 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 12236 { 12237 struct bnxt *bp = netdev_priv(dev); 12238 u32 flags = bp->flags; 12239 u32 changes; 12240 int rc = 0; 12241 bool re_init = false; 12242 bool update_tpa = false; 12243 12244 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 12245 if (features & NETIF_F_GRO_HW) 12246 flags |= BNXT_FLAG_GRO; 12247 else if (features & NETIF_F_LRO) 12248 flags |= BNXT_FLAG_LRO; 12249 12250 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 12251 flags &= ~BNXT_FLAG_TPA; 12252 12253 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12254 flags |= BNXT_FLAG_STRIP_VLAN; 12255 12256 if (features & NETIF_F_NTUPLE) 12257 flags |= BNXT_FLAG_RFS; 12258 12259 changes = flags ^ bp->flags; 12260 if (changes & BNXT_FLAG_TPA) { 12261 update_tpa = true; 12262 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 12263 (flags & BNXT_FLAG_TPA) == 0 || 12264 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12265 re_init = true; 12266 } 12267 12268 if (changes & ~BNXT_FLAG_TPA) 12269 re_init = true; 12270 12271 if (flags != bp->flags) { 12272 u32 old_flags = bp->flags; 12273 12274 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12275 bp->flags = flags; 12276 if (update_tpa) 12277 bnxt_set_ring_params(bp); 12278 return rc; 12279 } 12280 12281 if (re_init) { 12282 bnxt_close_nic(bp, false, false); 12283 bp->flags = flags; 12284 if (update_tpa) 12285 bnxt_set_ring_params(bp); 12286 12287 return bnxt_open_nic(bp, false, false); 12288 } 12289 if (update_tpa) { 12290 bp->flags = flags; 12291 rc = bnxt_set_tpa(bp, 12292 (flags & BNXT_FLAG_TPA) ? 12293 true : false); 12294 if (rc) 12295 bp->flags = old_flags; 12296 } 12297 } 12298 return rc; 12299 } 12300 12301 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 12302 u8 **nextp) 12303 { 12304 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 12305 struct hop_jumbo_hdr *jhdr; 12306 int hdr_count = 0; 12307 u8 *nexthdr; 12308 int start; 12309 12310 /* Check that there are at most 2 IPv6 extension headers, no 12311 * fragment header, and each is <= 64 bytes. 12312 */ 12313 start = nw_off + sizeof(*ip6h); 12314 nexthdr = &ip6h->nexthdr; 12315 while (ipv6_ext_hdr(*nexthdr)) { 12316 struct ipv6_opt_hdr *hp; 12317 int hdrlen; 12318 12319 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 12320 *nexthdr == NEXTHDR_FRAGMENT) 12321 return false; 12322 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 12323 skb_headlen(skb), NULL); 12324 if (!hp) 12325 return false; 12326 if (*nexthdr == NEXTHDR_AUTH) 12327 hdrlen = ipv6_authlen(hp); 12328 else 12329 hdrlen = ipv6_optlen(hp); 12330 12331 if (hdrlen > 64) 12332 return false; 12333 12334 /* The ext header may be a hop-by-hop header inserted for 12335 * big TCP purposes. This will be removed before sending 12336 * from NIC, so do not count it. 
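		 * Only a well-formed jumbo TLV (IPV6_TLV_JUMBO, hdrlen 0,
		 * next header TCP) on a packet larger than GRO_LEGACY_MAX_SIZE
		 * is skipped; any other hop-by-hop header is counted like a
		 * normal extension header.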
12337 */ 12338 if (*nexthdr == NEXTHDR_HOP) { 12339 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 12340 goto increment_hdr; 12341 12342 jhdr = (struct hop_jumbo_hdr *)hp; 12343 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 12344 jhdr->nexthdr != IPPROTO_TCP) 12345 goto increment_hdr; 12346 12347 goto next_hdr; 12348 } 12349 increment_hdr: 12350 hdr_count++; 12351 next_hdr: 12352 nexthdr = &hp->nexthdr; 12353 start += hdrlen; 12354 } 12355 if (nextp) { 12356 /* Caller will check inner protocol */ 12357 if (skb->encapsulation) { 12358 *nextp = nexthdr; 12359 return true; 12360 } 12361 *nextp = NULL; 12362 } 12363 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 12364 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 12365 } 12366 12367 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ 12368 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 12369 { 12370 struct udphdr *uh = udp_hdr(skb); 12371 __be16 udp_port = uh->dest; 12372 12373 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 12374 udp_port != bp->vxlan_gpe_port) 12375 return false; 12376 if (skb->inner_protocol == htons(ETH_P_TEB)) { 12377 struct ethhdr *eh = inner_eth_hdr(skb); 12378 12379 switch (eh->h_proto) { 12380 case htons(ETH_P_IP): 12381 return true; 12382 case htons(ETH_P_IPV6): 12383 return bnxt_exthdr_check(bp, skb, 12384 skb_inner_network_offset(skb), 12385 NULL); 12386 } 12387 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 12388 return true; 12389 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 12390 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 12391 NULL); 12392 } 12393 return false; 12394 } 12395 12396 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 12397 { 12398 switch (l4_proto) { 12399 case IPPROTO_UDP: 12400 return bnxt_udp_tunl_check(bp, skb); 12401 case IPPROTO_IPIP: 12402 return true; 12403 case IPPROTO_GRE: { 12404 switch (skb->inner_protocol) { 12405 default: 12406 return false; 12407 case htons(ETH_P_IP): 12408 return true; 12409 case htons(ETH_P_IPV6): 12410 fallthrough; 12411 } 12412 } 12413 case IPPROTO_IPV6: 12414 /* Check ext headers of inner ipv6 */ 12415 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 12416 NULL); 12417 } 12418 return false; 12419 } 12420 12421 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 12422 struct net_device *dev, 12423 netdev_features_t features) 12424 { 12425 struct bnxt *bp = netdev_priv(dev); 12426 u8 *l4_proto; 12427 12428 features = vlan_features_check(skb, features); 12429 switch (vlan_get_protocol(skb)) { 12430 case htons(ETH_P_IP): 12431 if (!skb->encapsulation) 12432 return features; 12433 l4_proto = &ip_hdr(skb)->protocol; 12434 if (bnxt_tunl_check(bp, skb, *l4_proto)) 12435 return features; 12436 break; 12437 case htons(ETH_P_IPV6): 12438 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 12439 &l4_proto)) 12440 break; 12441 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 12442 return features; 12443 break; 12444 } 12445 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 12446 } 12447 12448 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 12449 u32 *reg_buf) 12450 { 12451 struct hwrm_dbg_read_direct_output *resp; 12452 struct hwrm_dbg_read_direct_input *req; 12453 __le32 *dbg_reg_buf; 12454 dma_addr_t mapping; 12455 int rc, i; 12456 12457 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 12458 if (rc) 12459 return rc; 12460 12461 dbg_reg_buf = 
hwrm_req_dma_slice(bp, req, num_words * 4, 12462 &mapping); 12463 if (!dbg_reg_buf) { 12464 rc = -ENOMEM; 12465 goto dbg_rd_reg_exit; 12466 } 12467 12468 req->host_dest_addr = cpu_to_le64(mapping); 12469 12470 resp = hwrm_req_hold(bp, req); 12471 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 12472 req->read_len32 = cpu_to_le32(num_words); 12473 12474 rc = hwrm_req_send(bp, req); 12475 if (rc || resp->error_code) { 12476 rc = -EIO; 12477 goto dbg_rd_reg_exit; 12478 } 12479 for (i = 0; i < num_words; i++) 12480 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 12481 12482 dbg_rd_reg_exit: 12483 hwrm_req_drop(bp, req); 12484 return rc; 12485 } 12486 12487 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 12488 u32 ring_id, u32 *prod, u32 *cons) 12489 { 12490 struct hwrm_dbg_ring_info_get_output *resp; 12491 struct hwrm_dbg_ring_info_get_input *req; 12492 int rc; 12493 12494 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 12495 if (rc) 12496 return rc; 12497 12498 req->ring_type = ring_type; 12499 req->fw_ring_id = cpu_to_le32(ring_id); 12500 resp = hwrm_req_hold(bp, req); 12501 rc = hwrm_req_send(bp, req); 12502 if (!rc) { 12503 *prod = le32_to_cpu(resp->producer_index); 12504 *cons = le32_to_cpu(resp->consumer_index); 12505 } 12506 hwrm_req_drop(bp, req); 12507 return rc; 12508 } 12509 12510 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 12511 { 12512 struct bnxt_tx_ring_info *txr; 12513 int i = bnapi->index, j; 12514 12515 bnxt_for_each_napi_tx(j, bnapi, txr) 12516 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 12517 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 12518 txr->tx_cons); 12519 } 12520 12521 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 12522 { 12523 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 12524 int i = bnapi->index; 12525 12526 if (!rxr) 12527 return; 12528 12529 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 12530 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 12531 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 12532 rxr->rx_sw_agg_prod); 12533 } 12534 12535 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 12536 { 12537 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 12538 int i = bnapi->index; 12539 12540 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 12541 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 12542 } 12543 12544 static void bnxt_dbg_dump_states(struct bnxt *bp) 12545 { 12546 int i; 12547 struct bnxt_napi *bnapi; 12548 12549 for (i = 0; i < bp->cp_nr_rings; i++) { 12550 bnapi = bp->bnapi[i]; 12551 if (netif_msg_drv(bp)) { 12552 bnxt_dump_tx_sw_state(bnapi); 12553 bnxt_dump_rx_sw_state(bnapi); 12554 bnxt_dump_cp_sw_state(bnapi); 12555 } 12556 } 12557 } 12558 12559 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 12560 { 12561 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 12562 struct hwrm_ring_reset_input *req; 12563 struct bnxt_napi *bnapi = rxr->bnapi; 12564 struct bnxt_cp_ring_info *cpr; 12565 u16 cp_ring_id; 12566 int rc; 12567 12568 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 12569 if (rc) 12570 return rc; 12571 12572 cpr = &bnapi->cp_ring; 12573 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 12574 req->cmpl_ring = cpu_to_le16(cp_ring_id); 12575 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 12576 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 12577 return hwrm_req_send_silent(bp, req); 12578 } 12579 
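/* Reinitialize the NIC after an error.  Called under rtnl_lock.  A silent
 * reset only closes and reopens the rings without re-initializing the IRQs;
 * a normal reset also dumps the ring state, stops the ULP driver,
 * re-initializes the IRQs and restarts the ULP driver with the result of
 * the reopen.
 */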
12580 static void bnxt_reset_task(struct bnxt *bp, bool silent) 12581 { 12582 if (!silent) 12583 bnxt_dbg_dump_states(bp); 12584 if (netif_running(bp->dev)) { 12585 int rc; 12586 12587 if (silent) { 12588 bnxt_close_nic(bp, false, false); 12589 bnxt_open_nic(bp, false, false); 12590 } else { 12591 bnxt_ulp_stop(bp); 12592 bnxt_close_nic(bp, true, false); 12593 rc = bnxt_open_nic(bp, true, false); 12594 bnxt_ulp_start(bp, rc); 12595 } 12596 } 12597 } 12598 12599 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 12600 { 12601 struct bnxt *bp = netdev_priv(dev); 12602 12603 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 12604 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 12605 } 12606 12607 static void bnxt_fw_health_check(struct bnxt *bp) 12608 { 12609 struct bnxt_fw_health *fw_health = bp->fw_health; 12610 struct pci_dev *pdev = bp->pdev; 12611 u32 val; 12612 12613 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12614 return; 12615 12616 /* Make sure it is enabled before checking the tmr_counter. */ 12617 smp_rmb(); 12618 if (fw_health->tmr_counter) { 12619 fw_health->tmr_counter--; 12620 return; 12621 } 12622 12623 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12624 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 12625 fw_health->arrests++; 12626 goto fw_reset; 12627 } 12628 12629 fw_health->last_fw_heartbeat = val; 12630 12631 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12632 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 12633 fw_health->discoveries++; 12634 goto fw_reset; 12635 } 12636 12637 fw_health->tmr_counter = fw_health->tmr_multiplier; 12638 return; 12639 12640 fw_reset: 12641 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 12642 } 12643 12644 static void bnxt_timer(struct timer_list *t) 12645 { 12646 struct bnxt *bp = from_timer(bp, t, timer); 12647 struct net_device *dev = bp->dev; 12648 12649 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 12650 return; 12651 12652 if (atomic_read(&bp->intr_sem) != 0) 12653 goto bnxt_restart_timer; 12654 12655 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 12656 bnxt_fw_health_check(bp); 12657 12658 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 12659 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 12660 12661 if (bnxt_tc_flower_enabled(bp)) 12662 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 12663 12664 #ifdef CONFIG_RFS_ACCEL 12665 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 12666 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 12667 #endif /*CONFIG_RFS_ACCEL*/ 12668 12669 if (bp->link_info.phy_retry) { 12670 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 12671 bp->link_info.phy_retry = false; 12672 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 12673 } else { 12674 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 12675 } 12676 } 12677 12678 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12679 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12680 12681 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 12682 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 12683 12684 bnxt_restart_timer: 12685 mod_timer(&bp->timer, jiffies + bp->current_interval); 12686 } 12687 12688 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 12689 { 12690 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 12691 * set. 
If the device is being closed, bnxt_close() may be holding 12692 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 12693 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 12694 */ 12695 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12696 rtnl_lock(); 12697 } 12698 12699 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 12700 { 12701 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12702 rtnl_unlock(); 12703 } 12704 12705 /* Only called from bnxt_sp_task() */ 12706 static void bnxt_reset(struct bnxt *bp, bool silent) 12707 { 12708 bnxt_rtnl_lock_sp(bp); 12709 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 12710 bnxt_reset_task(bp, silent); 12711 bnxt_rtnl_unlock_sp(bp); 12712 } 12713 12714 /* Only called from bnxt_sp_task() */ 12715 static void bnxt_rx_ring_reset(struct bnxt *bp) 12716 { 12717 int i; 12718 12719 bnxt_rtnl_lock_sp(bp); 12720 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12721 bnxt_rtnl_unlock_sp(bp); 12722 return; 12723 } 12724 /* Disable and flush TPA before resetting the RX ring */ 12725 if (bp->flags & BNXT_FLAG_TPA) 12726 bnxt_set_tpa(bp, false); 12727 for (i = 0; i < bp->rx_nr_rings; i++) { 12728 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 12729 struct bnxt_cp_ring_info *cpr; 12730 int rc; 12731 12732 if (!rxr->bnapi->in_reset) 12733 continue; 12734 12735 rc = bnxt_hwrm_rx_ring_reset(bp, i); 12736 if (rc) { 12737 if (rc == -EINVAL || rc == -EOPNOTSUPP) 12738 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 12739 else 12740 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 12741 rc); 12742 bnxt_reset_task(bp, true); 12743 break; 12744 } 12745 bnxt_free_one_rx_ring_skbs(bp, i); 12746 rxr->rx_prod = 0; 12747 rxr->rx_agg_prod = 0; 12748 rxr->rx_sw_agg_prod = 0; 12749 rxr->rx_next_cons = 0; 12750 rxr->bnapi->in_reset = false; 12751 bnxt_alloc_one_rx_ring(bp, i); 12752 cpr = &rxr->bnapi->cp_ring; 12753 cpr->sw_stats.rx.rx_resets++; 12754 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12755 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 12756 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 12757 } 12758 if (bp->flags & BNXT_FLAG_TPA) 12759 bnxt_set_tpa(bp, true); 12760 bnxt_rtnl_unlock_sp(bp); 12761 } 12762 12763 static void bnxt_fw_reset_close(struct bnxt *bp) 12764 { 12765 bnxt_ulp_stop(bp); 12766 /* When firmware is in fatal state, quiesce device and disable 12767 * bus master to prevent any potential bad DMAs before freeing 12768 * kernel memory. 
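	 * A PCI config space read of all 1s below means the device is most
	 * likely no longer accessible, so the minimum post-reset wait is
	 * skipped in that case.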
12769 */ 12770 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 12771 u16 val = 0; 12772 12773 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 12774 if (val == 0xffff) 12775 bp->fw_reset_min_dsecs = 0; 12776 bnxt_tx_disable(bp); 12777 bnxt_disable_napi(bp); 12778 bnxt_disable_int_sync(bp); 12779 bnxt_free_irq(bp); 12780 bnxt_clear_int_mode(bp); 12781 pci_disable_device(bp->pdev); 12782 } 12783 __bnxt_close_nic(bp, true, false); 12784 bnxt_vf_reps_free(bp); 12785 bnxt_clear_int_mode(bp); 12786 bnxt_hwrm_func_drv_unrgtr(bp); 12787 if (pci_is_enabled(bp->pdev)) 12788 pci_disable_device(bp->pdev); 12789 bnxt_free_ctx_mem(bp); 12790 } 12791 12792 static bool is_bnxt_fw_ok(struct bnxt *bp) 12793 { 12794 struct bnxt_fw_health *fw_health = bp->fw_health; 12795 bool no_heartbeat = false, has_reset = false; 12796 u32 val; 12797 12798 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12799 if (val == fw_health->last_fw_heartbeat) 12800 no_heartbeat = true; 12801 12802 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12803 if (val != fw_health->last_fw_reset_cnt) 12804 has_reset = true; 12805 12806 if (!no_heartbeat && has_reset) 12807 return true; 12808 12809 return false; 12810 } 12811 12812 /* rtnl_lock is acquired before calling this function */ 12813 static void bnxt_force_fw_reset(struct bnxt *bp) 12814 { 12815 struct bnxt_fw_health *fw_health = bp->fw_health; 12816 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12817 u32 wait_dsecs; 12818 12819 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 12820 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12821 return; 12822 12823 if (ptp) { 12824 spin_lock_bh(&ptp->ptp_lock); 12825 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12826 spin_unlock_bh(&ptp->ptp_lock); 12827 } else { 12828 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12829 } 12830 bnxt_fw_reset_close(bp); 12831 wait_dsecs = fw_health->master_func_wait_dsecs; 12832 if (fw_health->primary) { 12833 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 12834 wait_dsecs = 0; 12835 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 12836 } else { 12837 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 12838 wait_dsecs = fw_health->normal_func_wait_dsecs; 12839 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12840 } 12841 12842 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 12843 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 12844 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 12845 } 12846 12847 void bnxt_fw_exception(struct bnxt *bp) 12848 { 12849 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 12850 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 12851 bnxt_rtnl_lock_sp(bp); 12852 bnxt_force_fw_reset(bp); 12853 bnxt_rtnl_unlock_sp(bp); 12854 } 12855 12856 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 12857 * < 0 on error. 
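 * Always returns 0 on a VF or when CONFIG_BNXT_SRIOV is not enabled.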
12858 */ 12859 static int bnxt_get_registered_vfs(struct bnxt *bp) 12860 { 12861 #ifdef CONFIG_BNXT_SRIOV 12862 int rc; 12863 12864 if (!BNXT_PF(bp)) 12865 return 0; 12866 12867 rc = bnxt_hwrm_func_qcfg(bp); 12868 if (rc) { 12869 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 12870 return rc; 12871 } 12872 if (bp->pf.registered_vfs) 12873 return bp->pf.registered_vfs; 12874 if (bp->sriov_cfg) 12875 return 1; 12876 #endif 12877 return 0; 12878 } 12879 12880 void bnxt_fw_reset(struct bnxt *bp) 12881 { 12882 bnxt_rtnl_lock_sp(bp); 12883 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 12884 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12885 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12886 int n = 0, tmo; 12887 12888 if (ptp) { 12889 spin_lock_bh(&ptp->ptp_lock); 12890 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12891 spin_unlock_bh(&ptp->ptp_lock); 12892 } else { 12893 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12894 } 12895 if (bp->pf.active_vfs && 12896 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 12897 n = bnxt_get_registered_vfs(bp); 12898 if (n < 0) { 12899 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 12900 n); 12901 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12902 dev_close(bp->dev); 12903 goto fw_reset_exit; 12904 } else if (n > 0) { 12905 u16 vf_tmo_dsecs = n * 10; 12906 12907 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 12908 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 12909 bp->fw_reset_state = 12910 BNXT_FW_RESET_STATE_POLL_VF; 12911 bnxt_queue_fw_reset_work(bp, HZ / 10); 12912 goto fw_reset_exit; 12913 } 12914 bnxt_fw_reset_close(bp); 12915 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 12916 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 12917 tmo = HZ / 10; 12918 } else { 12919 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12920 tmo = bp->fw_reset_min_dsecs * HZ / 10; 12921 } 12922 bnxt_queue_fw_reset_work(bp, tmo); 12923 } 12924 fw_reset_exit: 12925 bnxt_rtnl_unlock_sp(bp); 12926 } 12927 12928 static void bnxt_chk_missed_irq(struct bnxt *bp) 12929 { 12930 int i; 12931 12932 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12933 return; 12934 12935 for (i = 0; i < bp->cp_nr_rings; i++) { 12936 struct bnxt_napi *bnapi = bp->bnapi[i]; 12937 struct bnxt_cp_ring_info *cpr; 12938 u32 fw_ring_id; 12939 int j; 12940 12941 if (!bnapi) 12942 continue; 12943 12944 cpr = &bnapi->cp_ring; 12945 for (j = 0; j < cpr->cp_ring_count; j++) { 12946 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 12947 u32 val[2]; 12948 12949 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 12950 continue; 12951 12952 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 12953 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 12954 continue; 12955 } 12956 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 12957 bnxt_dbg_hwrm_ring_info_get(bp, 12958 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 12959 fw_ring_id, &val[0], &val[1]); 12960 cpr->sw_stats.cmn.missed_irqs++; 12961 } 12962 } 12963 } 12964 12965 static void bnxt_cfg_ntp_filters(struct bnxt *); 12966 12967 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 12968 { 12969 struct bnxt_link_info *link_info = &bp->link_info; 12970 12971 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 12972 link_info->autoneg = BNXT_AUTONEG_SPEED; 12973 if (bp->hwrm_spec_code >= 0x10201) { 12974 if (link_info->auto_pause_setting & 12975 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 12976 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12977 } else { 12978 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12979 } 12980 bnxt_set_auto_speed(link_info); 
12981 } else { 12982 bnxt_set_force_speed(link_info); 12983 link_info->req_duplex = link_info->duplex_setting; 12984 } 12985 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 12986 link_info->req_flow_ctrl = 12987 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 12988 else 12989 link_info->req_flow_ctrl = link_info->force_pause_setting; 12990 } 12991 12992 static void bnxt_fw_echo_reply(struct bnxt *bp) 12993 { 12994 struct bnxt_fw_health *fw_health = bp->fw_health; 12995 struct hwrm_func_echo_response_input *req; 12996 int rc; 12997 12998 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 12999 if (rc) 13000 return; 13001 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 13002 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 13003 hwrm_req_send(bp, req); 13004 } 13005 13006 static void bnxt_sp_task(struct work_struct *work) 13007 { 13008 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 13009 13010 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13011 smp_mb__after_atomic(); 13012 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13013 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13014 return; 13015 } 13016 13017 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 13018 bnxt_cfg_rx_mode(bp); 13019 13020 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 13021 bnxt_cfg_ntp_filters(bp); 13022 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 13023 bnxt_hwrm_exec_fwd_req(bp); 13024 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 13025 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 13026 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 13027 bnxt_hwrm_port_qstats(bp, 0); 13028 bnxt_hwrm_port_qstats_ext(bp, 0); 13029 bnxt_accumulate_all_stats(bp); 13030 } 13031 13032 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 13033 int rc; 13034 13035 mutex_lock(&bp->link_lock); 13036 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 13037 &bp->sp_event)) 13038 bnxt_hwrm_phy_qcaps(bp); 13039 13040 rc = bnxt_update_link(bp, true); 13041 if (rc) 13042 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 13043 rc); 13044 13045 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 13046 &bp->sp_event)) 13047 bnxt_init_ethtool_link_settings(bp); 13048 mutex_unlock(&bp->link_lock); 13049 } 13050 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 13051 int rc; 13052 13053 mutex_lock(&bp->link_lock); 13054 rc = bnxt_update_phy_setting(bp); 13055 mutex_unlock(&bp->link_lock); 13056 if (rc) { 13057 netdev_warn(bp->dev, "update phy settings retry failed\n"); 13058 } else { 13059 bp->link_info.phy_retry = false; 13060 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 13061 } 13062 } 13063 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 13064 mutex_lock(&bp->link_lock); 13065 bnxt_get_port_module_status(bp); 13066 mutex_unlock(&bp->link_lock); 13067 } 13068 13069 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 13070 bnxt_tc_flow_stats_work(bp); 13071 13072 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 13073 bnxt_chk_missed_irq(bp); 13074 13075 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 13076 bnxt_fw_echo_reply(bp); 13077 13078 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 13079 bnxt_hwmon_notify_event(bp); 13080 13081 /* These functions below will clear BNXT_STATE_IN_SP_TASK. 
They 13082 * must be the last functions to be called before exiting. 13083 */ 13084 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 13085 bnxt_reset(bp, false); 13086 13087 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 13088 bnxt_reset(bp, true); 13089 13090 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 13091 bnxt_rx_ring_reset(bp); 13092 13093 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 13094 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 13095 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 13096 bnxt_devlink_health_fw_report(bp); 13097 else 13098 bnxt_fw_reset(bp); 13099 } 13100 13101 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 13102 if (!is_bnxt_fw_ok(bp)) 13103 bnxt_devlink_health_fw_report(bp); 13104 } 13105 13106 smp_mb__before_atomic(); 13107 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 13108 } 13109 13110 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 13111 int *max_cp); 13112 13113 /* Under rtnl_lock */ 13114 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 13115 int tx_xdp) 13116 { 13117 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 13118 int tx_rings_needed, stats; 13119 int rx_rings = rx; 13120 int cp, vnics; 13121 13122 if (tcs) 13123 tx_sets = tcs; 13124 13125 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 13126 13127 if (max_rx < rx_rings) 13128 return -ENOMEM; 13129 13130 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13131 rx_rings <<= 1; 13132 13133 tx_rings_needed = tx * tx_sets + tx_xdp; 13134 if (max_tx < tx_rings_needed) 13135 return -ENOMEM; 13136 13137 vnics = 1; 13138 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == 13139 BNXT_FLAG_RFS) 13140 vnics += rx; 13141 13142 tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); 13143 cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 13144 if (max_cp < cp) 13145 return -ENOMEM; 13146 stats = cp; 13147 if (BNXT_NEW_RM(bp)) { 13148 cp += bnxt_get_ulp_msix_num(bp); 13149 stats += bnxt_get_ulp_stat_ctxs(bp); 13150 } 13151 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 13152 stats, vnics); 13153 } 13154 13155 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 13156 { 13157 if (bp->bar2) { 13158 pci_iounmap(pdev, bp->bar2); 13159 bp->bar2 = NULL; 13160 } 13161 13162 if (bp->bar1) { 13163 pci_iounmap(pdev, bp->bar1); 13164 bp->bar1 = NULL; 13165 } 13166 13167 if (bp->bar0) { 13168 pci_iounmap(pdev, bp->bar0); 13169 bp->bar0 = NULL; 13170 } 13171 } 13172 13173 static void bnxt_cleanup_pci(struct bnxt *bp) 13174 { 13175 bnxt_unmap_bars(bp, bp->pdev); 13176 pci_release_regions(bp->pdev); 13177 if (pci_is_enabled(bp->pdev)) 13178 pci_disable_device(bp->pdev); 13179 } 13180 13181 static void bnxt_init_dflt_coal(struct bnxt *bp) 13182 { 13183 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 13184 struct bnxt_coal *coal; 13185 u16 flags = 0; 13186 13187 if (coal_cap->cmpl_params & 13188 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 13189 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 13190 13191 /* Tick values in micro seconds. 13192 * 1 coal_buf x bufs_per_record = 1 completion record. 
13193 */ 13194 coal = &bp->rx_coal; 13195 coal->coal_ticks = 10; 13196 coal->coal_bufs = 30; 13197 coal->coal_ticks_irq = 1; 13198 coal->coal_bufs_irq = 2; 13199 coal->idle_thresh = 50; 13200 coal->bufs_per_record = 2; 13201 coal->budget = 64; /* NAPI budget */ 13202 coal->flags = flags; 13203 13204 coal = &bp->tx_coal; 13205 coal->coal_ticks = 28; 13206 coal->coal_bufs = 30; 13207 coal->coal_ticks_irq = 2; 13208 coal->coal_bufs_irq = 2; 13209 coal->bufs_per_record = 1; 13210 coal->flags = flags; 13211 13212 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 13213 } 13214 13215 /* FW that pre-reserves 1 VNIC per function */ 13216 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 13217 { 13218 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 13219 13220 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13221 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 13222 return true; 13223 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13224 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 13225 return true; 13226 return false; 13227 } 13228 13229 static int bnxt_fw_init_one_p1(struct bnxt *bp) 13230 { 13231 int rc; 13232 13233 bp->fw_cap = 0; 13234 rc = bnxt_hwrm_ver_get(bp); 13235 bnxt_try_map_fw_health_reg(bp); 13236 if (rc) { 13237 rc = bnxt_try_recover_fw(bp); 13238 if (rc) 13239 return rc; 13240 rc = bnxt_hwrm_ver_get(bp); 13241 if (rc) 13242 return rc; 13243 } 13244 13245 bnxt_nvm_cfg_ver_get(bp); 13246 13247 rc = bnxt_hwrm_func_reset(bp); 13248 if (rc) 13249 return -ENODEV; 13250 13251 bnxt_hwrm_fw_set_time(bp); 13252 return 0; 13253 } 13254 13255 static int bnxt_fw_init_one_p2(struct bnxt *bp) 13256 { 13257 int rc; 13258 13259 /* Get the MAX capabilities for this function */ 13260 rc = bnxt_hwrm_func_qcaps(bp); 13261 if (rc) { 13262 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 13263 rc); 13264 return -ENODEV; 13265 } 13266 13267 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 13268 if (rc) 13269 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 13270 rc); 13271 13272 if (bnxt_alloc_fw_health(bp)) { 13273 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 13274 } else { 13275 rc = bnxt_hwrm_error_recovery_qcfg(bp); 13276 if (rc) 13277 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 13278 rc); 13279 } 13280 13281 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 13282 if (rc) 13283 return -ENODEV; 13284 13285 if (bnxt_fw_pre_resv_vnics(bp)) 13286 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 13287 13288 bnxt_hwrm_func_qcfg(bp); 13289 bnxt_hwrm_vnic_qcaps(bp); 13290 bnxt_hwrm_port_led_qcaps(bp); 13291 bnxt_ethtool_init(bp); 13292 if (bp->fw_cap & BNXT_FW_CAP_PTP) 13293 __bnxt_hwrm_ptp_qcfg(bp); 13294 bnxt_dcb_init(bp); 13295 bnxt_hwmon_init(bp); 13296 return 0; 13297 } 13298 13299 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 13300 { 13301 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 13302 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 13303 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 13304 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 13305 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 13306 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 13307 bp->rss_hash_delta = bp->rss_hash_cfg; 13308 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 13309 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 13310 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 13311 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 13312 } 13313 } 13314 13315 static void bnxt_set_dflt_rfs(struct bnxt *bp) 13316 { 13317 struct net_device *dev = bp->dev; 13318 13319 
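	/* Start with NTUPLE filtering disabled.  Advertise it in hw_features
	 * only if the chip and firmware support RFS, and enable it by default
	 * only if the current resources allow it (bnxt_rfs_capable()).
	 */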
dev->hw_features &= ~NETIF_F_NTUPLE; 13320 dev->features &= ~NETIF_F_NTUPLE; 13321 bp->flags &= ~BNXT_FLAG_RFS; 13322 if (bnxt_rfs_supported(bp)) { 13323 dev->hw_features |= NETIF_F_NTUPLE; 13324 if (bnxt_rfs_capable(bp)) { 13325 bp->flags |= BNXT_FLAG_RFS; 13326 dev->features |= NETIF_F_NTUPLE; 13327 } 13328 } 13329 } 13330 13331 static void bnxt_fw_init_one_p3(struct bnxt *bp) 13332 { 13333 struct pci_dev *pdev = bp->pdev; 13334 13335 bnxt_set_dflt_rss_hash_type(bp); 13336 bnxt_set_dflt_rfs(bp); 13337 13338 bnxt_get_wol_settings(bp); 13339 if (bp->flags & BNXT_FLAG_WOL_CAP) 13340 device_set_wakeup_enable(&pdev->dev, bp->wol); 13341 else 13342 device_set_wakeup_capable(&pdev->dev, false); 13343 13344 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 13345 bnxt_hwrm_coal_params_qcaps(bp); 13346 } 13347 13348 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 13349 13350 int bnxt_fw_init_one(struct bnxt *bp) 13351 { 13352 int rc; 13353 13354 rc = bnxt_fw_init_one_p1(bp); 13355 if (rc) { 13356 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 13357 return rc; 13358 } 13359 rc = bnxt_fw_init_one_p2(bp); 13360 if (rc) { 13361 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 13362 return rc; 13363 } 13364 rc = bnxt_probe_phy(bp, false); 13365 if (rc) 13366 return rc; 13367 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 13368 if (rc) 13369 return rc; 13370 13371 bnxt_fw_init_one_p3(bp); 13372 return 0; 13373 } 13374 13375 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 13376 { 13377 struct bnxt_fw_health *fw_health = bp->fw_health; 13378 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 13379 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 13380 u32 reg_type, reg_off, delay_msecs; 13381 13382 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 13383 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 13384 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 13385 switch (reg_type) { 13386 case BNXT_FW_HEALTH_REG_TYPE_CFG: 13387 pci_write_config_dword(bp->pdev, reg_off, val); 13388 break; 13389 case BNXT_FW_HEALTH_REG_TYPE_GRC: 13390 writel(reg_off & BNXT_GRC_BASE_MASK, 13391 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 13392 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 13393 fallthrough; 13394 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 13395 writel(val, bp->bar0 + reg_off); 13396 break; 13397 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 13398 writel(val, bp->bar1 + reg_off); 13399 break; 13400 } 13401 if (delay_msecs) { 13402 pci_read_config_dword(bp->pdev, 0, &val); 13403 msleep(delay_msecs); 13404 } 13405 } 13406 13407 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 13408 { 13409 struct hwrm_func_qcfg_output *resp; 13410 struct hwrm_func_qcfg_input *req; 13411 bool result = true; /* firmware will enforce if unknown */ 13412 13413 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 13414 return result; 13415 13416 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 13417 return result; 13418 13419 req->fid = cpu_to_le16(0xffff); 13420 resp = hwrm_req_hold(bp, req); 13421 if (!hwrm_req_send(bp, req)) 13422 result = !!(le16_to_cpu(resp->flags) & 13423 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 13424 hwrm_req_drop(bp, req); 13425 return result; 13426 } 13427 13428 static void bnxt_reset_all(struct bnxt *bp) 13429 { 13430 struct bnxt_fw_health *fw_health = bp->fw_health; 13431 int i, rc; 13432 13433 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13434 bnxt_fw_reset_via_optee(bp); 13435 bp->fw_reset_timestamp = jiffies; 13436 return; 13437 } 13438 13439 if (fw_health->flags & 
ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 13440 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 13441 bnxt_fw_reset_writel(bp, i); 13442 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 13443 struct hwrm_fw_reset_input *req; 13444 13445 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 13446 if (!rc) { 13447 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 13448 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 13449 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 13450 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 13451 rc = hwrm_req_send(bp, req); 13452 } 13453 if (rc != -ENODEV) 13454 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 13455 } 13456 bp->fw_reset_timestamp = jiffies; 13457 } 13458 13459 static bool bnxt_fw_reset_timeout(struct bnxt *bp) 13460 { 13461 return time_after(jiffies, bp->fw_reset_timestamp + 13462 (bp->fw_reset_max_dsecs * HZ / 10)); 13463 } 13464 13465 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 13466 { 13467 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13468 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { 13469 bnxt_ulp_start(bp, rc); 13470 bnxt_dl_health_fw_status_update(bp, false); 13471 } 13472 bp->fw_reset_state = 0; 13473 dev_close(bp->dev); 13474 } 13475 13476 static void bnxt_fw_reset_task(struct work_struct *work) 13477 { 13478 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 13479 int rc = 0; 13480 13481 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 13482 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 13483 return; 13484 } 13485 13486 switch (bp->fw_reset_state) { 13487 case BNXT_FW_RESET_STATE_POLL_VF: { 13488 int n = bnxt_get_registered_vfs(bp); 13489 int tmo; 13490 13491 if (n < 0) { 13492 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 13493 n, jiffies_to_msecs(jiffies - 13494 bp->fw_reset_timestamp)); 13495 goto fw_reset_abort; 13496 } else if (n > 0) { 13497 if (bnxt_fw_reset_timeout(bp)) { 13498 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13499 bp->fw_reset_state = 0; 13500 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 13501 n); 13502 return; 13503 } 13504 bnxt_queue_fw_reset_work(bp, HZ / 10); 13505 return; 13506 } 13507 bp->fw_reset_timestamp = jiffies; 13508 rtnl_lock(); 13509 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 13510 bnxt_fw_reset_abort(bp, rc); 13511 rtnl_unlock(); 13512 return; 13513 } 13514 bnxt_fw_reset_close(bp); 13515 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13516 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 13517 tmo = HZ / 10; 13518 } else { 13519 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13520 tmo = bp->fw_reset_min_dsecs * HZ / 10; 13521 } 13522 rtnl_unlock(); 13523 bnxt_queue_fw_reset_work(bp, tmo); 13524 return; 13525 } 13526 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 13527 u32 val; 13528 13529 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 13530 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 13531 !bnxt_fw_reset_timeout(bp)) { 13532 bnxt_queue_fw_reset_work(bp, HZ / 5); 13533 return; 13534 } 13535 13536 if (!bp->fw_health->primary) { 13537 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 13538 13539 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13540 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 13541 return; 13542 } 13543 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 13544 } 13545 fallthrough; 13546 case 
BNXT_FW_RESET_STATE_RESET_FW: 13547 bnxt_reset_all(bp); 13548 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13549 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 13550 return; 13551 case BNXT_FW_RESET_STATE_ENABLE_DEV: 13552 bnxt_inv_fw_health_reg(bp); 13553 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 13554 !bp->fw_reset_min_dsecs) { 13555 u16 val; 13556 13557 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 13558 if (val == 0xffff) { 13559 if (bnxt_fw_reset_timeout(bp)) { 13560 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 13561 rc = -ETIMEDOUT; 13562 goto fw_reset_abort; 13563 } 13564 bnxt_queue_fw_reset_work(bp, HZ / 1000); 13565 return; 13566 } 13567 } 13568 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 13569 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 13570 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 13571 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 13572 bnxt_dl_remote_reload(bp); 13573 if (pci_enable_device(bp->pdev)) { 13574 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 13575 rc = -ENODEV; 13576 goto fw_reset_abort; 13577 } 13578 pci_set_master(bp->pdev); 13579 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 13580 fallthrough; 13581 case BNXT_FW_RESET_STATE_POLL_FW: 13582 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 13583 rc = bnxt_hwrm_poll(bp); 13584 if (rc) { 13585 if (bnxt_fw_reset_timeout(bp)) { 13586 netdev_err(bp->dev, "Firmware reset aborted\n"); 13587 goto fw_reset_abort_status; 13588 } 13589 bnxt_queue_fw_reset_work(bp, HZ / 5); 13590 return; 13591 } 13592 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 13593 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 13594 fallthrough; 13595 case BNXT_FW_RESET_STATE_OPENING: 13596 while (!rtnl_trylock()) { 13597 bnxt_queue_fw_reset_work(bp, HZ / 10); 13598 return; 13599 } 13600 rc = bnxt_open(bp->dev); 13601 if (rc) { 13602 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 13603 bnxt_fw_reset_abort(bp, rc); 13604 rtnl_unlock(); 13605 return; 13606 } 13607 13608 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 13609 bp->fw_health->enabled) { 13610 bp->fw_health->last_fw_reset_cnt = 13611 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13612 } 13613 bp->fw_reset_state = 0; 13614 /* Make sure fw_reset_state is 0 before clearing the flag */ 13615 smp_mb__before_atomic(); 13616 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13617 bnxt_ulp_start(bp, 0); 13618 bnxt_reenable_sriov(bp); 13619 bnxt_vf_reps_alloc(bp); 13620 bnxt_vf_reps_open(bp); 13621 bnxt_ptp_reapply_pps(bp); 13622 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 13623 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 13624 bnxt_dl_health_fw_recovery_done(bp); 13625 bnxt_dl_health_fw_status_update(bp, true); 13626 } 13627 rtnl_unlock(); 13628 break; 13629 } 13630 return; 13631 13632 fw_reset_abort_status: 13633 if (bp->fw_health->status_reliable || 13634 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 13635 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 13636 13637 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 13638 } 13639 fw_reset_abort: 13640 rtnl_lock(); 13641 bnxt_fw_reset_abort(bp, rc); 13642 rtnl_unlock(); 13643 } 13644 13645 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 13646 { 13647 int rc; 13648 struct bnxt *bp = netdev_priv(dev); 13649 13650 SET_NETDEV_DEV(dev, &pdev->dev); 13651 13652 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 13653 rc = pci_enable_device(pdev); 13654 if (rc) { 13655 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 13656 goto init_err; 13657 } 13658 13659 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 13660 dev_err(&pdev->dev, 13661 "Cannot find PCI device base address, aborting\n"); 13662 rc = -ENODEV; 13663 goto init_err_disable; 13664 } 13665 13666 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 13667 if (rc) { 13668 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 13669 goto init_err_disable; 13670 } 13671 13672 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 13673 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 13674 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 13675 rc = -EIO; 13676 goto init_err_release; 13677 } 13678 13679 pci_set_master(pdev); 13680 13681 bp->dev = dev; 13682 bp->pdev = pdev; 13683 13684 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 13685 * determines the BAR size. 13686 */ 13687 bp->bar0 = pci_ioremap_bar(pdev, 0); 13688 if (!bp->bar0) { 13689 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 13690 rc = -ENOMEM; 13691 goto init_err_release; 13692 } 13693 13694 bp->bar2 = pci_ioremap_bar(pdev, 4); 13695 if (!bp->bar2) { 13696 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 13697 rc = -ENOMEM; 13698 goto init_err_release; 13699 } 13700 13701 INIT_WORK(&bp->sp_task, bnxt_sp_task); 13702 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 13703 13704 spin_lock_init(&bp->ntp_fltr_lock); 13705 #if BITS_PER_LONG == 32 13706 spin_lock_init(&bp->db_lock); 13707 #endif 13708 13709 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 13710 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 13711 13712 timer_setup(&bp->timer, bnxt_timer, 0); 13713 bp->current_interval = BNXT_TIMER_INTERVAL; 13714 13715 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 13716 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 13717 13718 clear_bit(BNXT_STATE_OPEN, &bp->state); 13719 return 0; 13720 13721 init_err_release: 13722 bnxt_unmap_bars(bp, pdev); 13723 pci_release_regions(pdev); 13724 13725 init_err_disable: 13726 pci_disable_device(pdev); 13727 13728 init_err: 13729 return rc; 13730 } 13731 13732 /* rtnl_lock held */ 13733 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 13734 { 13735 struct sockaddr *addr = p; 13736 struct bnxt *bp = netdev_priv(dev); 13737 int rc = 0; 13738 13739 if (!is_valid_ether_addr(addr->sa_data)) 13740 return -EADDRNOTAVAIL; 13741 13742 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 13743 return 0; 13744 13745 rc = bnxt_approve_mac(bp, addr->sa_data, true); 13746 if (rc) 13747 return rc; 13748 13749 eth_hw_addr_set(dev, addr->sa_data); 13750 if (netif_running(dev)) { 13751 bnxt_close_nic(bp, false, false); 13752 rc = bnxt_open_nic(bp, false, false); 13753 } 13754 13755 return rc; 13756 } 13757 13758 /* rtnl_lock held */ 13759 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 13760 { 13761 struct bnxt *bp = netdev_priv(dev); 13762 13763 if (netif_running(dev)) 13764 bnxt_close_nic(bp, true, false); 13765 13766 dev->mtu = new_mtu; 13767 bnxt_set_ring_params(bp); 13768 13769 if (netif_running(dev)) 13770 return bnxt_open_nic(bp, true, false); 13771 13772 return 0; 13773 } 13774 13775 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 13776 { 13777 struct bnxt *bp = netdev_priv(dev); 13778 bool sh = false; 13779 int rc, tx_cp; 13780 13781 if (tc > bp->max_tc) { 13782 
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 13783 tc, bp->max_tc); 13784 return -EINVAL; 13785 } 13786 13787 if (netdev_get_num_tc(dev) == tc) 13788 return 0; 13789 13790 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 13791 sh = true; 13792 13793 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 13794 sh, tc, bp->tx_nr_rings_xdp); 13795 if (rc) 13796 return rc; 13797 13798 /* Needs to close the device and do hw resource re-allocations */ 13799 if (netif_running(bp->dev)) 13800 bnxt_close_nic(bp, true, false); 13801 13802 if (tc) { 13803 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 13804 netdev_set_num_tc(dev, tc); 13805 } else { 13806 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13807 netdev_reset_tc(dev); 13808 } 13809 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 13810 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 13811 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 13812 tx_cp + bp->rx_nr_rings; 13813 13814 if (netif_running(bp->dev)) 13815 return bnxt_open_nic(bp, true, false); 13816 13817 return 0; 13818 } 13819 13820 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 13821 void *cb_priv) 13822 { 13823 struct bnxt *bp = cb_priv; 13824 13825 if (!bnxt_tc_flower_enabled(bp) || 13826 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 13827 return -EOPNOTSUPP; 13828 13829 switch (type) { 13830 case TC_SETUP_CLSFLOWER: 13831 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 13832 default: 13833 return -EOPNOTSUPP; 13834 } 13835 } 13836 13837 LIST_HEAD(bnxt_block_cb_list); 13838 13839 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 13840 void *type_data) 13841 { 13842 struct bnxt *bp = netdev_priv(dev); 13843 13844 switch (type) { 13845 case TC_SETUP_BLOCK: 13846 return flow_block_cb_setup_simple(type_data, 13847 &bnxt_block_cb_list, 13848 bnxt_setup_tc_block_cb, 13849 bp, bp, true); 13850 case TC_SETUP_QDISC_MQPRIO: { 13851 struct tc_mqprio_qopt *mqprio = type_data; 13852 13853 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 13854 13855 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 13856 } 13857 default: 13858 return -EOPNOTSUPP; 13859 } 13860 } 13861 13862 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 13863 const struct sk_buff *skb) 13864 { 13865 struct bnxt_vnic_info *vnic; 13866 13867 if (skb) 13868 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 13869 13870 vnic = &bp->vnic_info[0]; 13871 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 13872 } 13873 13874 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 13875 u32 idx) 13876 { 13877 struct hlist_head *head; 13878 int bit_id; 13879 13880 spin_lock_bh(&bp->ntp_fltr_lock); 13881 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0); 13882 if (bit_id < 0) { 13883 spin_unlock_bh(&bp->ntp_fltr_lock); 13884 return -ENOMEM; 13885 } 13886 13887 fltr->base.sw_id = (u16)bit_id; 13888 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 13889 fltr->base.flags |= BNXT_ACT_RING_DST; 13890 head = &bp->ntp_fltr_hash_tbl[idx]; 13891 hlist_add_head_rcu(&fltr->base.hash, head); 13892 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 13893 bp->ntp_fltr_count++; 13894 spin_unlock_bh(&bp->ntp_fltr_lock); 13895 return 0; 13896 } 13897 13898 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 13899 struct bnxt_ntuple_filter *f2) 13900 { 13901 struct flow_keys *keys1 = &f1->fkeys; 13902 struct flow_keys *keys2 = &f2->fkeys; 13903 13904 if (f1->ntuple_flags != 
f2->ntuple_flags) 13905 return false; 13906 13907 if (keys1->basic.n_proto != keys2->basic.n_proto || 13908 keys1->basic.ip_proto != keys2->basic.ip_proto) 13909 return false; 13910 13911 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 13912 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && 13913 keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) || 13914 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && 13915 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)) 13916 return false; 13917 } else { 13918 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && 13919 memcmp(&keys1->addrs.v6addrs.src, 13920 &keys2->addrs.v6addrs.src, 13921 sizeof(keys1->addrs.v6addrs.src))) || 13922 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && 13923 memcmp(&keys1->addrs.v6addrs.dst, 13924 &keys2->addrs.v6addrs.dst, 13925 sizeof(keys1->addrs.v6addrs.dst)))) 13926 return false; 13927 } 13928 13929 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) && 13930 keys1->ports.src != keys2->ports.src) || 13931 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) && 13932 keys1->ports.dst != keys2->ports.dst)) 13933 return false; 13934 13935 if (keys1->control.flags == keys2->control.flags && 13936 f1->l2_fltr == f2->l2_fltr) 13937 return true; 13938 13939 return false; 13940 } 13941 13942 struct bnxt_ntuple_filter * 13943 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 13944 struct bnxt_ntuple_filter *fltr, u32 idx) 13945 { 13946 struct bnxt_ntuple_filter *f; 13947 struct hlist_head *head; 13948 13949 head = &bp->ntp_fltr_hash_tbl[idx]; 13950 hlist_for_each_entry_rcu(f, head, base.hash) { 13951 if (bnxt_fltr_match(f, fltr)) 13952 return f; 13953 } 13954 return NULL; 13955 } 13956 13957 #ifdef CONFIG_RFS_ACCEL 13958 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 13959 u16 rxq_index, u32 flow_id) 13960 { 13961 struct bnxt *bp = netdev_priv(dev); 13962 struct bnxt_ntuple_filter *fltr, *new_fltr; 13963 struct flow_keys *fkeys; 13964 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 13965 struct bnxt_l2_filter *l2_fltr; 13966 int rc = 0, idx; 13967 u32 flags; 13968 13969 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 13970 l2_fltr = bp->vnic_info[0].l2_filters[0]; 13971 atomic_inc(&l2_fltr->refcnt); 13972 } else { 13973 struct bnxt_l2_key key; 13974 13975 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 13976 key.vlan = 0; 13977 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 13978 if (!l2_fltr) 13979 return -EINVAL; 13980 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 13981 bnxt_del_l2_filter(bp, l2_fltr); 13982 return -EINVAL; 13983 } 13984 } 13985 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 13986 if (!new_fltr) { 13987 bnxt_del_l2_filter(bp, l2_fltr); 13988 return -ENOMEM; 13989 } 13990 13991 fkeys = &new_fltr->fkeys; 13992 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 13993 rc = -EPROTONOSUPPORT; 13994 goto err_free; 13995 } 13996 13997 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 13998 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 13999 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 14000 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 14001 rc = -EPROTONOSUPPORT; 14002 goto err_free; 14003 } 14004 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 14005 bp->hwrm_spec_code < 0x10601) { 14006 rc = -EPROTONOSUPPORT; 14007 goto err_free; 14008 } 14009 flags = fkeys->control.flags; 14010 if (((flags & FLOW_DIS_ENCAPSULATION) && 14011 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 14012 rc = -EPROTONOSUPPORT; 14013 goto err_free; 14014 } 14015 
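/* The flow is offloadable: fill in the new ntuple filter, then look up the hash bucket under RCU for an existing match before inserting the filter and scheduling sp_task to program it into firmware. */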
14016 new_fltr->l2_fltr = l2_fltr; 14017 new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL; 14018 14019 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 14020 rcu_read_lock(); 14021 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 14022 if (fltr) { 14023 rc = fltr->base.sw_id; 14024 rcu_read_unlock(); 14025 goto err_free; 14026 } 14027 rcu_read_unlock(); 14028 14029 new_fltr->flow_id = flow_id; 14030 new_fltr->base.rxq = rxq_index; 14031 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 14032 if (!rc) { 14033 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 14034 return new_fltr->base.sw_id; 14035 } 14036 14037 err_free: 14038 bnxt_del_l2_filter(bp, l2_fltr); 14039 kfree(new_fltr); 14040 return rc; 14041 } 14042 #endif 14043 14044 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 14045 { 14046 spin_lock_bh(&bp->ntp_fltr_lock); 14047 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 14048 spin_unlock_bh(&bp->ntp_fltr_lock); 14049 return; 14050 } 14051 hlist_del_rcu(&fltr->base.hash); 14052 bp->ntp_fltr_count--; 14053 spin_unlock_bh(&bp->ntp_fltr_lock); 14054 bnxt_del_l2_filter(bp, fltr->l2_fltr); 14055 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 14056 kfree_rcu(fltr, base.rcu); 14057 } 14058 14059 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 14060 { 14061 #ifdef CONFIG_RFS_ACCEL 14062 int i; 14063 14064 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 14065 struct hlist_head *head; 14066 struct hlist_node *tmp; 14067 struct bnxt_ntuple_filter *fltr; 14068 int rc; 14069 14070 head = &bp->ntp_fltr_hash_tbl[i]; 14071 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 14072 bool del = false; 14073 14074 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 14075 if (fltr->base.flags & BNXT_ACT_NO_AGING) 14076 continue; 14077 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 14078 fltr->flow_id, 14079 fltr->base.sw_id)) { 14080 bnxt_hwrm_cfa_ntuple_filter_free(bp, 14081 fltr); 14082 del = true; 14083 } 14084 } else { 14085 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 14086 fltr); 14087 if (rc) 14088 del = true; 14089 else 14090 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 14091 } 14092 14093 if (del) 14094 bnxt_del_ntp_filter(bp, fltr); 14095 } 14096 } 14097 #endif 14098 } 14099 14100 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 14101 unsigned int entry, struct udp_tunnel_info *ti) 14102 { 14103 struct bnxt *bp = netdev_priv(netdev); 14104 unsigned int cmd; 14105 14106 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 14107 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 14108 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14109 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 14110 else 14111 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 14112 14113 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 14114 } 14115 14116 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 14117 unsigned int entry, struct udp_tunnel_info *ti) 14118 { 14119 struct bnxt *bp = netdev_priv(netdev); 14120 unsigned int cmd; 14121 14122 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 14123 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 14124 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 14125 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 14126 else 14127 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 14128 14129 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 14130 } 14131 14132 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 14133 .set_port = bnxt_udp_tunnel_set_port, 
14134 .unset_port = bnxt_udp_tunnel_unset_port, 14135 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14136 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14137 .tables = { 14138 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14139 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14140 }, 14141 }, bnxt_udp_tunnels_p7 = { 14142 .set_port = bnxt_udp_tunnel_set_port, 14143 .unset_port = bnxt_udp_tunnel_unset_port, 14144 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 14145 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 14146 .tables = { 14147 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 14148 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 14149 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 14150 }, 14151 }; 14152 14153 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 14154 struct net_device *dev, u32 filter_mask, 14155 int nlflags) 14156 { 14157 struct bnxt *bp = netdev_priv(dev); 14158 14159 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 14160 nlflags, filter_mask, NULL); 14161 } 14162 14163 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 14164 u16 flags, struct netlink_ext_ack *extack) 14165 { 14166 struct bnxt *bp = netdev_priv(dev); 14167 struct nlattr *attr, *br_spec; 14168 int rem, rc = 0; 14169 14170 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 14171 return -EOPNOTSUPP; 14172 14173 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 14174 if (!br_spec) 14175 return -EINVAL; 14176 14177 nla_for_each_nested(attr, br_spec, rem) { 14178 u16 mode; 14179 14180 if (nla_type(attr) != IFLA_BRIDGE_MODE) 14181 continue; 14182 14183 mode = nla_get_u16(attr); 14184 if (mode == bp->br_mode) 14185 break; 14186 14187 rc = bnxt_hwrm_set_br_mode(bp, mode); 14188 if (!rc) 14189 bp->br_mode = mode; 14190 break; 14191 } 14192 return rc; 14193 } 14194 14195 int bnxt_get_port_parent_id(struct net_device *dev, 14196 struct netdev_phys_item_id *ppid) 14197 { 14198 struct bnxt *bp = netdev_priv(dev); 14199 14200 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 14201 return -EOPNOTSUPP; 14202 14203 /* The PF and its VF-reps only support the switchdev framework */ 14204 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 14205 return -EOPNOTSUPP; 14206 14207 ppid->id_len = sizeof(bp->dsn); 14208 memcpy(ppid->id, bp->dsn, ppid->id_len); 14209 14210 return 0; 14211 } 14212 14213 static const struct net_device_ops bnxt_netdev_ops = { 14214 .ndo_open = bnxt_open, 14215 .ndo_start_xmit = bnxt_start_xmit, 14216 .ndo_stop = bnxt_close, 14217 .ndo_get_stats64 = bnxt_get_stats64, 14218 .ndo_set_rx_mode = bnxt_set_rx_mode, 14219 .ndo_eth_ioctl = bnxt_ioctl, 14220 .ndo_validate_addr = eth_validate_addr, 14221 .ndo_set_mac_address = bnxt_change_mac_addr, 14222 .ndo_change_mtu = bnxt_change_mtu, 14223 .ndo_fix_features = bnxt_fix_features, 14224 .ndo_set_features = bnxt_set_features, 14225 .ndo_features_check = bnxt_features_check, 14226 .ndo_tx_timeout = bnxt_tx_timeout, 14227 #ifdef CONFIG_BNXT_SRIOV 14228 .ndo_get_vf_config = bnxt_get_vf_config, 14229 .ndo_set_vf_mac = bnxt_set_vf_mac, 14230 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 14231 .ndo_set_vf_rate = bnxt_set_vf_bw, 14232 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 14233 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 14234 .ndo_set_vf_trust = bnxt_set_vf_trust, 14235 #endif 14236 .ndo_setup_tc = bnxt_setup_tc, 14237 #ifdef CONFIG_RFS_ACCEL 14238 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 14239 #endif 14240 .ndo_bpf = bnxt_xdp,
14241 .ndo_xdp_xmit = bnxt_xdp_xmit, 14242 .ndo_bridge_getlink = bnxt_bridge_getlink, 14243 .ndo_bridge_setlink = bnxt_bridge_setlink, 14244 }; 14245 14246 static void bnxt_remove_one(struct pci_dev *pdev) 14247 { 14248 struct net_device *dev = pci_get_drvdata(pdev); 14249 struct bnxt *bp = netdev_priv(dev); 14250 14251 if (BNXT_PF(bp)) 14252 bnxt_sriov_disable(bp); 14253 14254 bnxt_rdma_aux_device_uninit(bp); 14255 14256 bnxt_ptp_clear(bp); 14257 unregister_netdev(dev); 14258 bnxt_free_l2_filters(bp, true); 14259 bnxt_free_ntp_fltrs(bp, true); 14260 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14261 /* Flush any pending tasks */ 14262 cancel_work_sync(&bp->sp_task); 14263 cancel_delayed_work_sync(&bp->fw_reset_task); 14264 bp->sp_event = 0; 14265 14266 bnxt_dl_fw_reporters_destroy(bp); 14267 bnxt_dl_unregister(bp); 14268 bnxt_shutdown_tc(bp); 14269 14270 bnxt_clear_int_mode(bp); 14271 bnxt_hwrm_func_drv_unrgtr(bp); 14272 bnxt_free_hwrm_resources(bp); 14273 bnxt_hwmon_uninit(bp); 14274 bnxt_ethtool_free(bp); 14275 bnxt_dcb_free(bp); 14276 kfree(bp->ptp_cfg); 14277 bp->ptp_cfg = NULL; 14278 kfree(bp->fw_health); 14279 bp->fw_health = NULL; 14280 bnxt_cleanup_pci(bp); 14281 bnxt_free_ctx_mem(bp); 14282 kfree(bp->rss_indir_tbl); 14283 bp->rss_indir_tbl = NULL; 14284 bnxt_free_port_stats(bp); 14285 free_netdev(dev); 14286 } 14287 14288 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 14289 { 14290 int rc = 0; 14291 struct bnxt_link_info *link_info = &bp->link_info; 14292 14293 bp->phy_flags = 0; 14294 rc = bnxt_hwrm_phy_qcaps(bp); 14295 if (rc) { 14296 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 14297 rc); 14298 return rc; 14299 } 14300 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 14301 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 14302 else 14303 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 14304 if (!fw_dflt) 14305 return 0; 14306 14307 mutex_lock(&bp->link_lock); 14308 rc = bnxt_update_link(bp, false); 14309 if (rc) { 14310 mutex_unlock(&bp->link_lock); 14311 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 14312 rc); 14313 return rc; 14314 } 14315 14316 /* Older firmware does not have supported_auto_speeds, so assume 14317 * that all supported speeds can be autonegotiated. 
14318 */ 14319 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 14320 link_info->support_auto_speeds = link_info->support_speeds; 14321 14322 bnxt_init_ethtool_link_settings(bp); 14323 mutex_unlock(&bp->link_lock); 14324 return 0; 14325 } 14326 14327 static int bnxt_get_max_irq(struct pci_dev *pdev) 14328 { 14329 u16 ctrl; 14330 14331 if (!pdev->msix_cap) 14332 return 1; 14333 14334 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 14335 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 14336 } 14337 14338 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14339 int *max_cp) 14340 { 14341 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 14342 int max_ring_grps = 0, max_irq; 14343 14344 *max_tx = hw_resc->max_tx_rings; 14345 *max_rx = hw_resc->max_rx_rings; 14346 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 14347 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 14348 bnxt_get_ulp_msix_num(bp), 14349 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 14350 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 14351 *max_cp = min_t(int, *max_cp, max_irq); 14352 max_ring_grps = hw_resc->max_hw_ring_grps; 14353 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 14354 *max_cp -= 1; 14355 *max_rx -= 2; 14356 } 14357 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14358 *max_rx >>= 1; 14359 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 14360 int rc; 14361 14362 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 14363 if (rc) { 14364 *max_rx = 0; 14365 *max_tx = 0; 14366 } 14367 /* On P5 chips, max_cp output param should be available NQs */ 14368 *max_cp = max_irq; 14369 } 14370 *max_rx = min_t(int, *max_rx, max_ring_grps); 14371 } 14372 14373 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 14374 { 14375 int rx, tx, cp; 14376 14377 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 14378 *max_rx = rx; 14379 *max_tx = tx; 14380 if (!rx || !tx || !cp) 14381 return -ENOMEM; 14382 14383 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 14384 } 14385 14386 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14387 bool shared) 14388 { 14389 int rc; 14390 14391 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 14392 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 14393 /* Not enough rings, try disabling agg rings. 
*/ 14394 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 14395 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 14396 if (rc) { 14397 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 14398 bp->flags |= BNXT_FLAG_AGG_RINGS; 14399 return rc; 14400 } 14401 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 14402 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 14403 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 14404 bnxt_set_ring_params(bp); 14405 } 14406 14407 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 14408 int max_cp, max_stat, max_irq; 14409 14410 /* Reserve minimum resources for RoCE */ 14411 max_cp = bnxt_get_max_func_cp_rings(bp); 14412 max_stat = bnxt_get_max_func_stat_ctxs(bp); 14413 max_irq = bnxt_get_max_func_irqs(bp); 14414 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 14415 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 14416 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 14417 return 0; 14418 14419 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 14420 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 14421 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 14422 max_cp = min_t(int, max_cp, max_irq); 14423 max_cp = min_t(int, max_cp, max_stat); 14424 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 14425 if (rc) 14426 rc = 0; 14427 } 14428 return rc; 14429 } 14430 14431 /* In initial default shared ring setting, each shared ring must have a 14432 * RX/TX ring pair. 14433 */ 14434 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 14435 { 14436 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 14437 bp->rx_nr_rings = bp->cp_nr_rings; 14438 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 14439 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 14440 } 14441 14442 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 14443 { 14444 int dflt_rings, max_rx_rings, max_tx_rings, rc; 14445 14446 if (!bnxt_can_reserve_rings(bp)) 14447 return 0; 14448 14449 if (sh) 14450 bp->flags |= BNXT_FLAG_SHARED_RINGS; 14451 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 14452 /* Reduce default rings on multi-port cards so that total default 14453 * rings do not exceed CPU count. 14454 */ 14455 if (bp->port_count > 1) { 14456 int max_rings = 14457 max_t(int, num_online_cpus() / bp->port_count, 1); 14458 14459 dflt_rings = min_t(int, dflt_rings, max_rings); 14460 } 14461 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 14462 if (rc) 14463 return rc; 14464 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 14465 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 14466 if (sh) 14467 bnxt_trim_dflt_sh_rings(bp); 14468 else 14469 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 14470 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 14471 14472 rc = __bnxt_reserve_rings(bp); 14473 if (rc && rc != -ENODEV) 14474 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 14475 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14476 if (sh) 14477 bnxt_trim_dflt_sh_rings(bp); 14478 14479 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 14480 if (bnxt_need_reserve_rings(bp)) { 14481 rc = __bnxt_reserve_rings(bp); 14482 if (rc && rc != -ENODEV) 14483 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 14484 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14485 } 14486 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 14487 bp->rx_nr_rings++; 14488 bp->cp_nr_rings++; 14489 } 14490 if (rc) { 14491 bp->tx_nr_rings = 0; 14492 bp->rx_nr_rings = 0; 14493 } 14494 return rc; 14495 } 14496 14497 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 14498 { 14499 int rc; 14500 14501 if (bp->tx_nr_rings) 14502 return 0; 14503 14504 bnxt_ulp_irq_stop(bp); 14505 bnxt_clear_int_mode(bp); 14506 rc = bnxt_set_dflt_rings(bp, true); 14507 if (rc) { 14508 if (BNXT_VF(bp) && rc == -ENODEV) 14509 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 14510 else 14511 netdev_err(bp->dev, "Not enough rings available.\n"); 14512 goto init_dflt_ring_err; 14513 } 14514 rc = bnxt_init_int_mode(bp); 14515 if (rc) 14516 goto init_dflt_ring_err; 14517 14518 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14519 14520 bnxt_set_dflt_rfs(bp); 14521 14522 init_dflt_ring_err: 14523 bnxt_ulp_irq_restart(bp, rc); 14524 return rc; 14525 } 14526 14527 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 14528 { 14529 int rc; 14530 14531 ASSERT_RTNL(); 14532 bnxt_hwrm_func_qcaps(bp); 14533 14534 if (netif_running(bp->dev)) 14535 __bnxt_close_nic(bp, true, false); 14536 14537 bnxt_ulp_irq_stop(bp); 14538 bnxt_clear_int_mode(bp); 14539 rc = bnxt_init_int_mode(bp); 14540 bnxt_ulp_irq_restart(bp, rc); 14541 14542 if (netif_running(bp->dev)) { 14543 if (rc) 14544 dev_close(bp->dev); 14545 else 14546 rc = bnxt_open_nic(bp, true, false); 14547 } 14548 14549 return rc; 14550 } 14551 14552 static int bnxt_init_mac_addr(struct bnxt *bp) 14553 { 14554 int rc = 0; 14555 14556 if (BNXT_PF(bp)) { 14557 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 14558 } else { 14559 #ifdef CONFIG_BNXT_SRIOV 14560 struct bnxt_vf_info *vf = &bp->vf; 14561 bool strict_approval = true; 14562 14563 if (is_valid_ether_addr(vf->mac_addr)) { 14564 /* overwrite netdev dev_addr with admin VF MAC */ 14565 eth_hw_addr_set(bp->dev, vf->mac_addr); 14566 /* Older PF driver or firmware may not approve this 14567 * correctly. 
14568 */ 14569 strict_approval = false; 14570 } else { 14571 eth_hw_addr_random(bp->dev); 14572 } 14573 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 14574 #endif 14575 } 14576 return rc; 14577 } 14578 14579 static void bnxt_vpd_read_info(struct bnxt *bp) 14580 { 14581 struct pci_dev *pdev = bp->pdev; 14582 unsigned int vpd_size, kw_len; 14583 int pos, size; 14584 u8 *vpd_data; 14585 14586 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 14587 if (IS_ERR(vpd_data)) { 14588 pci_warn(pdev, "Unable to read VPD\n"); 14589 return; 14590 } 14591 14592 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 14593 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 14594 if (pos < 0) 14595 goto read_sn; 14596 14597 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 14598 memcpy(bp->board_partno, &vpd_data[pos], size); 14599 14600 read_sn: 14601 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 14602 PCI_VPD_RO_KEYWORD_SERIALNO, 14603 &kw_len); 14604 if (pos < 0) 14605 goto exit; 14606 14607 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 14608 memcpy(bp->board_serialno, &vpd_data[pos], size); 14609 exit: 14610 kfree(vpd_data); 14611 } 14612 14613 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 14614 { 14615 struct pci_dev *pdev = bp->pdev; 14616 u64 qword; 14617 14618 qword = pci_get_dsn(pdev); 14619 if (!qword) { 14620 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 14621 return -EOPNOTSUPP; 14622 } 14623 14624 put_unaligned_le64(qword, dsn); 14625 14626 bp->flags |= BNXT_FLAG_DSN_VALID; 14627 return 0; 14628 } 14629 14630 static int bnxt_map_db_bar(struct bnxt *bp) 14631 { 14632 if (!bp->db_size) 14633 return -ENODEV; 14634 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 14635 if (!bp->bar1) 14636 return -ENOMEM; 14637 return 0; 14638 } 14639 14640 void bnxt_print_device_info(struct bnxt *bp) 14641 { 14642 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 14643 board_info[bp->board_idx].name, 14644 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 14645 14646 pcie_print_link_status(bp->pdev); 14647 } 14648 14649 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 14650 { 14651 struct net_device *dev; 14652 struct bnxt *bp; 14653 int rc, max_irqs; 14654 14655 if (pci_is_bridge(pdev)) 14656 return -ENODEV; 14657 14658 /* Clear any pending DMA transactions from crash kernel 14659 * while loading driver in capture kernel. 
14660 */ 14661 if (is_kdump_kernel()) { 14662 pci_clear_master(pdev); 14663 pcie_flr(pdev); 14664 } 14665 14666 max_irqs = bnxt_get_max_irq(pdev); 14667 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 14668 max_irqs); 14669 if (!dev) 14670 return -ENOMEM; 14671 14672 bp = netdev_priv(dev); 14673 bp->board_idx = ent->driver_data; 14674 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 14675 bnxt_set_max_func_irqs(bp, max_irqs); 14676 14677 if (bnxt_vf_pciid(bp->board_idx)) 14678 bp->flags |= BNXT_FLAG_VF; 14679 14680 /* No devlink port registration in case of a VF */ 14681 if (BNXT_PF(bp)) 14682 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 14683 14684 if (pdev->msix_cap) 14685 bp->flags |= BNXT_FLAG_MSIX_CAP; 14686 14687 rc = bnxt_init_board(pdev, dev); 14688 if (rc < 0) 14689 goto init_err_free; 14690 14691 dev->netdev_ops = &bnxt_netdev_ops; 14692 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 14693 dev->ethtool_ops = &bnxt_ethtool_ops; 14694 pci_set_drvdata(pdev, dev); 14695 14696 rc = bnxt_alloc_hwrm_resources(bp); 14697 if (rc) 14698 goto init_err_pci_clean; 14699 14700 mutex_init(&bp->hwrm_cmd_lock); 14701 mutex_init(&bp->link_lock); 14702 14703 rc = bnxt_fw_init_one_p1(bp); 14704 if (rc) 14705 goto init_err_pci_clean; 14706 14707 if (BNXT_PF(bp)) 14708 bnxt_vpd_read_info(bp); 14709 14710 if (BNXT_CHIP_P5_PLUS(bp)) { 14711 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 14712 if (BNXT_CHIP_P7(bp)) 14713 bp->flags |= BNXT_FLAG_CHIP_P7; 14714 } 14715 14716 rc = bnxt_alloc_rss_indir_tbl(bp); 14717 if (rc) 14718 goto init_err_pci_clean; 14719 14720 rc = bnxt_fw_init_one_p2(bp); 14721 if (rc) 14722 goto init_err_pci_clean; 14723 14724 rc = bnxt_map_db_bar(bp); 14725 if (rc) { 14726 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 14727 rc); 14728 goto init_err_pci_clean; 14729 } 14730 14731 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 14732 NETIF_F_TSO | NETIF_F_TSO6 | 14733 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 14734 NETIF_F_GSO_IPXIP4 | 14735 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 14736 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 14737 NETIF_F_RXCSUM | NETIF_F_GRO; 14738 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 14739 dev->hw_features |= NETIF_F_GSO_UDP_L4; 14740 14741 if (BNXT_SUPPORTS_TPA(bp)) 14742 dev->hw_features |= NETIF_F_LRO; 14743 14744 dev->hw_enc_features = 14745 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 14746 NETIF_F_TSO | NETIF_F_TSO6 | 14747 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 14748 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 14749 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 14750 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 14751 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 14752 if (bp->flags & BNXT_FLAG_CHIP_P7) 14753 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 14754 else 14755 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 14756 14757 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 14758 NETIF_F_GSO_GRE_CSUM; 14759 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 14760 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 14761 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 14762 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 14763 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 14764 if (BNXT_SUPPORTS_TPA(bp)) 14765 dev->hw_features |= NETIF_F_GRO_HW; 14766 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 14767 if (dev->features & NETIF_F_GRO_HW) 14768 dev->features &= ~NETIF_F_LRO; 14769 dev->priv_flags |= IFF_UNICAST_FLT; 14770 14771 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 14772 14773 
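/* Advertise the XDP capabilities supported by this driver: basic actions, XDP_REDIRECT and multi-buffer (S/G) RX. */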
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 14774 NETDEV_XDP_ACT_RX_SG; 14775 14776 #ifdef CONFIG_BNXT_SRIOV 14777 init_waitqueue_head(&bp->sriov_cfg_wait); 14778 #endif 14779 if (BNXT_SUPPORTS_TPA(bp)) { 14780 bp->gro_func = bnxt_gro_func_5730x; 14781 if (BNXT_CHIP_P4(bp)) 14782 bp->gro_func = bnxt_gro_func_5731x; 14783 else if (BNXT_CHIP_P5_PLUS(bp)) 14784 bp->gro_func = bnxt_gro_func_5750x; 14785 } 14786 if (!BNXT_CHIP_P4_PLUS(bp)) 14787 bp->flags |= BNXT_FLAG_DOUBLE_DB; 14788 14789 rc = bnxt_init_mac_addr(bp); 14790 if (rc) { 14791 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 14792 rc = -EADDRNOTAVAIL; 14793 goto init_err_pci_clean; 14794 } 14795 14796 if (BNXT_PF(bp)) { 14797 /* Read the adapter's DSN to use as the eswitch switch_id */ 14798 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 14799 } 14800 14801 /* MTU range: 60 - FW defined max */ 14802 dev->min_mtu = ETH_ZLEN; 14803 dev->max_mtu = bp->max_mtu; 14804 14805 rc = bnxt_probe_phy(bp, true); 14806 if (rc) 14807 goto init_err_pci_clean; 14808 14809 bnxt_init_l2_fltr_tbl(bp); 14810 bnxt_set_rx_skb_mode(bp, false); 14811 bnxt_set_tpa_flags(bp); 14812 bnxt_set_ring_params(bp); 14813 rc = bnxt_set_dflt_rings(bp, true); 14814 if (rc) { 14815 if (BNXT_VF(bp) && rc == -ENODEV) { 14816 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 14817 } else { 14818 netdev_err(bp->dev, "Not enough rings available.\n"); 14819 rc = -ENOMEM; 14820 } 14821 goto init_err_pci_clean; 14822 } 14823 14824 bnxt_fw_init_one_p3(bp); 14825 14826 bnxt_init_dflt_coal(bp); 14827 14828 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 14829 bp->flags |= BNXT_FLAG_STRIP_VLAN; 14830 14831 rc = bnxt_init_int_mode(bp); 14832 if (rc) 14833 goto init_err_pci_clean; 14834 14835 /* No TC has been set yet and rings may have been trimmed due to 14836 * limited MSIX, so we re-initialize the TX rings per TC. 
14837 */ 14838 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14839 14840 if (BNXT_PF(bp)) { 14841 if (!bnxt_pf_wq) { 14842 bnxt_pf_wq = 14843 create_singlethread_workqueue("bnxt_pf_wq"); 14844 if (!bnxt_pf_wq) { 14845 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 14846 rc = -ENOMEM; 14847 goto init_err_pci_clean; 14848 } 14849 } 14850 rc = bnxt_init_tc(bp); 14851 if (rc) 14852 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 14853 rc); 14854 } 14855 14856 bnxt_inv_fw_health_reg(bp); 14857 rc = bnxt_dl_register(bp); 14858 if (rc) 14859 goto init_err_dl; 14860 14861 rc = register_netdev(dev); 14862 if (rc) 14863 goto init_err_cleanup; 14864 14865 bnxt_dl_fw_reporters_create(bp); 14866 14867 bnxt_rdma_aux_device_init(bp); 14868 14869 bnxt_print_device_info(bp); 14870 14871 pci_save_state(pdev); 14872 14873 return 0; 14874 init_err_cleanup: 14875 bnxt_dl_unregister(bp); 14876 init_err_dl: 14877 bnxt_shutdown_tc(bp); 14878 bnxt_clear_int_mode(bp); 14879 14880 init_err_pci_clean: 14881 bnxt_hwrm_func_drv_unrgtr(bp); 14882 bnxt_free_hwrm_resources(bp); 14883 bnxt_hwmon_uninit(bp); 14884 bnxt_ethtool_free(bp); 14885 bnxt_ptp_clear(bp); 14886 kfree(bp->ptp_cfg); 14887 bp->ptp_cfg = NULL; 14888 kfree(bp->fw_health); 14889 bp->fw_health = NULL; 14890 bnxt_cleanup_pci(bp); 14891 bnxt_free_ctx_mem(bp); 14892 kfree(bp->rss_indir_tbl); 14893 bp->rss_indir_tbl = NULL; 14894 14895 init_err_free: 14896 free_netdev(dev); 14897 return rc; 14898 } 14899 14900 static void bnxt_shutdown(struct pci_dev *pdev) 14901 { 14902 struct net_device *dev = pci_get_drvdata(pdev); 14903 struct bnxt *bp; 14904 14905 if (!dev) 14906 return; 14907 14908 rtnl_lock(); 14909 bp = netdev_priv(dev); 14910 if (!bp) 14911 goto shutdown_exit; 14912 14913 if (netif_running(dev)) 14914 dev_close(dev); 14915 14916 bnxt_clear_int_mode(bp); 14917 pci_disable_device(pdev); 14918 14919 if (system_state == SYSTEM_POWER_OFF) { 14920 pci_wake_from_d3(pdev, bp->wol); 14921 pci_set_power_state(pdev, PCI_D3hot); 14922 } 14923 14924 shutdown_exit: 14925 rtnl_unlock(); 14926 } 14927 14928 #ifdef CONFIG_PM_SLEEP 14929 static int bnxt_suspend(struct device *device) 14930 { 14931 struct net_device *dev = dev_get_drvdata(device); 14932 struct bnxt *bp = netdev_priv(dev); 14933 int rc = 0; 14934 14935 rtnl_lock(); 14936 bnxt_ulp_stop(bp); 14937 if (netif_running(dev)) { 14938 netif_device_detach(dev); 14939 rc = bnxt_close(dev); 14940 } 14941 bnxt_hwrm_func_drv_unrgtr(bp); 14942 pci_disable_device(bp->pdev); 14943 bnxt_free_ctx_mem(bp); 14944 rtnl_unlock(); 14945 return rc; 14946 } 14947 14948 static int bnxt_resume(struct device *device) 14949 { 14950 struct net_device *dev = dev_get_drvdata(device); 14951 struct bnxt *bp = netdev_priv(dev); 14952 int rc = 0; 14953 14954 rtnl_lock(); 14955 rc = pci_enable_device(bp->pdev); 14956 if (rc) { 14957 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 14958 rc); 14959 goto resume_exit; 14960 } 14961 pci_set_master(bp->pdev); 14962 if (bnxt_hwrm_ver_get(bp)) { 14963 rc = -ENODEV; 14964 goto resume_exit; 14965 } 14966 rc = bnxt_hwrm_func_reset(bp); 14967 if (rc) { 14968 rc = -EBUSY; 14969 goto resume_exit; 14970 } 14971 14972 rc = bnxt_hwrm_func_qcaps(bp); 14973 if (rc) 14974 goto resume_exit; 14975 14976 bnxt_clear_reservations(bp, true); 14977 14978 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 14979 rc = -ENODEV; 14980 goto resume_exit; 14981 } 14982 14983 bnxt_get_wol_settings(bp); 14984 if (netif_running(dev)) { 14985 rc = bnxt_open(dev); 14986 if (!rc) 
14987 netif_device_attach(dev); 14988 } 14989 14990 resume_exit: 14991 bnxt_ulp_start(bp, rc); 14992 if (!rc) 14993 bnxt_reenable_sriov(bp); 14994 rtnl_unlock(); 14995 return rc; 14996 } 14997 14998 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); 14999 #define BNXT_PM_OPS (&bnxt_pm_ops) 15000 15001 #else 15002 15003 #define BNXT_PM_OPS NULL 15004 15005 #endif /* CONFIG_PM_SLEEP */ 15006 15007 /** 15008 * bnxt_io_error_detected - called when PCI error is detected 15009 * @pdev: Pointer to PCI device 15010 * @state: The current pci connection state 15011 * 15012 * This function is called after a PCI bus error affecting 15013 * this device has been detected. 15014 */ 15015 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 15016 pci_channel_state_t state) 15017 { 15018 struct net_device *netdev = pci_get_drvdata(pdev); 15019 struct bnxt *bp = netdev_priv(netdev); 15020 15021 netdev_info(netdev, "PCI I/O error detected\n"); 15022 15023 rtnl_lock(); 15024 netif_device_detach(netdev); 15025 15026 bnxt_ulp_stop(bp); 15027 15028 if (state == pci_channel_io_perm_failure) { 15029 rtnl_unlock(); 15030 return PCI_ERS_RESULT_DISCONNECT; 15031 } 15032 15033 if (state == pci_channel_io_frozen) 15034 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); 15035 15036 if (netif_running(netdev)) 15037 bnxt_close(netdev); 15038 15039 if (pci_is_enabled(pdev)) 15040 pci_disable_device(pdev); 15041 bnxt_free_ctx_mem(bp); 15042 rtnl_unlock(); 15043 15044 /* Request a slot reset. */ 15045 return PCI_ERS_RESULT_NEED_RESET; 15046 } 15047 15048 /** 15049 * bnxt_io_slot_reset - called after the pci bus has been reset. 15050 * @pdev: Pointer to PCI device 15051 * 15052 * Restart the card from scratch, as if from a cold-boot. 15053 * At this point, the card has experienced a hard reset, 15054 * followed by fixups by BIOS, and has its config space 15055 * set up identically to what it was at cold boot. 15056 */ 15057 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 15058 { 15059 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 15060 struct net_device *netdev = pci_get_drvdata(pdev); 15061 struct bnxt *bp = netdev_priv(netdev); 15062 int retry = 0; 15063 int err = 0; 15064 int off; 15065 15066 netdev_info(bp->dev, "PCI Slot Reset\n"); 15067 15068 rtnl_lock(); 15069 15070 if (pci_enable_device(pdev)) { 15071 dev_err(&pdev->dev, 15072 "Cannot re-enable PCI device after reset.\n"); 15073 } else { 15074 pci_set_master(pdev); 15075 /* Upon a fatal error, the device's internal logic that latches the 15076 * BAR values is reset and is only restored by rewriting the 15077 * BARs. 15078 * 15079 * Since pci_restore_state() does not re-write a BAR if its 15080 * value matches the previously saved value, the driver must 15081 * write the BARs to 0 to force the restore after a fatal error. 15082 */ 15083 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, 15084 &bp->state)) { 15085 for (off = PCI_BASE_ADDRESS_0; 15086 off <= PCI_BASE_ADDRESS_5; off += 4) 15087 pci_write_config_dword(bp->pdev, off, 0); 15088 } 15089 pci_restore_state(pdev); 15090 pci_save_state(pdev); 15091 15092 bnxt_inv_fw_health_reg(bp); 15093 bnxt_try_map_fw_health_reg(bp); 15094 15095 /* In some PCIe AER scenarios, firmware may take up to 15096 * 10 seconds to become ready in the worst case.
15097 */ 15098 do { 15099 err = bnxt_try_recover_fw(bp); 15100 if (!err) 15101 break; 15102 retry++; 15103 } while (retry < BNXT_FW_SLOT_RESET_RETRY); 15104 15105 if (err) { 15106 dev_err(&pdev->dev, "Firmware not ready\n"); 15107 goto reset_exit; 15108 } 15109 15110 err = bnxt_hwrm_func_reset(bp); 15111 if (!err) 15112 result = PCI_ERS_RESULT_RECOVERED; 15113 15114 bnxt_ulp_irq_stop(bp); 15115 bnxt_clear_int_mode(bp); 15116 err = bnxt_init_int_mode(bp); 15117 bnxt_ulp_irq_restart(bp, err); 15118 } 15119 15120 reset_exit: 15121 bnxt_clear_reservations(bp, true); 15122 rtnl_unlock(); 15123 15124 return result; 15125 } 15126 15127 /** 15128 * bnxt_io_resume - called when traffic can start flowing again. 15129 * @pdev: Pointer to PCI device 15130 * 15131 * This callback is called when the error recovery driver tells 15132 * us that it's OK to resume normal operation. 15133 */ 15134 static void bnxt_io_resume(struct pci_dev *pdev) 15135 { 15136 struct net_device *netdev = pci_get_drvdata(pdev); 15137 struct bnxt *bp = netdev_priv(netdev); 15138 int err; 15139 15140 netdev_info(bp->dev, "PCI Slot Resume\n"); 15141 rtnl_lock(); 15142 15143 err = bnxt_hwrm_func_qcaps(bp); 15144 if (!err && netif_running(netdev)) 15145 err = bnxt_open(netdev); 15146 15147 bnxt_ulp_start(bp, err); 15148 if (!err) { 15149 bnxt_reenable_sriov(bp); 15150 netif_device_attach(netdev); 15151 } 15152 15153 rtnl_unlock(); 15154 } 15155 15156 static const struct pci_error_handlers bnxt_err_handler = { 15157 .error_detected = bnxt_io_error_detected, 15158 .slot_reset = bnxt_io_slot_reset, 15159 .resume = bnxt_io_resume 15160 }; 15161 15162 static struct pci_driver bnxt_pci_driver = { 15163 .name = DRV_MODULE_NAME, 15164 .id_table = bnxt_pci_tbl, 15165 .probe = bnxt_init_one, 15166 .remove = bnxt_remove_one, 15167 .shutdown = bnxt_shutdown, 15168 .driver.pm = BNXT_PM_OPS, 15169 .err_handler = &bnxt_err_handler, 15170 #if defined(CONFIG_BNXT_SRIOV) 15171 .sriov_configure = bnxt_sriov_configure, 15172 #endif 15173 }; 15174 15175 static int __init bnxt_init(void) 15176 { 15177 int err; 15178 15179 bnxt_debug_init(); 15180 err = pci_register_driver(&bnxt_pci_driver); 15181 if (err) { 15182 bnxt_debug_exit(); 15183 return err; 15184 } 15185 15186 return 0; 15187 } 15188 15189 static void __exit bnxt_exit(void) 15190 { 15191 pci_unregister_driver(&bnxt_pci_driver); 15192 if (bnxt_pf_wq) 15193 destroy_workqueue(bnxt_pf_wq); 15194 bnxt_debug_exit(); 15195 } 15196 15197 module_init(bnxt_init); 15198 module_exit(bnxt_exit); 15199