/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
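		/* The skb has already been freed and counted as a dropped
		 * packet above, so report NETDEV_TX_OK to keep the stack
		 * from retrying the transmit.
		 */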
return NETDEV_TX_OK; 425 } 426 427 txq = netdev_get_tx_queue(dev, i); 428 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 429 prod = txr->tx_prod; 430 431 free_size = bnxt_tx_avail(bp, txr); 432 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 433 /* We must have raced with NAPI cleanup */ 434 if (net_ratelimit() && txr->kick_pending) 435 netif_warn(bp, tx_err, dev, 436 "bnxt: ring busy w/ flush pending!\n"); 437 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 438 bp->tx_wake_thresh)) 439 return NETDEV_TX_BUSY; 440 } 441 442 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 443 goto tx_free; 444 445 length = skb->len; 446 len = skb_headlen(skb); 447 last_frag = skb_shinfo(skb)->nr_frags; 448 449 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 450 451 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 452 tx_buf->skb = skb; 453 tx_buf->nr_frags = last_frag; 454 455 vlan_tag_flags = 0; 456 cfa_action = bnxt_xmit_get_cfa_action(skb); 457 if (skb_vlan_tag_present(skb)) { 458 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 459 skb_vlan_tag_get(skb); 460 /* Currently supports 8021Q, 8021AD vlan offloads 461 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 462 */ 463 if (skb->vlan_proto == htons(ETH_P_8021Q)) 464 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 465 } 466 467 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 468 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 469 470 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && 471 atomic_dec_if_positive(&ptp->tx_avail) >= 0) { 472 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, 473 &ptp->tx_hdr_off)) { 474 if (vlan_tag_flags) 475 ptp->tx_hdr_off += VLAN_HLEN; 476 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 477 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 478 } else { 479 atomic_inc(&bp->ptp_cfg->tx_avail); 480 } 481 } 482 } 483 484 if (unlikely(skb->no_fcs)) 485 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 486 487 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 488 !lflags) { 489 struct tx_push_buffer *tx_push_buf = txr->tx_push; 490 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 491 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 492 void __iomem *db = txr->tx_db.doorbell; 493 void *pdata = tx_push_buf->data; 494 u64 *end; 495 int j, push_len; 496 497 /* Set COAL_NOW to be ready quickly for the next push */ 498 tx_push->tx_bd_len_flags_type = 499 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 500 TX_BD_TYPE_LONG_TX_BD | 501 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 502 TX_BD_FLAGS_COAL_NOW | 503 TX_BD_FLAGS_PACKET_END | 504 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 505 506 if (skb->ip_summed == CHECKSUM_PARTIAL) 507 tx_push1->tx_bd_hsize_lflags = 508 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 509 else 510 tx_push1->tx_bd_hsize_lflags = 0; 511 512 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 513 tx_push1->tx_bd_cfa_action = 514 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 515 516 end = pdata + length; 517 end = PTR_ALIGN(end, 8) - 1; 518 *end = 0; 519 520 skb_copy_from_linear_data(skb, pdata, len); 521 pdata += len; 522 for (j = 0; j < last_frag; j++) { 523 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 524 void *fptr; 525 526 fptr = skb_frag_address_safe(frag); 527 if (!fptr) 528 goto normal_tx; 529 530 memcpy(pdata, fptr, skb_frag_size(frag)); 531 pdata += skb_frag_size(frag); 532 } 533 534 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 535 txbd->tx_bd_haddr = txr->data_mapping; 536 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 537 prod = NEXT_TX(prod); 538 tx_push->tx_bd_opaque = 
txbd->tx_bd_opaque; 539 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 540 memcpy(txbd, tx_push1, sizeof(*txbd)); 541 prod = NEXT_TX(prod); 542 tx_push->doorbell = 543 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 544 DB_RING_IDX(&txr->tx_db, prod)); 545 WRITE_ONCE(txr->tx_prod, prod); 546 547 tx_buf->is_push = 1; 548 netdev_tx_sent_queue(txq, skb->len); 549 wmb(); /* Sync is_push and byte queue before pushing data */ 550 551 push_len = (length + sizeof(*tx_push) + 7) / 8; 552 if (push_len > 16) { 553 __iowrite64_copy(db, tx_push_buf, 16); 554 __iowrite32_copy(db + 4, tx_push_buf + 1, 555 (push_len - 16) << 1); 556 } else { 557 __iowrite64_copy(db, tx_push_buf, push_len); 558 } 559 560 goto tx_done; 561 } 562 563 normal_tx: 564 if (length < BNXT_MIN_PKT_SIZE) { 565 pad = BNXT_MIN_PKT_SIZE - length; 566 if (skb_pad(skb, pad)) 567 /* SKB already freed. */ 568 goto tx_kick_pending; 569 length = BNXT_MIN_PKT_SIZE; 570 } 571 572 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 573 574 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 575 goto tx_free; 576 577 dma_unmap_addr_set(tx_buf, mapping, mapping); 578 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 579 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 580 581 txbd->tx_bd_haddr = cpu_to_le64(mapping); 582 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 583 584 prod = NEXT_TX(prod); 585 txbd1 = (struct tx_bd_ext *) 586 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 587 588 txbd1->tx_bd_hsize_lflags = lflags; 589 if (skb_is_gso(skb)) { 590 u32 hdr_len; 591 592 if (skb->encapsulation) 593 hdr_len = skb_inner_tcp_all_headers(skb); 594 else 595 hdr_len = skb_tcp_all_headers(skb); 596 597 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 598 TX_BD_FLAGS_T_IPID | 599 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 600 length = skb_shinfo(skb)->gso_size; 601 txbd1->tx_bd_mss = cpu_to_le32(length); 602 length += hdr_len; 603 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 604 txbd1->tx_bd_hsize_lflags |= 605 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 606 txbd1->tx_bd_mss = 0; 607 } 608 609 length >>= 9; 610 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 611 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 612 skb->len); 613 i = 0; 614 goto tx_dma_error; 615 } 616 flags |= bnxt_lhint_arr[length]; 617 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 618 619 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 620 txbd1->tx_bd_cfa_action = 621 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 622 txbd0 = txbd; 623 for (i = 0; i < last_frag; i++) { 624 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 625 626 prod = NEXT_TX(prod); 627 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 628 629 len = skb_frag_size(frag); 630 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 631 DMA_TO_DEVICE); 632 633 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 634 goto tx_dma_error; 635 636 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 637 dma_unmap_addr_set(tx_buf, mapping, mapping); 638 639 txbd->tx_bd_haddr = cpu_to_le64(mapping); 640 641 flags = len << TX_BD_LEN_SHIFT; 642 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 643 } 644 645 flags &= ~TX_BD_LEN; 646 txbd->tx_bd_len_flags_type = 647 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 648 TX_BD_FLAGS_PACKET_END); 649 650 netdev_tx_sent_queue(txq, skb->len); 651 652 skb_tx_timestamp(skb); 653 654 prod = NEXT_TX(prod); 655 WRITE_ONCE(txr->tx_prod, prod); 656 657 if (!netdev_xmit_more() 
|| netif_xmit_stopped(txq)) { 658 bnxt_txr_db_kick(bp, txr, prod); 659 } else { 660 if (free_size >= bp->tx_wake_thresh) 661 txbd0->tx_bd_len_flags_type |= 662 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 663 txr->kick_pending = 1; 664 } 665 666 tx_done: 667 668 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 669 if (netdev_xmit_more() && !tx_buf->is_push) 670 bnxt_txr_db_kick(bp, txr, prod); 671 672 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 673 bp->tx_wake_thresh); 674 } 675 return NETDEV_TX_OK; 676 677 tx_dma_error: 678 if (BNXT_TX_PTP_IS_SET(lflags)) 679 atomic_inc(&bp->ptp_cfg->tx_avail); 680 681 last_frag = i; 682 683 /* start back at beginning and unmap skb */ 684 prod = txr->tx_prod; 685 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 686 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 687 skb_headlen(skb), DMA_TO_DEVICE); 688 prod = NEXT_TX(prod); 689 690 /* unmap remaining mapped pages */ 691 for (i = 0; i < last_frag; i++) { 692 prod = NEXT_TX(prod); 693 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 694 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 695 skb_frag_size(&skb_shinfo(skb)->frags[i]), 696 DMA_TO_DEVICE); 697 } 698 699 tx_free: 700 dev_kfree_skb_any(skb); 701 tx_kick_pending: 702 if (txr->kick_pending) 703 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 704 txr->tx_buf_ring[txr->tx_prod].skb = NULL; 705 dev_core_stats_tx_dropped_inc(dev); 706 return NETDEV_TX_OK; 707 } 708 709 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 710 int budget) 711 { 712 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 713 struct pci_dev *pdev = bp->pdev; 714 u16 hw_cons = txr->tx_hw_cons; 715 unsigned int tx_bytes = 0; 716 u16 cons = txr->tx_cons; 717 int tx_pkts = 0; 718 719 while (RING_TX(bp, cons) != hw_cons) { 720 struct bnxt_sw_tx_bd *tx_buf; 721 struct sk_buff *skb; 722 int j, last; 723 724 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 725 cons = NEXT_TX(cons); 726 skb = tx_buf->skb; 727 tx_buf->skb = NULL; 728 729 if (unlikely(!skb)) { 730 bnxt_sched_reset_txr(bp, txr, cons); 731 return; 732 } 733 734 tx_pkts++; 735 tx_bytes += skb->len; 736 737 if (tx_buf->is_push) { 738 tx_buf->is_push = 0; 739 goto next_tx_int; 740 } 741 742 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 743 skb_headlen(skb), DMA_TO_DEVICE); 744 last = tx_buf->nr_frags; 745 746 for (j = 0; j < last; j++) { 747 cons = NEXT_TX(cons); 748 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 749 dma_unmap_page( 750 &pdev->dev, 751 dma_unmap_addr(tx_buf, mapping), 752 skb_frag_size(&skb_shinfo(skb)->frags[j]), 753 DMA_TO_DEVICE); 754 } 755 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 756 if (BNXT_CHIP_P5(bp)) { 757 /* PTP worker takes ownership of the skb */ 758 if (!bnxt_get_tx_ts_p5(bp, skb)) 759 skb = NULL; 760 else 761 atomic_inc(&bp->ptp_cfg->tx_avail); 762 } 763 } 764 765 next_tx_int: 766 cons = NEXT_TX(cons); 767 768 dev_consume_skb_any(skb); 769 } 770 771 WRITE_ONCE(txr->tx_cons, cons); 772 773 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 774 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 775 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 776 } 777 778 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 779 { 780 struct bnxt_tx_ring_info *txr; 781 int i; 782 783 bnxt_for_each_napi_tx(i, bnapi, txr) { 784 if (txr->tx_hw_cons != txr->tx_cons) 785 __bnxt_tx_int(bp, txr, budget); 786 } 787 bnapi->events &= ~BNXT_TX_CMP_EVENT; 788 } 789 790 static struct page 
*__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 791 struct bnxt_rx_ring_info *rxr, 792 unsigned int *offset, 793 gfp_t gfp) 794 { 795 struct page *page; 796 797 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 798 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 799 BNXT_RX_PAGE_SIZE); 800 } else { 801 page = page_pool_dev_alloc_pages(rxr->page_pool); 802 *offset = 0; 803 } 804 if (!page) 805 return NULL; 806 807 *mapping = page_pool_get_dma_addr(page) + *offset; 808 return page; 809 } 810 811 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 812 gfp_t gfp) 813 { 814 u8 *data; 815 struct pci_dev *pdev = bp->pdev; 816 817 if (gfp == GFP_ATOMIC) 818 data = napi_alloc_frag(bp->rx_buf_size); 819 else 820 data = netdev_alloc_frag(bp->rx_buf_size); 821 if (!data) 822 return NULL; 823 824 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 825 bp->rx_buf_use_size, bp->rx_dir, 826 DMA_ATTR_WEAK_ORDERING); 827 828 if (dma_mapping_error(&pdev->dev, *mapping)) { 829 skb_free_frag(data); 830 data = NULL; 831 } 832 return data; 833 } 834 835 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 836 u16 prod, gfp_t gfp) 837 { 838 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 839 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 840 dma_addr_t mapping; 841 842 if (BNXT_RX_PAGE_MODE(bp)) { 843 unsigned int offset; 844 struct page *page = 845 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 846 847 if (!page) 848 return -ENOMEM; 849 850 mapping += bp->rx_dma_offset; 851 rx_buf->data = page; 852 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 853 } else { 854 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); 855 856 if (!data) 857 return -ENOMEM; 858 859 rx_buf->data = data; 860 rx_buf->data_ptr = data + bp->rx_offset; 861 } 862 rx_buf->mapping = mapping; 863 864 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 865 return 0; 866 } 867 868 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 869 { 870 u16 prod = rxr->rx_prod; 871 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 872 struct bnxt *bp = rxr->bnapi->bp; 873 struct rx_bd *cons_bd, *prod_bd; 874 875 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 876 cons_rx_buf = &rxr->rx_buf_ring[cons]; 877 878 prod_rx_buf->data = data; 879 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 880 881 prod_rx_buf->mapping = cons_rx_buf->mapping; 882 883 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 884 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 885 886 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 887 } 888 889 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 890 { 891 u16 next, max = rxr->rx_agg_bmap_size; 892 893 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 894 if (next >= max) 895 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 896 return next; 897 } 898 899 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 900 struct bnxt_rx_ring_info *rxr, 901 u16 prod, gfp_t gfp) 902 { 903 struct rx_bd *rxbd = 904 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 905 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 906 struct page *page; 907 dma_addr_t mapping; 908 u16 sw_prod = rxr->rx_sw_agg_prod; 909 unsigned int offset = 0; 910 911 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 912 913 if (!page) 914 return -ENOMEM; 915 916 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 917 sw_prod = bnxt_find_next_agg_idx(rxr, 
sw_prod); 918 919 __set_bit(sw_prod, rxr->rx_agg_bmap); 920 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 921 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 922 923 rx_agg_buf->page = page; 924 rx_agg_buf->offset = offset; 925 rx_agg_buf->mapping = mapping; 926 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 927 rxbd->rx_bd_opaque = sw_prod; 928 return 0; 929 } 930 931 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 932 struct bnxt_cp_ring_info *cpr, 933 u16 cp_cons, u16 curr) 934 { 935 struct rx_agg_cmp *agg; 936 937 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 938 agg = (struct rx_agg_cmp *) 939 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 940 return agg; 941 } 942 943 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 944 struct bnxt_rx_ring_info *rxr, 945 u16 agg_id, u16 curr) 946 { 947 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 948 949 return &tpa_info->agg_arr[curr]; 950 } 951 952 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 953 u16 start, u32 agg_bufs, bool tpa) 954 { 955 struct bnxt_napi *bnapi = cpr->bnapi; 956 struct bnxt *bp = bnapi->bp; 957 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 958 u16 prod = rxr->rx_agg_prod; 959 u16 sw_prod = rxr->rx_sw_agg_prod; 960 bool p5_tpa = false; 961 u32 i; 962 963 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 964 p5_tpa = true; 965 966 for (i = 0; i < agg_bufs; i++) { 967 u16 cons; 968 struct rx_agg_cmp *agg; 969 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 970 struct rx_bd *prod_bd; 971 struct page *page; 972 973 if (p5_tpa) 974 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 975 else 976 agg = bnxt_get_agg(bp, cpr, idx, start + i); 977 cons = agg->rx_agg_cmp_opaque; 978 __clear_bit(cons, rxr->rx_agg_bmap); 979 980 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 981 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 982 983 __set_bit(sw_prod, rxr->rx_agg_bmap); 984 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 985 cons_rx_buf = &rxr->rx_agg_ring[cons]; 986 987 /* It is possible for sw_prod to be equal to cons, so 988 * set cons_rx_buf->page to NULL first. 
989 */ 990 page = cons_rx_buf->page; 991 cons_rx_buf->page = NULL; 992 prod_rx_buf->page = page; 993 prod_rx_buf->offset = cons_rx_buf->offset; 994 995 prod_rx_buf->mapping = cons_rx_buf->mapping; 996 997 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 998 999 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1000 prod_bd->rx_bd_opaque = sw_prod; 1001 1002 prod = NEXT_RX_AGG(prod); 1003 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1004 } 1005 rxr->rx_agg_prod = prod; 1006 rxr->rx_sw_agg_prod = sw_prod; 1007 } 1008 1009 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1010 struct bnxt_rx_ring_info *rxr, 1011 u16 cons, void *data, u8 *data_ptr, 1012 dma_addr_t dma_addr, 1013 unsigned int offset_and_len) 1014 { 1015 unsigned int len = offset_and_len & 0xffff; 1016 struct page *page = data; 1017 u16 prod = rxr->rx_prod; 1018 struct sk_buff *skb; 1019 int err; 1020 1021 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1022 if (unlikely(err)) { 1023 bnxt_reuse_rx_data(rxr, cons, data); 1024 return NULL; 1025 } 1026 dma_addr -= bp->rx_dma_offset; 1027 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1028 bp->rx_dir); 1029 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1030 if (!skb) { 1031 page_pool_recycle_direct(rxr->page_pool, page); 1032 return NULL; 1033 } 1034 skb_mark_for_recycle(skb); 1035 skb_reserve(skb, bp->rx_offset); 1036 __skb_put(skb, len); 1037 1038 return skb; 1039 } 1040 1041 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1042 struct bnxt_rx_ring_info *rxr, 1043 u16 cons, void *data, u8 *data_ptr, 1044 dma_addr_t dma_addr, 1045 unsigned int offset_and_len) 1046 { 1047 unsigned int payload = offset_and_len >> 16; 1048 unsigned int len = offset_and_len & 0xffff; 1049 skb_frag_t *frag; 1050 struct page *page = data; 1051 u16 prod = rxr->rx_prod; 1052 struct sk_buff *skb; 1053 int off, err; 1054 1055 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1056 if (unlikely(err)) { 1057 bnxt_reuse_rx_data(rxr, cons, data); 1058 return NULL; 1059 } 1060 dma_addr -= bp->rx_dma_offset; 1061 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1062 bp->rx_dir); 1063 1064 if (unlikely(!payload)) 1065 payload = eth_get_headlen(bp->dev, data_ptr, len); 1066 1067 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1068 if (!skb) { 1069 page_pool_recycle_direct(rxr->page_pool, page); 1070 return NULL; 1071 } 1072 1073 skb_mark_for_recycle(skb); 1074 off = (void *)data_ptr - page_address(page); 1075 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1076 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1077 payload + NET_IP_ALIGN); 1078 1079 frag = &skb_shinfo(skb)->frags[0]; 1080 skb_frag_size_sub(frag, payload); 1081 skb_frag_off_add(frag, payload); 1082 skb->data_len -= payload; 1083 skb->tail += payload; 1084 1085 return skb; 1086 } 1087 1088 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1089 struct bnxt_rx_ring_info *rxr, u16 cons, 1090 void *data, u8 *data_ptr, 1091 dma_addr_t dma_addr, 1092 unsigned int offset_and_len) 1093 { 1094 u16 prod = rxr->rx_prod; 1095 struct sk_buff *skb; 1096 int err; 1097 1098 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1099 if (unlikely(err)) { 1100 bnxt_reuse_rx_data(rxr, cons, data); 1101 return NULL; 1102 } 1103 1104 skb = napi_build_skb(data, bp->rx_buf_size); 1105 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1106 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 1107 if (!skb) { 1108 
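		/* napi_build_skb() failed: the buffer has already been
		 * unmapped above, so free the page fragment and drop the
		 * packet.
		 */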
skb_free_frag(data); 1109 return NULL; 1110 } 1111 1112 skb_reserve(skb, bp->rx_offset); 1113 skb_put(skb, offset_and_len & 0xffff); 1114 return skb; 1115 } 1116 1117 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1118 struct bnxt_cp_ring_info *cpr, 1119 struct skb_shared_info *shinfo, 1120 u16 idx, u32 agg_bufs, bool tpa, 1121 struct xdp_buff *xdp) 1122 { 1123 struct bnxt_napi *bnapi = cpr->bnapi; 1124 struct pci_dev *pdev = bp->pdev; 1125 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1126 u16 prod = rxr->rx_agg_prod; 1127 u32 i, total_frag_len = 0; 1128 bool p5_tpa = false; 1129 1130 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1131 p5_tpa = true; 1132 1133 for (i = 0; i < agg_bufs; i++) { 1134 skb_frag_t *frag = &shinfo->frags[i]; 1135 u16 cons, frag_len; 1136 struct rx_agg_cmp *agg; 1137 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1138 struct page *page; 1139 dma_addr_t mapping; 1140 1141 if (p5_tpa) 1142 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1143 else 1144 agg = bnxt_get_agg(bp, cpr, idx, i); 1145 cons = agg->rx_agg_cmp_opaque; 1146 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1147 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1148 1149 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1150 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1151 cons_rx_buf->offset, frag_len); 1152 shinfo->nr_frags = i + 1; 1153 __clear_bit(cons, rxr->rx_agg_bmap); 1154 1155 /* It is possible for bnxt_alloc_rx_page() to allocate 1156 * a sw_prod index that equals the cons index, so we 1157 * need to clear the cons entry now. 1158 */ 1159 mapping = cons_rx_buf->mapping; 1160 page = cons_rx_buf->page; 1161 cons_rx_buf->page = NULL; 1162 1163 if (xdp && page_is_pfmemalloc(page)) 1164 xdp_buff_set_frag_pfmemalloc(xdp); 1165 1166 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1167 --shinfo->nr_frags; 1168 cons_rx_buf->page = page; 1169 1170 /* Update prod since possibly some pages have been 1171 * allocated already. 
1172 */ 1173 rxr->rx_agg_prod = prod; 1174 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1175 return 0; 1176 } 1177 1178 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1179 bp->rx_dir); 1180 1181 total_frag_len += frag_len; 1182 prod = NEXT_RX_AGG(prod); 1183 } 1184 rxr->rx_agg_prod = prod; 1185 return total_frag_len; 1186 } 1187 1188 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1189 struct bnxt_cp_ring_info *cpr, 1190 struct sk_buff *skb, u16 idx, 1191 u32 agg_bufs, bool tpa) 1192 { 1193 struct skb_shared_info *shinfo = skb_shinfo(skb); 1194 u32 total_frag_len = 0; 1195 1196 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1197 agg_bufs, tpa, NULL); 1198 if (!total_frag_len) { 1199 skb_mark_for_recycle(skb); 1200 dev_kfree_skb(skb); 1201 return NULL; 1202 } 1203 1204 skb->data_len += total_frag_len; 1205 skb->len += total_frag_len; 1206 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1207 return skb; 1208 } 1209 1210 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1211 struct bnxt_cp_ring_info *cpr, 1212 struct xdp_buff *xdp, u16 idx, 1213 u32 agg_bufs, bool tpa) 1214 { 1215 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1216 u32 total_frag_len = 0; 1217 1218 if (!xdp_buff_has_frags(xdp)) 1219 shinfo->nr_frags = 0; 1220 1221 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1222 idx, agg_bufs, tpa, xdp); 1223 if (total_frag_len) { 1224 xdp_buff_set_frags_flag(xdp); 1225 shinfo->nr_frags = agg_bufs; 1226 shinfo->xdp_frags_size = total_frag_len; 1227 } 1228 return total_frag_len; 1229 } 1230 1231 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1232 u8 agg_bufs, u32 *raw_cons) 1233 { 1234 u16 last; 1235 struct rx_agg_cmp *agg; 1236 1237 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1238 last = RING_CMP(*raw_cons); 1239 agg = (struct rx_agg_cmp *) 1240 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1241 return RX_AGG_CMP_VALID(agg, *raw_cons); 1242 } 1243 1244 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1245 unsigned int len, 1246 dma_addr_t mapping) 1247 { 1248 struct bnxt *bp = bnapi->bp; 1249 struct pci_dev *pdev = bp->pdev; 1250 struct sk_buff *skb; 1251 1252 skb = napi_alloc_skb(&bnapi->napi, len); 1253 if (!skb) 1254 return NULL; 1255 1256 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1257 bp->rx_dir); 1258 1259 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1260 len + NET_IP_ALIGN); 1261 1262 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1263 bp->rx_dir); 1264 1265 skb_put(skb, len); 1266 return skb; 1267 } 1268 1269 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1270 u32 *raw_cons, void *cmp) 1271 { 1272 struct rx_cmp *rxcmp = cmp; 1273 u32 tmp_raw_cons = *raw_cons; 1274 u8 cmp_type, agg_bufs = 0; 1275 1276 cmp_type = RX_CMP_TYPE(rxcmp); 1277 1278 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1279 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1280 RX_CMP_AGG_BUFS) >> 1281 RX_CMP_AGG_BUFS_SHIFT; 1282 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1283 struct rx_tpa_end_cmp *tpa_end = cmp; 1284 1285 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1286 return 0; 1287 1288 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1289 } 1290 1291 if (agg_bufs) { 1292 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1293 return -EBUSY; 1294 } 1295 *raw_cons = tmp_raw_cons; 1296 return 0; 1297 } 1298 1299 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1300 { 
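	/* Map the hardware agg_id to a free per-ring TPA slot: claim a clear
	 * bit in agg_idx_bmap and record the mapping in agg_id_tbl so that
	 * later TPA completions can look it up.
	 */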
1301 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1302 u16 idx = agg_id & MAX_TPA_P5_MASK; 1303 1304 if (test_bit(idx, map->agg_idx_bmap)) 1305 idx = find_first_zero_bit(map->agg_idx_bmap, 1306 BNXT_AGG_IDX_BMAP_SIZE); 1307 __set_bit(idx, map->agg_idx_bmap); 1308 map->agg_id_tbl[agg_id] = idx; 1309 return idx; 1310 } 1311 1312 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1313 { 1314 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1315 1316 __clear_bit(idx, map->agg_idx_bmap); 1317 } 1318 1319 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1320 { 1321 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1322 1323 return map->agg_id_tbl[agg_id]; 1324 } 1325 1326 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1327 struct rx_tpa_start_cmp *tpa_start, 1328 struct rx_tpa_start_cmp_ext *tpa_start1) 1329 { 1330 tpa_info->cfa_code_valid = 1; 1331 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1332 tpa_info->vlan_valid = 0; 1333 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1334 tpa_info->vlan_valid = 1; 1335 tpa_info->metadata = 1336 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1337 } 1338 } 1339 1340 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1341 struct rx_tpa_start_cmp *tpa_start, 1342 struct rx_tpa_start_cmp_ext *tpa_start1) 1343 { 1344 tpa_info->vlan_valid = 0; 1345 if (TPA_START_VLAN_VALID(tpa_start)) { 1346 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1347 u32 vlan_proto = ETH_P_8021Q; 1348 1349 tpa_info->vlan_valid = 1; 1350 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1351 vlan_proto = ETH_P_8021AD; 1352 tpa_info->metadata = vlan_proto << 16 | 1353 TPA_START_METADATA0_TCI(tpa_start1); 1354 } 1355 } 1356 1357 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1358 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1359 struct rx_tpa_start_cmp_ext *tpa_start1) 1360 { 1361 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1362 struct bnxt_tpa_info *tpa_info; 1363 u16 cons, prod, agg_id; 1364 struct rx_bd *prod_bd; 1365 dma_addr_t mapping; 1366 1367 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1368 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1369 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1370 } else { 1371 agg_id = TPA_START_AGG_ID(tpa_start); 1372 } 1373 cons = tpa_start->rx_tpa_start_cmp_opaque; 1374 prod = rxr->rx_prod; 1375 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1376 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1377 tpa_info = &rxr->rx_tpa[agg_id]; 1378 1379 if (unlikely(cons != rxr->rx_next_cons || 1380 TPA_START_ERROR(tpa_start))) { 1381 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1382 cons, rxr->rx_next_cons, 1383 TPA_START_ERROR_CODE(tpa_start1)); 1384 bnxt_sched_reset_rxr(bp, rxr); 1385 return; 1386 } 1387 prod_rx_buf->data = tpa_info->data; 1388 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1389 1390 mapping = tpa_info->mapping; 1391 prod_rx_buf->mapping = mapping; 1392 1393 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1394 1395 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1396 1397 tpa_info->data = cons_rx_buf->data; 1398 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1399 cons_rx_buf->data = NULL; 1400 tpa_info->mapping = cons_rx_buf->mapping; 1401 1402 tpa_info->len = 1403 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1404 RX_TPA_START_CMP_LEN_SHIFT; 1405 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1406 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1407 tpa_info->gso_type = 
SKB_GSO_TCPV4; 1408 if (TPA_START_IS_IPV6(tpa_start1)) 1409 tpa_info->gso_type = SKB_GSO_TCPV6; 1410 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1411 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && 1412 TPA_START_HASH_TYPE(tpa_start) == 3) 1413 tpa_info->gso_type = SKB_GSO_TCPV6; 1414 tpa_info->rss_hash = 1415 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1416 } else { 1417 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1418 tpa_info->gso_type = 0; 1419 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1420 } 1421 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1422 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1423 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1424 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1425 else 1426 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1427 tpa_info->agg_count = 0; 1428 1429 rxr->rx_prod = NEXT_RX(prod); 1430 cons = RING_RX(bp, NEXT_RX(cons)); 1431 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1432 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1433 1434 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1435 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1436 cons_rx_buf->data = NULL; 1437 } 1438 1439 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1440 { 1441 if (agg_bufs) 1442 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1443 } 1444 1445 #ifdef CONFIG_INET 1446 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1447 { 1448 struct udphdr *uh = NULL; 1449 1450 if (ip_proto == htons(ETH_P_IP)) { 1451 struct iphdr *iph = (struct iphdr *)skb->data; 1452 1453 if (iph->protocol == IPPROTO_UDP) 1454 uh = (struct udphdr *)(iph + 1); 1455 } else { 1456 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1457 1458 if (iph->nexthdr == IPPROTO_UDP) 1459 uh = (struct udphdr *)(iph + 1); 1460 } 1461 if (uh) { 1462 if (uh->check) 1463 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1464 else 1465 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1466 } 1467 } 1468 #endif 1469 1470 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1471 int payload_off, int tcp_ts, 1472 struct sk_buff *skb) 1473 { 1474 #ifdef CONFIG_INET 1475 struct tcphdr *th; 1476 int len, nw_off; 1477 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1478 u32 hdr_info = tpa_info->hdr_info; 1479 bool loopback = false; 1480 1481 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1482 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1483 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1484 1485 /* If the packet is an internal loopback packet, the offsets will 1486 * have an extra 4 bytes. 1487 */ 1488 if (inner_mac_off == 4) { 1489 loopback = true; 1490 } else if (inner_mac_off > 4) { 1491 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1492 ETH_HLEN - 2)); 1493 1494 /* We only support inner iPv4/ipv6. If we don't see the 1495 * correct protocol ID, it must be a loopback packet where 1496 * the offsets are off by 4. 
1497 */ 1498 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1499 loopback = true; 1500 } 1501 if (loopback) { 1502 /* internal loopback packet, subtract all offsets by 4 */ 1503 inner_ip_off -= 4; 1504 inner_mac_off -= 4; 1505 outer_ip_off -= 4; 1506 } 1507 1508 nw_off = inner_ip_off - ETH_HLEN; 1509 skb_set_network_header(skb, nw_off); 1510 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1511 struct ipv6hdr *iph = ipv6_hdr(skb); 1512 1513 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1514 len = skb->len - skb_transport_offset(skb); 1515 th = tcp_hdr(skb); 1516 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1517 } else { 1518 struct iphdr *iph = ip_hdr(skb); 1519 1520 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1521 len = skb->len - skb_transport_offset(skb); 1522 th = tcp_hdr(skb); 1523 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1524 } 1525 1526 if (inner_mac_off) { /* tunnel */ 1527 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1528 ETH_HLEN - 2)); 1529 1530 bnxt_gro_tunnel(skb, proto); 1531 } 1532 #endif 1533 return skb; 1534 } 1535 1536 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1537 int payload_off, int tcp_ts, 1538 struct sk_buff *skb) 1539 { 1540 #ifdef CONFIG_INET 1541 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1542 u32 hdr_info = tpa_info->hdr_info; 1543 int iphdr_len, nw_off; 1544 1545 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1546 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1547 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1548 1549 nw_off = inner_ip_off - ETH_HLEN; 1550 skb_set_network_header(skb, nw_off); 1551 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 1552 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1553 skb_set_transport_header(skb, nw_off + iphdr_len); 1554 1555 if (inner_mac_off) { /* tunnel */ 1556 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1557 ETH_HLEN - 2)); 1558 1559 bnxt_gro_tunnel(skb, proto); 1560 } 1561 #endif 1562 return skb; 1563 } 1564 1565 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1566 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1567 1568 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1569 int payload_off, int tcp_ts, 1570 struct sk_buff *skb) 1571 { 1572 #ifdef CONFIG_INET 1573 struct tcphdr *th; 1574 int len, nw_off, tcp_opt_len = 0; 1575 1576 if (tcp_ts) 1577 tcp_opt_len = 12; 1578 1579 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1580 struct iphdr *iph; 1581 1582 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1583 ETH_HLEN; 1584 skb_set_network_header(skb, nw_off); 1585 iph = ip_hdr(skb); 1586 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1587 len = skb->len - skb_transport_offset(skb); 1588 th = tcp_hdr(skb); 1589 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1590 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1591 struct ipv6hdr *iph; 1592 1593 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1594 ETH_HLEN; 1595 skb_set_network_header(skb, nw_off); 1596 iph = ipv6_hdr(skb); 1597 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1598 len = skb->len - skb_transport_offset(skb); 1599 th = tcp_hdr(skb); 1600 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1601 } else { 1602 dev_kfree_skb_any(skb); 1603 return NULL; 1604 } 1605 1606 if (nw_off) /* tunnel */ 1607 bnxt_gro_tunnel(skb, skb->protocol); 1608 #endif 
1609 return skb; 1610 } 1611 1612 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1613 struct bnxt_tpa_info *tpa_info, 1614 struct rx_tpa_end_cmp *tpa_end, 1615 struct rx_tpa_end_cmp_ext *tpa_end1, 1616 struct sk_buff *skb) 1617 { 1618 #ifdef CONFIG_INET 1619 int payload_off; 1620 u16 segs; 1621 1622 segs = TPA_END_TPA_SEGS(tpa_end); 1623 if (segs == 1) 1624 return skb; 1625 1626 NAPI_GRO_CB(skb)->count = segs; 1627 skb_shinfo(skb)->gso_size = 1628 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1629 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1630 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1631 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1632 else 1633 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1634 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1635 if (likely(skb)) 1636 tcp_gro_complete(skb); 1637 #endif 1638 return skb; 1639 } 1640 1641 /* Given the cfa_code of a received packet determine which 1642 * netdev (vf-rep or PF) the packet is destined to. 1643 */ 1644 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1645 { 1646 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1647 1648 /* if vf-rep dev is NULL, the must belongs to the PF */ 1649 return dev ? dev : bp->dev; 1650 } 1651 1652 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1653 struct bnxt_cp_ring_info *cpr, 1654 u32 *raw_cons, 1655 struct rx_tpa_end_cmp *tpa_end, 1656 struct rx_tpa_end_cmp_ext *tpa_end1, 1657 u8 *event) 1658 { 1659 struct bnxt_napi *bnapi = cpr->bnapi; 1660 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1661 struct net_device *dev = bp->dev; 1662 u8 *data_ptr, agg_bufs; 1663 unsigned int len; 1664 struct bnxt_tpa_info *tpa_info; 1665 dma_addr_t mapping; 1666 struct sk_buff *skb; 1667 u16 idx = 0, agg_id; 1668 void *data; 1669 bool gro; 1670 1671 if (unlikely(bnapi->in_reset)) { 1672 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1673 1674 if (rc < 0) 1675 return ERR_PTR(-EBUSY); 1676 return NULL; 1677 } 1678 1679 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1680 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1681 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1682 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1683 tpa_info = &rxr->rx_tpa[agg_id]; 1684 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1685 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1686 agg_bufs, tpa_info->agg_count); 1687 agg_bufs = tpa_info->agg_count; 1688 } 1689 tpa_info->agg_count = 0; 1690 *event |= BNXT_AGG_EVENT; 1691 bnxt_free_agg_idx(rxr, agg_id); 1692 idx = agg_id; 1693 gro = !!(bp->flags & BNXT_FLAG_GRO); 1694 } else { 1695 agg_id = TPA_END_AGG_ID(tpa_end); 1696 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1697 tpa_info = &rxr->rx_tpa[agg_id]; 1698 idx = RING_CMP(*raw_cons); 1699 if (agg_bufs) { 1700 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1701 return ERR_PTR(-EBUSY); 1702 1703 *event |= BNXT_AGG_EVENT; 1704 idx = NEXT_CMP(idx); 1705 } 1706 gro = !!TPA_END_GRO(tpa_end); 1707 } 1708 data = tpa_info->data; 1709 data_ptr = tpa_info->data_ptr; 1710 prefetch(data_ptr); 1711 len = tpa_info->len; 1712 mapping = tpa_info->mapping; 1713 1714 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1715 bnxt_abort_tpa(cpr, idx, agg_bufs); 1716 if (agg_bufs > MAX_SKB_FRAGS) 1717 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1718 agg_bufs, (int)MAX_SKB_FRAGS); 1719 return NULL; 1720 } 1721 1722 if (len <= bp->rx_copy_thresh) { 1723 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1724 if (!skb) { 1725 
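			/* Copy-skb allocation failed: recycle the aggregation
			 * buffers and account the drop as an rx_oom_discard.
			 */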
bnxt_abort_tpa(cpr, idx, agg_bufs); 1726 cpr->sw_stats.rx.rx_oom_discards += 1; 1727 return NULL; 1728 } 1729 } else { 1730 u8 *new_data; 1731 dma_addr_t new_mapping; 1732 1733 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); 1734 if (!new_data) { 1735 bnxt_abort_tpa(cpr, idx, agg_bufs); 1736 cpr->sw_stats.rx.rx_oom_discards += 1; 1737 return NULL; 1738 } 1739 1740 tpa_info->data = new_data; 1741 tpa_info->data_ptr = new_data + bp->rx_offset; 1742 tpa_info->mapping = new_mapping; 1743 1744 skb = napi_build_skb(data, bp->rx_buf_size); 1745 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1746 bp->rx_buf_use_size, bp->rx_dir, 1747 DMA_ATTR_WEAK_ORDERING); 1748 1749 if (!skb) { 1750 skb_free_frag(data); 1751 bnxt_abort_tpa(cpr, idx, agg_bufs); 1752 cpr->sw_stats.rx.rx_oom_discards += 1; 1753 return NULL; 1754 } 1755 skb_reserve(skb, bp->rx_offset); 1756 skb_put(skb, len); 1757 } 1758 1759 if (agg_bufs) { 1760 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1761 if (!skb) { 1762 /* Page reuse already handled by bnxt_rx_pages(). */ 1763 cpr->sw_stats.rx.rx_oom_discards += 1; 1764 return NULL; 1765 } 1766 } 1767 1768 if (tpa_info->cfa_code_valid) 1769 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1770 skb->protocol = eth_type_trans(skb, dev); 1771 1772 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1773 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1774 1775 if (tpa_info->vlan_valid && 1776 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1777 __be16 vlan_proto = htons(tpa_info->metadata >> 1778 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1779 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1780 1781 if (eth_type_vlan(vlan_proto)) { 1782 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1783 } else { 1784 dev_kfree_skb(skb); 1785 return NULL; 1786 } 1787 } 1788 1789 skb_checksum_none_assert(skb); 1790 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1791 skb->ip_summed = CHECKSUM_UNNECESSARY; 1792 skb->csum_level = 1793 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1794 } 1795 1796 if (gro) 1797 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1798 1799 return skb; 1800 } 1801 1802 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1803 struct rx_agg_cmp *rx_agg) 1804 { 1805 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1806 struct bnxt_tpa_info *tpa_info; 1807 1808 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1809 tpa_info = &rxr->rx_tpa[agg_id]; 1810 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1811 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1812 } 1813 1814 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1815 struct sk_buff *skb) 1816 { 1817 if (skb->dev != bp->dev) { 1818 /* this packet belongs to a vf-rep */ 1819 bnxt_vf_rep_rx(bp, skb); 1820 return; 1821 } 1822 skb_record_rx_queue(skb, bnapi->index); 1823 skb_mark_for_recycle(skb); 1824 napi_gro_receive(&bnapi->napi, skb); 1825 } 1826 1827 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1828 struct rx_cmp *rxcmp, 1829 struct rx_cmp_ext *rxcmp1) 1830 { 1831 __be16 vlan_proto; 1832 u16 vtag; 1833 1834 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1835 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1836 u32 meta_data; 1837 1838 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1839 return skb; 1840 1841 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1842 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1843 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1844 if 
(eth_type_vlan(vlan_proto)) 1845 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1846 else 1847 goto vlan_err; 1848 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 1849 if (RX_CMP_VLAN_VALID(rxcmp)) { 1850 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 1851 1852 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 1853 vlan_proto = htons(ETH_P_8021Q); 1854 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 1855 vlan_proto = htons(ETH_P_8021AD); 1856 else 1857 goto vlan_err; 1858 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 1859 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1860 } 1861 } 1862 return skb; 1863 vlan_err: 1864 dev_kfree_skb(skb); 1865 return NULL; 1866 } 1867 1868 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 1869 struct rx_cmp *rxcmp) 1870 { 1871 u8 ext_op; 1872 1873 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 1874 switch (ext_op) { 1875 case EXT_OP_INNER_4: 1876 case EXT_OP_OUTER_4: 1877 case EXT_OP_INNFL_3: 1878 case EXT_OP_OUTFL_3: 1879 return PKT_HASH_TYPE_L4; 1880 default: 1881 return PKT_HASH_TYPE_L3; 1882 } 1883 } 1884 1885 /* returns the following: 1886 * 1 - 1 packet successfully received 1887 * 0 - successful TPA_START, packet not completed yet 1888 * -EBUSY - completion ring does not have all the agg buffers yet 1889 * -ENOMEM - packet aborted due to out of memory 1890 * -EIO - packet aborted due to hw error indicated in BD 1891 */ 1892 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1893 u32 *raw_cons, u8 *event) 1894 { 1895 struct bnxt_napi *bnapi = cpr->bnapi; 1896 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1897 struct net_device *dev = bp->dev; 1898 struct rx_cmp *rxcmp; 1899 struct rx_cmp_ext *rxcmp1; 1900 u32 tmp_raw_cons = *raw_cons; 1901 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1902 struct bnxt_sw_rx_bd *rx_buf; 1903 unsigned int len; 1904 u8 *data_ptr, agg_bufs, cmp_type; 1905 bool xdp_active = false; 1906 dma_addr_t dma_addr; 1907 struct sk_buff *skb; 1908 struct xdp_buff xdp; 1909 u32 flags, misc; 1910 void *data; 1911 int rc = 0; 1912 1913 rxcmp = (struct rx_cmp *) 1914 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1915 1916 cmp_type = RX_CMP_TYPE(rxcmp); 1917 1918 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1919 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1920 goto next_rx_no_prod_no_len; 1921 } 1922 1923 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1924 cp_cons = RING_CMP(tmp_raw_cons); 1925 rxcmp1 = (struct rx_cmp_ext *) 1926 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1927 1928 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1929 return -EBUSY; 1930 1931 /* The valid test of the entry must be done first before 1932 * reading any further. 
1933 */ 1934 dma_rmb(); 1935 prod = rxr->rx_prod; 1936 1937 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 1938 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 1939 bnxt_tpa_start(bp, rxr, cmp_type, 1940 (struct rx_tpa_start_cmp *)rxcmp, 1941 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1942 1943 *event |= BNXT_RX_EVENT; 1944 goto next_rx_no_prod_no_len; 1945 1946 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1947 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 1948 (struct rx_tpa_end_cmp *)rxcmp, 1949 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1950 1951 if (IS_ERR(skb)) 1952 return -EBUSY; 1953 1954 rc = -ENOMEM; 1955 if (likely(skb)) { 1956 bnxt_deliver_skb(bp, bnapi, skb); 1957 rc = 1; 1958 } 1959 *event |= BNXT_RX_EVENT; 1960 goto next_rx_no_prod_no_len; 1961 } 1962 1963 cons = rxcmp->rx_cmp_opaque; 1964 if (unlikely(cons != rxr->rx_next_cons)) { 1965 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 1966 1967 /* 0xffff is forced error, don't print it */ 1968 if (rxr->rx_next_cons != 0xffff) 1969 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1970 cons, rxr->rx_next_cons); 1971 bnxt_sched_reset_rxr(bp, rxr); 1972 if (rc1) 1973 return rc1; 1974 goto next_rx_no_prod_no_len; 1975 } 1976 rx_buf = &rxr->rx_buf_ring[cons]; 1977 data = rx_buf->data; 1978 data_ptr = rx_buf->data_ptr; 1979 prefetch(data_ptr); 1980 1981 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1982 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1983 1984 if (agg_bufs) { 1985 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1986 return -EBUSY; 1987 1988 cp_cons = NEXT_CMP(cp_cons); 1989 *event |= BNXT_AGG_EVENT; 1990 } 1991 *event |= BNXT_RX_EVENT; 1992 1993 rx_buf->data = NULL; 1994 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1995 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1996 1997 bnxt_reuse_rx_data(rxr, cons, data); 1998 if (agg_bufs) 1999 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2000 false); 2001 2002 rc = -EIO; 2003 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2004 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 2005 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2006 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2007 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2008 rx_err); 2009 bnxt_sched_reset_rxr(bp, rxr); 2010 } 2011 } 2012 goto next_rx_no_len; 2013 } 2014 2015 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2016 len = flags >> RX_CMP_LEN_SHIFT; 2017 dma_addr = rx_buf->mapping; 2018 2019 if (bnxt_xdp_attached(bp, rxr)) { 2020 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2021 if (agg_bufs) { 2022 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2023 cp_cons, agg_bufs, 2024 false); 2025 if (!frag_len) { 2026 cpr->sw_stats.rx.rx_oom_discards += 1; 2027 rc = -ENOMEM; 2028 goto next_rx; 2029 } 2030 } 2031 xdp_active = true; 2032 } 2033 2034 if (xdp_active) { 2035 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { 2036 rc = 1; 2037 goto next_rx; 2038 } 2039 } 2040 2041 if (len <= bp->rx_copy_thresh) { 2042 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2043 bnxt_reuse_rx_data(rxr, cons, data); 2044 if (!skb) { 2045 if (agg_bufs) { 2046 if (!xdp_active) 2047 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2048 agg_bufs, false); 2049 else 2050 bnxt_xdp_buff_frags_free(rxr, &xdp); 2051 } 2052 cpr->sw_stats.rx.rx_oom_discards += 1; 2053 rc = -ENOMEM; 2054 goto next_rx; 2055 } 2056 } else { 2057 u32 payload; 2058 2059 if (rx_buf->data_ptr == data_ptr) 2060 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2061 else 
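/* data_ptr was adjusted (e.g. by the XDP program above), so don't pass the completion's payload offset hint to rx_skb_func. */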
2062 payload = 0; 2063 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2064 payload | len); 2065 if (!skb) { 2066 cpr->sw_stats.rx.rx_oom_discards += 1; 2067 rc = -ENOMEM; 2068 goto next_rx; 2069 } 2070 } 2071 2072 if (agg_bufs) { 2073 if (!xdp_active) { 2074 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2075 if (!skb) { 2076 cpr->sw_stats.rx.rx_oom_discards += 1; 2077 rc = -ENOMEM; 2078 goto next_rx; 2079 } 2080 } else { 2081 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); 2082 if (!skb) { 2083 /* we should be able to free the old skb here */ 2084 bnxt_xdp_buff_frags_free(rxr, &xdp); 2085 cpr->sw_stats.rx.rx_oom_discards += 1; 2086 rc = -ENOMEM; 2087 goto next_rx; 2088 } 2089 } 2090 } 2091 2092 if (RX_CMP_HASH_VALID(rxcmp)) { 2093 enum pkt_hash_types type; 2094 2095 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2096 type = bnxt_rss_ext_op(bp, rxcmp); 2097 } else { 2098 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 2099 2100 /* RSS profiles 1 and 3 with extract code 0 for inner 2101 * 4-tuple 2102 */ 2103 if (hash_type != 1 && hash_type != 3) 2104 type = PKT_HASH_TYPE_L3; 2105 else 2106 type = PKT_HASH_TYPE_L4; 2107 } 2108 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2109 } 2110 2111 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2112 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2113 skb->protocol = eth_type_trans(skb, dev); 2114 2115 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2116 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2117 if (!skb) 2118 goto next_rx; 2119 } 2120 2121 skb_checksum_none_assert(skb); 2122 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2123 if (dev->features & NETIF_F_RXCSUM) { 2124 skb->ip_summed = CHECKSUM_UNNECESSARY; 2125 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2126 } 2127 } else { 2128 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2129 if (dev->features & NETIF_F_RXCSUM) 2130 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 2131 } 2132 } 2133 2134 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == 2135 RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { 2136 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2137 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 2138 u64 ns, ts; 2139 2140 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2141 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2142 2143 spin_lock_bh(&ptp->ptp_lock); 2144 ns = timecounter_cyc2time(&ptp->tc, ts); 2145 spin_unlock_bh(&ptp->ptp_lock); 2146 memset(skb_hwtstamps(skb), 0, 2147 sizeof(*skb_hwtstamps(skb))); 2148 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2149 } 2150 } 2151 } 2152 bnxt_deliver_skb(bp, bnapi, skb); 2153 rc = 1; 2154 2155 next_rx: 2156 cpr->rx_packets += 1; 2157 cpr->rx_bytes += len; 2158 2159 next_rx_no_len: 2160 rxr->rx_prod = NEXT_RX(prod); 2161 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2162 2163 next_rx_no_prod_no_len: 2164 *raw_cons = tmp_raw_cons; 2165 2166 return rc; 2167 } 2168 2169 /* In netpoll mode, if we are using a combined completion ring, we need to 2170 * discard the rx packets and recycle the buffers. 
2171 */ 2172 static int bnxt_force_rx_discard(struct bnxt *bp, 2173 struct bnxt_cp_ring_info *cpr, 2174 u32 *raw_cons, u8 *event) 2175 { 2176 u32 tmp_raw_cons = *raw_cons; 2177 struct rx_cmp_ext *rxcmp1; 2178 struct rx_cmp *rxcmp; 2179 u16 cp_cons; 2180 u8 cmp_type; 2181 int rc; 2182 2183 cp_cons = RING_CMP(tmp_raw_cons); 2184 rxcmp = (struct rx_cmp *) 2185 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2186 2187 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2188 cp_cons = RING_CMP(tmp_raw_cons); 2189 rxcmp1 = (struct rx_cmp_ext *) 2190 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2191 2192 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2193 return -EBUSY; 2194 2195 /* The valid test of the entry must be done first before 2196 * reading any further. 2197 */ 2198 dma_rmb(); 2199 cmp_type = RX_CMP_TYPE(rxcmp); 2200 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2201 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2202 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2203 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2204 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2205 struct rx_tpa_end_cmp_ext *tpa_end1; 2206 2207 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2208 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2209 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2210 } 2211 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2212 if (rc && rc != -EBUSY) 2213 cpr->sw_stats.rx.rx_netpoll_discards += 1; 2214 return rc; 2215 } 2216 2217 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2218 { 2219 struct bnxt_fw_health *fw_health = bp->fw_health; 2220 u32 reg = fw_health->regs[reg_idx]; 2221 u32 reg_type, reg_off, val = 0; 2222 2223 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2224 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2225 switch (reg_type) { 2226 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2227 pci_read_config_dword(bp->pdev, reg_off, &val); 2228 break; 2229 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2230 reg_off = fw_health->mapped_regs[reg_idx]; 2231 fallthrough; 2232 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2233 val = readl(bp->bar0 + reg_off); 2234 break; 2235 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2236 val = readl(bp->bar1 + reg_off); 2237 break; 2238 } 2239 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2240 val &= fw_health->fw_reset_inprog_reg_mask; 2241 return val; 2242 } 2243 2244 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2245 { 2246 int i; 2247 2248 for (i = 0; i < bp->rx_nr_rings; i++) { 2249 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2250 struct bnxt_ring_grp_info *grp_info; 2251 2252 grp_info = &bp->grp_info[grp_idx]; 2253 if (grp_info->agg_fw_ring_id == ring_id) 2254 return grp_idx; 2255 } 2256 return INVALID_HW_RING_ID; 2257 } 2258 2259 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2260 { 2261 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2262 2263 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2264 return link_info->force_link_speed2; 2265 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2266 return link_info->force_pam4_link_speed; 2267 return link_info->force_link_speed; 2268 } 2269 2270 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2271 { 2272 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2273 2274 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2275 link_info->req_link_speed = link_info->force_link_speed2; 2276 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2277 switch (link_info->req_link_speed) { 2278 case BNXT_LINK_SPEED_50GB_PAM4: 2279 case BNXT_LINK_SPEED_100GB_PAM4: 2280 case BNXT_LINK_SPEED_200GB_PAM4: 2281 case BNXT_LINK_SPEED_400GB_PAM4: 
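/* Base PAM4 speeds use PAM4 signaling; the *_PAM4_112 speeds below use PAM4-112 and all other speeds default to NRZ. */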
2282 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2283 break; 2284 case BNXT_LINK_SPEED_100GB_PAM4_112: 2285 case BNXT_LINK_SPEED_200GB_PAM4_112: 2286 case BNXT_LINK_SPEED_400GB_PAM4_112: 2287 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2288 break; 2289 default: 2290 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2291 } 2292 return; 2293 } 2294 link_info->req_link_speed = link_info->force_link_speed; 2295 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2296 if (link_info->force_pam4_link_speed) { 2297 link_info->req_link_speed = link_info->force_pam4_link_speed; 2298 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2299 } 2300 } 2301 2302 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2303 { 2304 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2305 2306 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2307 link_info->advertising = link_info->auto_link_speeds2; 2308 return; 2309 } 2310 link_info->advertising = link_info->auto_link_speeds; 2311 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2312 } 2313 2314 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2315 { 2316 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2317 2318 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2319 if (link_info->req_link_speed != link_info->force_link_speed2) 2320 return true; 2321 return false; 2322 } 2323 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2324 link_info->req_link_speed != link_info->force_link_speed) 2325 return true; 2326 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2327 link_info->req_link_speed != link_info->force_pam4_link_speed) 2328 return true; 2329 return false; 2330 } 2331 2332 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2333 { 2334 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2335 2336 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2337 if (link_info->advertising != link_info->auto_link_speeds2) 2338 return true; 2339 return false; 2340 } 2341 if (link_info->advertising != link_info->auto_link_speeds || 2342 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2343 return true; 2344 return false; 2345 } 2346 2347 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2348 ((data2) & \ 2349 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2350 2351 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2352 (((data2) & \ 2353 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2354 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2355 2356 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2357 ((data1) & \ 2358 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2359 2360 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2361 (((data1) & \ 2362 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2363 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2364 2365 /* Return true if the workqueue has to be scheduled */ 2366 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2367 { 2368 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2369 2370 switch (err_type) { 2371 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2372 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2373 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2374 break; 2375 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2376 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2377 break; 2378 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2379 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2380 break; 2381 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2382 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2383 char *threshold_type; 2384 bool notify = false; 2385 char *dir_str; 2386 2387 switch (type) { 2388 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2389 threshold_type = "warning"; 2390 break; 2391 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2392 threshold_type = "critical"; 2393 break; 2394 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2395 threshold_type = "fatal"; 2396 break; 2397 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2398 threshold_type = "shutdown"; 2399 break; 2400 default: 2401 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2402 return false; 2403 } 2404 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2405 dir_str = "above"; 2406 notify = true; 2407 } else { 2408 dir_str = "below"; 2409 } 2410 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2411 dir_str, threshold_type); 2412 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2413 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2414 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2415 if (notify) { 2416 bp->thermal_threshold_type = type; 2417 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2418 return true; 2419 } 2420 return false; 2421 } 2422 default: 2423 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2424 err_type); 2425 break; 2426 } 2427 return false; 2428 } 2429 2430 #define BNXT_GET_EVENT_PORT(data) \ 2431 ((data) & \ 2432 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2433 2434 #define BNXT_EVENT_RING_TYPE(data2) \ 2435 ((data2) & \ 2436 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2437 2438 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2439 (BNXT_EVENT_RING_TYPE(data2) == \ 2440 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2441 2442 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2443 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2444 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2445 2446 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2447 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2448 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2449 2450 #define BNXT_PHC_BITS 48 2451 2452 static int bnxt_async_event_process(struct bnxt *bp, 2453 struct hwrm_async_event_cmpl *cmpl) 2454 { 2455 u16 event_id = le16_to_cpu(cmpl->event_id); 2456 u32 data1 = le32_to_cpu(cmpl->event_data1); 2457 u32 data2 = le32_to_cpu(cmpl->event_data2); 2458 2459 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2460 event_id, data1, data2); 2461 2462 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 2463 switch (event_id) { 2464 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2465 struct bnxt_link_info *link_info = &bp->link_info; 2466 2467 if (BNXT_VF(bp)) 2468 goto async_event_process_exit; 
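/* The driver treats a set 0x20000 bit in data1 as "forced link speed no longer supported" and warns only when autoneg is off. */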
2469 2470 /* print unsupported speed warning in forced speed mode only */ 2471 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2472 (data1 & 0x20000)) { 2473 u16 fw_speed = bnxt_get_force_speed(link_info); 2474 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2475 2476 if (speed != SPEED_UNKNOWN) 2477 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2478 speed); 2479 } 2480 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2481 } 2482 fallthrough; 2483 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2484 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2485 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2486 fallthrough; 2487 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2488 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2489 break; 2490 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2491 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2492 break; 2493 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2494 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2495 2496 if (BNXT_VF(bp)) 2497 break; 2498 2499 if (bp->pf.port_id != port_id) 2500 break; 2501 2502 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2503 break; 2504 } 2505 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2506 if (BNXT_PF(bp)) 2507 goto async_event_process_exit; 2508 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2509 break; 2510 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2511 char *type_str = "Solicited"; 2512 2513 if (!bp->fw_health) 2514 goto async_event_process_exit; 2515 2516 bp->fw_reset_timestamp = jiffies; 2517 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2518 if (!bp->fw_reset_min_dsecs) 2519 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2520 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2521 if (!bp->fw_reset_max_dsecs) 2522 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2523 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2524 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2525 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2526 type_str = "Fatal"; 2527 bp->fw_health->fatalities++; 2528 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2529 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2530 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2531 type_str = "Non-fatal"; 2532 bp->fw_health->survivals++; 2533 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2534 } 2535 netif_warn(bp, hw, bp->dev, 2536 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2537 type_str, data1, data2, 2538 bp->fw_reset_min_dsecs * 100, 2539 bp->fw_reset_max_dsecs * 100); 2540 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2541 break; 2542 } 2543 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2544 struct bnxt_fw_health *fw_health = bp->fw_health; 2545 char *status_desc = "healthy"; 2546 u32 status; 2547 2548 if (!fw_health) 2549 goto async_event_process_exit; 2550 2551 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2552 fw_health->enabled = false; 2553 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2554 break; 2555 } 2556 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2557 fw_health->tmr_multiplier = 2558 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2559 bp->current_interval * 10); 2560 fw_health->tmr_counter = fw_health->tmr_multiplier; 2561 if (!fw_health->enabled) 2562 fw_health->last_fw_heartbeat = 2563 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2564 fw_health->last_fw_reset_cnt = 2565 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2566 status = 
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2567 if (status != BNXT_FW_STATUS_HEALTHY) 2568 status_desc = "unhealthy"; 2569 netif_info(bp, drv, bp->dev, 2570 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2571 fw_health->primary ? "primary" : "backup", status, 2572 status_desc, fw_health->last_fw_reset_cnt); 2573 if (!fw_health->enabled) { 2574 /* Make sure tmr_counter is set and visible to 2575 * bnxt_health_check() before setting enabled to true. 2576 */ 2577 smp_wmb(); 2578 fw_health->enabled = true; 2579 } 2580 goto async_event_process_exit; 2581 } 2582 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2583 netif_notice(bp, hw, bp->dev, 2584 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2585 data1, data2); 2586 goto async_event_process_exit; 2587 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2588 struct bnxt_rx_ring_info *rxr; 2589 u16 grp_idx; 2590 2591 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2592 goto async_event_process_exit; 2593 2594 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2595 BNXT_EVENT_RING_TYPE(data2), data1); 2596 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2597 goto async_event_process_exit; 2598 2599 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2600 if (grp_idx == INVALID_HW_RING_ID) { 2601 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2602 data1); 2603 goto async_event_process_exit; 2604 } 2605 rxr = bp->bnapi[grp_idx]->rx_ring; 2606 bnxt_sched_reset_rxr(bp, rxr); 2607 goto async_event_process_exit; 2608 } 2609 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2610 struct bnxt_fw_health *fw_health = bp->fw_health; 2611 2612 netif_notice(bp, hw, bp->dev, 2613 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2614 data1, data2); 2615 if (fw_health) { 2616 fw_health->echo_req_data1 = data1; 2617 fw_health->echo_req_data2 = data2; 2618 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2619 break; 2620 } 2621 goto async_event_process_exit; 2622 } 2623 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2624 bnxt_ptp_pps_event(bp, data1, data2); 2625 goto async_event_process_exit; 2626 } 2627 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2628 if (bnxt_event_error_report(bp, data1, data2)) 2629 break; 2630 goto async_event_process_exit; 2631 } 2632 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2633 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2634 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2635 if (BNXT_PTP_USE_RTC(bp)) { 2636 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2637 u64 ns; 2638 2639 if (!ptp) 2640 goto async_event_process_exit; 2641 2642 spin_lock_bh(&ptp->ptp_lock); 2643 bnxt_ptp_update_current_time(bp); 2644 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2645 BNXT_PHC_BITS) | ptp->current_time); 2646 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2647 spin_unlock_bh(&ptp->ptp_lock); 2648 } 2649 break; 2650 } 2651 goto async_event_process_exit; 2652 } 2653 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2654 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2655 2656 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2657 goto async_event_process_exit; 2658 } 2659 default: 2660 goto async_event_process_exit; 2661 } 2662 __bnxt_queue_sp_work(bp); 2663 async_event_process_exit: 2664 return 0; 2665 } 2666 2667 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2668 { 2669 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2670 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2671 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2672 
(struct hwrm_fwd_req_cmpl *)txcmp; 2673 2674 switch (cmpl_type) { 2675 case CMPL_BASE_TYPE_HWRM_DONE: 2676 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2677 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2678 break; 2679 2680 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2681 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2682 2683 if ((vf_id < bp->pf.first_vf_id) || 2684 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2685 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2686 vf_id); 2687 return -EINVAL; 2688 } 2689 2690 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2691 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2692 break; 2693 2694 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2695 bnxt_async_event_process(bp, 2696 (struct hwrm_async_event_cmpl *)txcmp); 2697 break; 2698 2699 default: 2700 break; 2701 } 2702 2703 return 0; 2704 } 2705 2706 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2707 { 2708 struct bnxt_napi *bnapi = dev_instance; 2709 struct bnxt *bp = bnapi->bp; 2710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2711 u32 cons = RING_CMP(cpr->cp_raw_cons); 2712 2713 cpr->event_ctr++; 2714 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2715 napi_schedule(&bnapi->napi); 2716 return IRQ_HANDLED; 2717 } 2718 2719 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2720 { 2721 u32 raw_cons = cpr->cp_raw_cons; 2722 u16 cons = RING_CMP(raw_cons); 2723 struct tx_cmp *txcmp; 2724 2725 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2726 2727 return TX_CMP_VALID(txcmp, raw_cons); 2728 } 2729 2730 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2731 { 2732 struct bnxt_napi *bnapi = dev_instance; 2733 struct bnxt *bp = bnapi->bp; 2734 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2735 u32 cons = RING_CMP(cpr->cp_raw_cons); 2736 u32 int_status; 2737 2738 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2739 2740 if (!bnxt_has_work(bp, cpr)) { 2741 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2742 /* return if erroneous interrupt */ 2743 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2744 return IRQ_NONE; 2745 } 2746 2747 /* disable ring IRQ */ 2748 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2749 2750 /* Return here if interrupt is shared and is disabled. */ 2751 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2752 return IRQ_HANDLED; 2753 2754 napi_schedule(&bnapi->napi); 2755 return IRQ_HANDLED; 2756 } 2757 2758 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2759 int budget) 2760 { 2761 struct bnxt_napi *bnapi = cpr->bnapi; 2762 u32 raw_cons = cpr->cp_raw_cons; 2763 u32 cons; 2764 int rx_pkts = 0; 2765 u8 event = 0; 2766 struct tx_cmp *txcmp; 2767 2768 cpr->has_more_work = 0; 2769 cpr->had_work_done = 1; 2770 while (1) { 2771 u8 cmp_type; 2772 int rc; 2773 2774 cons = RING_CMP(raw_cons); 2775 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2776 2777 if (!TX_CMP_VALID(txcmp, raw_cons)) 2778 break; 2779 2780 /* The valid test of the entry must be done first before 2781 * reading any further. 
2782 */ 2783 dma_rmb(); 2784 cmp_type = TX_CMP_TYPE(txcmp); 2785 if (cmp_type == CMP_TYPE_TX_L2_CMP) { 2786 u32 opaque = txcmp->tx_cmp_opaque; 2787 struct bnxt_tx_ring_info *txr; 2788 u16 tx_freed; 2789 2790 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2791 event |= BNXT_TX_CMP_EVENT; 2792 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2793 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2794 bp->tx_ring_mask; 2795 /* return full budget so NAPI will complete. */ 2796 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2797 rx_pkts = budget; 2798 raw_cons = NEXT_RAW_CMP(raw_cons); 2799 if (budget) 2800 cpr->has_more_work = 1; 2801 break; 2802 } 2803 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 2804 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2805 if (likely(budget)) 2806 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2807 else 2808 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2809 &event); 2810 if (likely(rc >= 0)) 2811 rx_pkts += rc; 2812 /* Increment rx_pkts when rc is -ENOMEM to count towards 2813 * the NAPI budget. Otherwise, we may potentially loop 2814 * here forever if we consistently cannot allocate 2815 * buffers. 2816 */ 2817 else if (rc == -ENOMEM && budget) 2818 rx_pkts++; 2819 else if (rc == -EBUSY) /* partial completion */ 2820 break; 2821 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 2822 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 2823 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 2824 bnxt_hwrm_handler(bp, txcmp); 2825 } 2826 raw_cons = NEXT_RAW_CMP(raw_cons); 2827 2828 if (rx_pkts && rx_pkts == budget) { 2829 cpr->has_more_work = 1; 2830 break; 2831 } 2832 } 2833 2834 if (event & BNXT_REDIRECT_EVENT) 2835 xdp_do_flush(); 2836 2837 if (event & BNXT_TX_EVENT) { 2838 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 2839 u16 prod = txr->tx_prod; 2840 2841 /* Sync BD data before updating doorbell */ 2842 wmb(); 2843 2844 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2845 } 2846 2847 cpr->cp_raw_cons = raw_cons; 2848 bnapi->events |= event; 2849 return rx_pkts; 2850 } 2851 2852 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2853 int budget) 2854 { 2855 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 2856 bnapi->tx_int(bp, bnapi, budget); 2857 2858 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2859 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2860 2861 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2862 } 2863 if (bnapi->events & BNXT_AGG_EVENT) { 2864 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2865 2866 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2867 } 2868 bnapi->events &= BNXT_TX_CMP_EVENT; 2869 } 2870 2871 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2872 int budget) 2873 { 2874 struct bnxt_napi *bnapi = cpr->bnapi; 2875 int rx_pkts; 2876 2877 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2878 2879 /* ACK completion ring before freeing tx ring and producing new 2880 * buffers in rx/agg rings to prevent overflowing the completion 2881 * ring. 
2882 */ 2883 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2884 2885 __bnxt_poll_work_done(bp, bnapi, budget); 2886 return rx_pkts; 2887 } 2888 2889 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2890 { 2891 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2892 struct bnxt *bp = bnapi->bp; 2893 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2894 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2895 struct tx_cmp *txcmp; 2896 struct rx_cmp_ext *rxcmp1; 2897 u32 cp_cons, tmp_raw_cons; 2898 u32 raw_cons = cpr->cp_raw_cons; 2899 bool flush_xdp = false; 2900 u32 rx_pkts = 0; 2901 u8 event = 0; 2902 2903 while (1) { 2904 int rc; 2905 2906 cp_cons = RING_CMP(raw_cons); 2907 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2908 2909 if (!TX_CMP_VALID(txcmp, raw_cons)) 2910 break; 2911 2912 /* The valid test of the entry must be done first before 2913 * reading any further. 2914 */ 2915 dma_rmb(); 2916 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2917 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2918 cp_cons = RING_CMP(tmp_raw_cons); 2919 rxcmp1 = (struct rx_cmp_ext *) 2920 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2921 2922 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2923 break; 2924 2925 /* force an error to recycle the buffer */ 2926 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2927 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2928 2929 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2930 if (likely(rc == -EIO) && budget) 2931 rx_pkts++; 2932 else if (rc == -EBUSY) /* partial completion */ 2933 break; 2934 if (event & BNXT_REDIRECT_EVENT) 2935 flush_xdp = true; 2936 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2937 CMPL_BASE_TYPE_HWRM_DONE)) { 2938 bnxt_hwrm_handler(bp, txcmp); 2939 } else { 2940 netdev_err(bp->dev, 2941 "Invalid completion received on special ring\n"); 2942 } 2943 raw_cons = NEXT_RAW_CMP(raw_cons); 2944 2945 if (rx_pkts == budget) 2946 break; 2947 } 2948 2949 cpr->cp_raw_cons = raw_cons; 2950 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2951 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2952 2953 if (event & BNXT_AGG_EVENT) 2954 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2955 if (flush_xdp) 2956 xdp_do_flush(); 2957 2958 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2959 napi_complete_done(napi, rx_pkts); 2960 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2961 } 2962 return rx_pkts; 2963 } 2964 2965 static int bnxt_poll(struct napi_struct *napi, int budget) 2966 { 2967 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2968 struct bnxt *bp = bnapi->bp; 2969 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2970 int work_done = 0; 2971 2972 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 2973 napi_complete(napi); 2974 return 0; 2975 } 2976 while (1) { 2977 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2978 2979 if (work_done >= budget) { 2980 if (!budget) 2981 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2982 break; 2983 } 2984 2985 if (!bnxt_has_work(bp, cpr)) { 2986 if (napi_complete_done(napi, work_done)) 2987 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2988 break; 2989 } 2990 } 2991 if (bp->flags & BNXT_FLAG_DIM) { 2992 struct dim_sample dim_sample = {}; 2993 2994 dim_update_sample(cpr->event_ctr, 2995 cpr->rx_packets, 2996 cpr->rx_bytes, 2997 &dim_sample); 2998 net_dim(&cpr->dim, dim_sample); 2999 } 3000 return work_done; 3001 } 3002 3003 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3004 { 3005 struct bnxt_cp_ring_info *cpr = 
&bnapi->cp_ring; 3006 int i, work_done = 0; 3007 3008 for (i = 0; i < cpr->cp_ring_count; i++) { 3009 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3010 3011 if (cpr2->had_nqe_notify) { 3012 work_done += __bnxt_poll_work(bp, cpr2, 3013 budget - work_done); 3014 cpr->has_more_work |= cpr2->has_more_work; 3015 } 3016 } 3017 return work_done; 3018 } 3019 3020 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3021 u64 dbr_type, int budget) 3022 { 3023 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3024 int i; 3025 3026 for (i = 0; i < cpr->cp_ring_count; i++) { 3027 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3028 struct bnxt_db_info *db; 3029 3030 if (cpr2->had_work_done) { 3031 u32 tgl = 0; 3032 3033 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3034 cpr2->had_nqe_notify = 0; 3035 tgl = cpr2->toggle; 3036 } 3037 db = &cpr2->cp_db; 3038 bnxt_writeq(bp, 3039 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3040 DB_RING_IDX(db, cpr2->cp_raw_cons), 3041 db->doorbell); 3042 cpr2->had_work_done = 0; 3043 } 3044 } 3045 __bnxt_poll_work_done(bp, bnapi, budget); 3046 } 3047 3048 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3049 { 3050 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3051 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3052 struct bnxt_cp_ring_info *cpr_rx; 3053 u32 raw_cons = cpr->cp_raw_cons; 3054 struct bnxt *bp = bnapi->bp; 3055 struct nqe_cn *nqcmp; 3056 int work_done = 0; 3057 u32 cons; 3058 3059 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3060 napi_complete(napi); 3061 return 0; 3062 } 3063 if (cpr->has_more_work) { 3064 cpr->has_more_work = 0; 3065 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3066 } 3067 while (1) { 3068 u16 type; 3069 3070 cons = RING_CMP(raw_cons); 3071 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3072 3073 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3074 if (cpr->has_more_work) 3075 break; 3076 3077 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3078 budget); 3079 cpr->cp_raw_cons = raw_cons; 3080 if (napi_complete_done(napi, work_done)) 3081 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3082 cpr->cp_raw_cons); 3083 goto poll_done; 3084 } 3085 3086 /* The valid test of the entry must be done first before 3087 * reading any further. 
3088 */ 3089 dma_rmb(); 3090 3091 type = le16_to_cpu(nqcmp->type); 3092 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3093 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3094 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3095 struct bnxt_cp_ring_info *cpr2; 3096 3097 /* No more budget for RX work */ 3098 if (budget && work_done >= budget && 3099 cq_type == BNXT_NQ_HDL_TYPE_RX) 3100 break; 3101 3102 idx = BNXT_NQ_HDL_IDX(idx); 3103 cpr2 = &cpr->cp_ring_arr[idx]; 3104 cpr2->had_nqe_notify = 1; 3105 cpr2->toggle = NQE_CN_TOGGLE(type); 3106 work_done += __bnxt_poll_work(bp, cpr2, 3107 budget - work_done); 3108 cpr->has_more_work |= cpr2->has_more_work; 3109 } else { 3110 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3111 } 3112 raw_cons = NEXT_RAW_CMP(raw_cons); 3113 } 3114 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3115 if (raw_cons != cpr->cp_raw_cons) { 3116 cpr->cp_raw_cons = raw_cons; 3117 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3118 } 3119 poll_done: 3120 cpr_rx = &cpr->cp_ring_arr[0]; 3121 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3122 (bp->flags & BNXT_FLAG_DIM)) { 3123 struct dim_sample dim_sample = {}; 3124 3125 dim_update_sample(cpr->event_ctr, 3126 cpr_rx->rx_packets, 3127 cpr_rx->rx_bytes, 3128 &dim_sample); 3129 net_dim(&cpr->dim, dim_sample); 3130 } 3131 return work_done; 3132 } 3133 3134 static void bnxt_free_tx_skbs(struct bnxt *bp) 3135 { 3136 int i, max_idx; 3137 struct pci_dev *pdev = bp->pdev; 3138 3139 if (!bp->tx_ring) 3140 return; 3141 3142 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3143 for (i = 0; i < bp->tx_nr_rings; i++) { 3144 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3145 int j; 3146 3147 if (!txr->tx_buf_ring) 3148 continue; 3149 3150 for (j = 0; j < max_idx;) { 3151 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 3152 struct sk_buff *skb; 3153 int k, last; 3154 3155 if (i < bp->tx_nr_rings_xdp && 3156 tx_buf->action == XDP_REDIRECT) { 3157 dma_unmap_single(&pdev->dev, 3158 dma_unmap_addr(tx_buf, mapping), 3159 dma_unmap_len(tx_buf, len), 3160 DMA_TO_DEVICE); 3161 xdp_return_frame(tx_buf->xdpf); 3162 tx_buf->action = 0; 3163 tx_buf->xdpf = NULL; 3164 j++; 3165 continue; 3166 } 3167 3168 skb = tx_buf->skb; 3169 if (!skb) { 3170 j++; 3171 continue; 3172 } 3173 3174 tx_buf->skb = NULL; 3175 3176 if (tx_buf->is_push) { 3177 dev_kfree_skb(skb); 3178 j += 2; 3179 continue; 3180 } 3181 3182 dma_unmap_single(&pdev->dev, 3183 dma_unmap_addr(tx_buf, mapping), 3184 skb_headlen(skb), 3185 DMA_TO_DEVICE); 3186 3187 last = tx_buf->nr_frags; 3188 j += 2; 3189 for (k = 0; k < last; k++, j++) { 3190 int ring_idx = j & bp->tx_ring_mask; 3191 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 3192 3193 tx_buf = &txr->tx_buf_ring[ring_idx]; 3194 dma_unmap_page( 3195 &pdev->dev, 3196 dma_unmap_addr(tx_buf, mapping), 3197 skb_frag_size(frag), DMA_TO_DEVICE); 3198 } 3199 dev_kfree_skb(skb); 3200 } 3201 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 3202 } 3203 } 3204 3205 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 3206 { 3207 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3208 struct pci_dev *pdev = bp->pdev; 3209 struct bnxt_tpa_idx_map *map; 3210 int i, max_idx, max_agg_idx; 3211 3212 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3213 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3214 if (!rxr->rx_tpa) 3215 goto skip_rx_tpa_free; 3216 3217 for (i = 0; i < bp->max_tpa; i++) { 3218 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3219 u8 *data = tpa_info->data; 3220 3221 if (!data) 3222 continue; 3223 3224 
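/* Unmap the TPA buffer and free the fragment that backs it (allocated via __bnxt_alloc_rx_frag() at ring init). */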
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 3225 bp->rx_buf_use_size, bp->rx_dir, 3226 DMA_ATTR_WEAK_ORDERING); 3227 3228 tpa_info->data = NULL; 3229 3230 skb_free_frag(data); 3231 } 3232 3233 skip_rx_tpa_free: 3234 if (!rxr->rx_buf_ring) 3235 goto skip_rx_buf_free; 3236 3237 for (i = 0; i < max_idx; i++) { 3238 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3239 dma_addr_t mapping = rx_buf->mapping; 3240 void *data = rx_buf->data; 3241 3242 if (!data) 3243 continue; 3244 3245 rx_buf->data = NULL; 3246 if (BNXT_RX_PAGE_MODE(bp)) { 3247 page_pool_recycle_direct(rxr->page_pool, data); 3248 } else { 3249 dma_unmap_single_attrs(&pdev->dev, mapping, 3250 bp->rx_buf_use_size, bp->rx_dir, 3251 DMA_ATTR_WEAK_ORDERING); 3252 skb_free_frag(data); 3253 } 3254 } 3255 3256 skip_rx_buf_free: 3257 if (!rxr->rx_agg_ring) 3258 goto skip_rx_agg_free; 3259 3260 for (i = 0; i < max_agg_idx; i++) { 3261 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3262 struct page *page = rx_agg_buf->page; 3263 3264 if (!page) 3265 continue; 3266 3267 rx_agg_buf->page = NULL; 3268 __clear_bit(i, rxr->rx_agg_bmap); 3269 3270 page_pool_recycle_direct(rxr->page_pool, page); 3271 } 3272 3273 skip_rx_agg_free: 3274 map = rxr->rx_tpa_idx_map; 3275 if (map) 3276 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3277 } 3278 3279 static void bnxt_free_rx_skbs(struct bnxt *bp) 3280 { 3281 int i; 3282 3283 if (!bp->rx_ring) 3284 return; 3285 3286 for (i = 0; i < bp->rx_nr_rings; i++) 3287 bnxt_free_one_rx_ring_skbs(bp, i); 3288 } 3289 3290 static void bnxt_free_skbs(struct bnxt *bp) 3291 { 3292 bnxt_free_tx_skbs(bp); 3293 bnxt_free_rx_skbs(bp); 3294 } 3295 3296 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3297 { 3298 u8 init_val = ctxm->init_value; 3299 u16 offset = ctxm->init_offset; 3300 u8 *p2 = p; 3301 int i; 3302 3303 if (!init_val) 3304 return; 3305 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3306 memset(p, init_val, len); 3307 return; 3308 } 3309 for (i = 0; i < len; i += ctxm->entry_size) 3310 *(p2 + i + offset) = init_val; 3311 } 3312 3313 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3314 { 3315 struct pci_dev *pdev = bp->pdev; 3316 int i; 3317 3318 if (!rmem->pg_arr) 3319 goto skip_pages; 3320 3321 for (i = 0; i < rmem->nr_pages; i++) { 3322 if (!rmem->pg_arr[i]) 3323 continue; 3324 3325 dma_free_coherent(&pdev->dev, rmem->page_size, 3326 rmem->pg_arr[i], rmem->dma_arr[i]); 3327 3328 rmem->pg_arr[i] = NULL; 3329 } 3330 skip_pages: 3331 if (rmem->pg_tbl) { 3332 size_t pg_tbl_size = rmem->nr_pages * 8; 3333 3334 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3335 pg_tbl_size = rmem->page_size; 3336 dma_free_coherent(&pdev->dev, pg_tbl_size, 3337 rmem->pg_tbl, rmem->pg_tbl_map); 3338 rmem->pg_tbl = NULL; 3339 } 3340 if (rmem->vmem_size && *rmem->vmem) { 3341 vfree(*rmem->vmem); 3342 *rmem->vmem = NULL; 3343 } 3344 } 3345 3346 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3347 { 3348 struct pci_dev *pdev = bp->pdev; 3349 u64 valid_bit = 0; 3350 int i; 3351 3352 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3353 valid_bit = PTU_PTE_VALID; 3354 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3355 size_t pg_tbl_size = rmem->nr_pages * 8; 3356 3357 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3358 pg_tbl_size = rmem->page_size; 3359 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3360 &rmem->pg_tbl_map, 3361 GFP_KERNEL); 3362 if 
(!rmem->pg_tbl) 3363 return -ENOMEM; 3364 } 3365 3366 for (i = 0; i < rmem->nr_pages; i++) { 3367 u64 extra_bits = valid_bit; 3368 3369 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3370 rmem->page_size, 3371 &rmem->dma_arr[i], 3372 GFP_KERNEL); 3373 if (!rmem->pg_arr[i]) 3374 return -ENOMEM; 3375 3376 if (rmem->ctx_mem) 3377 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3378 rmem->page_size); 3379 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3380 if (i == rmem->nr_pages - 2 && 3381 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3382 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3383 else if (i == rmem->nr_pages - 1 && 3384 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3385 extra_bits |= PTU_PTE_LAST; 3386 rmem->pg_tbl[i] = 3387 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3388 } 3389 } 3390 3391 if (rmem->vmem_size) { 3392 *rmem->vmem = vzalloc(rmem->vmem_size); 3393 if (!(*rmem->vmem)) 3394 return -ENOMEM; 3395 } 3396 return 0; 3397 } 3398 3399 static void bnxt_free_tpa_info(struct bnxt *bp) 3400 { 3401 int i, j; 3402 3403 for (i = 0; i < bp->rx_nr_rings; i++) { 3404 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3405 3406 kfree(rxr->rx_tpa_idx_map); 3407 rxr->rx_tpa_idx_map = NULL; 3408 if (rxr->rx_tpa) { 3409 for (j = 0; j < bp->max_tpa; j++) { 3410 kfree(rxr->rx_tpa[j].agg_arr); 3411 rxr->rx_tpa[j].agg_arr = NULL; 3412 } 3413 } 3414 kfree(rxr->rx_tpa); 3415 rxr->rx_tpa = NULL; 3416 } 3417 } 3418 3419 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3420 { 3421 int i, j; 3422 3423 bp->max_tpa = MAX_TPA; 3424 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3425 if (!bp->max_tpa_v2) 3426 return 0; 3427 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3428 } 3429 3430 for (i = 0; i < bp->rx_nr_rings; i++) { 3431 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3432 struct rx_agg_cmp *agg; 3433 3434 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3435 GFP_KERNEL); 3436 if (!rxr->rx_tpa) 3437 return -ENOMEM; 3438 3439 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3440 continue; 3441 for (j = 0; j < bp->max_tpa; j++) { 3442 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3443 if (!agg) 3444 return -ENOMEM; 3445 rxr->rx_tpa[j].agg_arr = agg; 3446 } 3447 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3448 GFP_KERNEL); 3449 if (!rxr->rx_tpa_idx_map) 3450 return -ENOMEM; 3451 } 3452 return 0; 3453 } 3454 3455 static void bnxt_free_rx_rings(struct bnxt *bp) 3456 { 3457 int i; 3458 3459 if (!bp->rx_ring) 3460 return; 3461 3462 bnxt_free_tpa_info(bp); 3463 for (i = 0; i < bp->rx_nr_rings; i++) { 3464 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3465 struct bnxt_ring_struct *ring; 3466 3467 if (rxr->xdp_prog) 3468 bpf_prog_put(rxr->xdp_prog); 3469 3470 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3471 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3472 3473 page_pool_destroy(rxr->page_pool); 3474 rxr->page_pool = NULL; 3475 3476 kfree(rxr->rx_agg_bmap); 3477 rxr->rx_agg_bmap = NULL; 3478 3479 ring = &rxr->rx_ring_struct; 3480 bnxt_free_ring(bp, &ring->ring_mem); 3481 3482 ring = &rxr->rx_agg_ring_struct; 3483 bnxt_free_ring(bp, &ring->ring_mem); 3484 } 3485 } 3486 3487 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3488 struct bnxt_rx_ring_info *rxr) 3489 { 3490 struct page_pool_params pp = { 0 }; 3491 3492 pp.pool_size = bp->rx_agg_ring_size; 3493 if (BNXT_RX_PAGE_MODE(bp)) 3494 pp.pool_size += bp->rx_ring_size; 3495 pp.nid = dev_to_node(&bp->pdev->dev); 3496 pp.napi = &rxr->bnapi->napi; 3497 pp.netdev = bp->dev; 3498 pp.dev = &bp->pdev->dev; 3499 pp.dma_dir = bp->rx_dir; 3500 
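/* The pool maps pages for DMA and syncs them for the device when they are recycled (see the flags set below). */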
pp.max_len = PAGE_SIZE; 3501 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3502 3503 rxr->page_pool = page_pool_create(&pp); 3504 if (IS_ERR(rxr->page_pool)) { 3505 int err = PTR_ERR(rxr->page_pool); 3506 3507 rxr->page_pool = NULL; 3508 return err; 3509 } 3510 return 0; 3511 } 3512 3513 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3514 { 3515 int i, rc = 0, agg_rings = 0; 3516 3517 if (!bp->rx_ring) 3518 return -ENOMEM; 3519 3520 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3521 agg_rings = 1; 3522 3523 for (i = 0; i < bp->rx_nr_rings; i++) { 3524 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3525 struct bnxt_ring_struct *ring; 3526 3527 ring = &rxr->rx_ring_struct; 3528 3529 rc = bnxt_alloc_rx_page_pool(bp, rxr); 3530 if (rc) 3531 return rc; 3532 3533 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3534 if (rc < 0) 3535 return rc; 3536 3537 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3538 MEM_TYPE_PAGE_POOL, 3539 rxr->page_pool); 3540 if (rc) { 3541 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3542 return rc; 3543 } 3544 3545 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3546 if (rc) 3547 return rc; 3548 3549 ring->grp_idx = i; 3550 if (agg_rings) { 3551 u16 mem_size; 3552 3553 ring = &rxr->rx_agg_ring_struct; 3554 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3555 if (rc) 3556 return rc; 3557 3558 ring->grp_idx = i; 3559 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3560 mem_size = rxr->rx_agg_bmap_size / 8; 3561 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3562 if (!rxr->rx_agg_bmap) 3563 return -ENOMEM; 3564 } 3565 } 3566 if (bp->flags & BNXT_FLAG_TPA) 3567 rc = bnxt_alloc_tpa_info(bp); 3568 return rc; 3569 } 3570 3571 static void bnxt_free_tx_rings(struct bnxt *bp) 3572 { 3573 int i; 3574 struct pci_dev *pdev = bp->pdev; 3575 3576 if (!bp->tx_ring) 3577 return; 3578 3579 for (i = 0; i < bp->tx_nr_rings; i++) { 3580 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3581 struct bnxt_ring_struct *ring; 3582 3583 if (txr->tx_push) { 3584 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3585 txr->tx_push, txr->tx_push_mapping); 3586 txr->tx_push = NULL; 3587 } 3588 3589 ring = &txr->tx_ring_struct; 3590 3591 bnxt_free_ring(bp, &ring->ring_mem); 3592 } 3593 } 3594 3595 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3596 ((tc) * (bp)->tx_nr_rings_per_tc) 3597 3598 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3599 ((tx) % (bp)->tx_nr_rings_per_tc) 3600 3601 #define BNXT_RING_TO_TC(bp, tx) \ 3602 ((tx) / (bp)->tx_nr_rings_per_tc) 3603 3604 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3605 { 3606 int i, j, rc; 3607 struct pci_dev *pdev = bp->pdev; 3608 3609 bp->tx_push_size = 0; 3610 if (bp->tx_push_thresh) { 3611 int push_size; 3612 3613 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3614 bp->tx_push_thresh); 3615 3616 if (push_size > 256) { 3617 push_size = 0; 3618 bp->tx_push_thresh = 0; 3619 } 3620 3621 bp->tx_push_size = push_size; 3622 } 3623 3624 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3625 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3626 struct bnxt_ring_struct *ring; 3627 u8 qidx; 3628 3629 ring = &txr->tx_ring_struct; 3630 3631 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3632 if (rc) 3633 return rc; 3634 3635 ring->grp_idx = txr->bnapi->index; 3636 if (bp->tx_push_size) { 3637 dma_addr_t mapping; 3638 3639 /* One pre-allocated DMA buffer to backup 3640 * TX push operation 3641 */ 3642 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3643 bp->tx_push_size, 3644 &txr->tx_push_mapping, 3645 GFP_KERNEL); 3646 3647 if (!txr->tx_push) 3648 return -ENOMEM; 3649 3650 mapping = 
txr->tx_push_mapping + 3651 sizeof(struct tx_push_bd); 3652 txr->data_mapping = cpu_to_le64(mapping); 3653 } 3654 qidx = bp->tc_to_qidx[j]; 3655 ring->queue_id = bp->q_info[qidx].queue_id; 3656 spin_lock_init(&txr->xdp_tx_lock); 3657 if (i < bp->tx_nr_rings_xdp) 3658 continue; 3659 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3660 j++; 3661 } 3662 return 0; 3663 } 3664 3665 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3666 { 3667 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3668 3669 kfree(cpr->cp_desc_ring); 3670 cpr->cp_desc_ring = NULL; 3671 ring->ring_mem.pg_arr = NULL; 3672 kfree(cpr->cp_desc_mapping); 3673 cpr->cp_desc_mapping = NULL; 3674 ring->ring_mem.dma_arr = NULL; 3675 } 3676 3677 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3678 { 3679 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3680 if (!cpr->cp_desc_ring) 3681 return -ENOMEM; 3682 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3683 GFP_KERNEL); 3684 if (!cpr->cp_desc_mapping) 3685 return -ENOMEM; 3686 return 0; 3687 } 3688 3689 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3690 { 3691 int i; 3692 3693 if (!bp->bnapi) 3694 return; 3695 for (i = 0; i < bp->cp_nr_rings; i++) { 3696 struct bnxt_napi *bnapi = bp->bnapi[i]; 3697 3698 if (!bnapi) 3699 continue; 3700 bnxt_free_cp_arrays(&bnapi->cp_ring); 3701 } 3702 } 3703 3704 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 3705 { 3706 int i, n = bp->cp_nr_pages; 3707 3708 for (i = 0; i < bp->cp_nr_rings; i++) { 3709 struct bnxt_napi *bnapi = bp->bnapi[i]; 3710 int rc; 3711 3712 if (!bnapi) 3713 continue; 3714 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 3715 if (rc) 3716 return rc; 3717 } 3718 return 0; 3719 } 3720 3721 static void bnxt_free_cp_rings(struct bnxt *bp) 3722 { 3723 int i; 3724 3725 if (!bp->bnapi) 3726 return; 3727 3728 for (i = 0; i < bp->cp_nr_rings; i++) { 3729 struct bnxt_napi *bnapi = bp->bnapi[i]; 3730 struct bnxt_cp_ring_info *cpr; 3731 struct bnxt_ring_struct *ring; 3732 int j; 3733 3734 if (!bnapi) 3735 continue; 3736 3737 cpr = &bnapi->cp_ring; 3738 ring = &cpr->cp_ring_struct; 3739 3740 bnxt_free_ring(bp, &ring->ring_mem); 3741 3742 if (!cpr->cp_ring_arr) 3743 continue; 3744 3745 for (j = 0; j < cpr->cp_ring_count; j++) { 3746 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3747 3748 ring = &cpr2->cp_ring_struct; 3749 bnxt_free_ring(bp, &ring->ring_mem); 3750 bnxt_free_cp_arrays(cpr2); 3751 } 3752 kfree(cpr->cp_ring_arr); 3753 cpr->cp_ring_arr = NULL; 3754 cpr->cp_ring_count = 0; 3755 } 3756 } 3757 3758 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 3759 struct bnxt_cp_ring_info *cpr) 3760 { 3761 struct bnxt_ring_mem_info *rmem; 3762 struct bnxt_ring_struct *ring; 3763 int rc; 3764 3765 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 3766 if (rc) { 3767 bnxt_free_cp_arrays(cpr); 3768 return -ENOMEM; 3769 } 3770 ring = &cpr->cp_ring_struct; 3771 rmem = &ring->ring_mem; 3772 rmem->nr_pages = bp->cp_nr_pages; 3773 rmem->page_size = HW_CMPD_RING_SIZE; 3774 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3775 rmem->dma_arr = cpr->cp_desc_mapping; 3776 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3777 rc = bnxt_alloc_ring(bp, rmem); 3778 if (rc) { 3779 bnxt_free_ring(bp, rmem); 3780 bnxt_free_cp_arrays(cpr); 3781 } 3782 return rc; 3783 } 3784 3785 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3786 { 3787 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3788 int i, j, rc, ulp_base_vec, ulp_msix; 3789 int tcs = 
netdev_get_num_tc(bp->dev); 3790 3791 if (!tcs) 3792 tcs = 1; 3793 ulp_msix = bnxt_get_ulp_msix_num(bp); 3794 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3795 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 3796 struct bnxt_napi *bnapi = bp->bnapi[i]; 3797 struct bnxt_cp_ring_info *cpr, *cpr2; 3798 struct bnxt_ring_struct *ring; 3799 int cp_count = 0, k; 3800 int rx = 0, tx = 0; 3801 3802 if (!bnapi) 3803 continue; 3804 3805 cpr = &bnapi->cp_ring; 3806 cpr->bnapi = bnapi; 3807 ring = &cpr->cp_ring_struct; 3808 3809 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3810 if (rc) 3811 return rc; 3812 3813 if (ulp_msix && i >= ulp_base_vec) 3814 ring->map_idx = i + ulp_msix; 3815 else 3816 ring->map_idx = i; 3817 3818 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3819 continue; 3820 3821 if (i < bp->rx_nr_rings) { 3822 cp_count++; 3823 rx = 1; 3824 } 3825 if (i < bp->tx_nr_rings_xdp) { 3826 cp_count++; 3827 tx = 1; 3828 } else if ((sh && i < bp->tx_nr_rings) || 3829 (!sh && i >= bp->rx_nr_rings)) { 3830 cp_count += tcs; 3831 tx = 1; 3832 } 3833 3834 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 3835 GFP_KERNEL); 3836 if (!cpr->cp_ring_arr) 3837 return -ENOMEM; 3838 cpr->cp_ring_count = cp_count; 3839 3840 for (k = 0; k < cp_count; k++) { 3841 cpr2 = &cpr->cp_ring_arr[k]; 3842 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 3843 if (rc) 3844 return rc; 3845 cpr2->bnapi = bnapi; 3846 cpr2->cp_idx = k; 3847 if (!k && rx) { 3848 bp->rx_ring[i].rx_cpr = cpr2; 3849 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 3850 } else { 3851 int n, tc = k - rx; 3852 3853 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 3854 bp->tx_ring[n].tx_cpr = cpr2; 3855 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 3856 } 3857 } 3858 if (tx) 3859 j++; 3860 } 3861 return 0; 3862 } 3863 3864 static void bnxt_init_ring_struct(struct bnxt *bp) 3865 { 3866 int i, j; 3867 3868 for (i = 0; i < bp->cp_nr_rings; i++) { 3869 struct bnxt_napi *bnapi = bp->bnapi[i]; 3870 struct bnxt_ring_mem_info *rmem; 3871 struct bnxt_cp_ring_info *cpr; 3872 struct bnxt_rx_ring_info *rxr; 3873 struct bnxt_tx_ring_info *txr; 3874 struct bnxt_ring_struct *ring; 3875 3876 if (!bnapi) 3877 continue; 3878 3879 cpr = &bnapi->cp_ring; 3880 ring = &cpr->cp_ring_struct; 3881 rmem = &ring->ring_mem; 3882 rmem->nr_pages = bp->cp_nr_pages; 3883 rmem->page_size = HW_CMPD_RING_SIZE; 3884 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3885 rmem->dma_arr = cpr->cp_desc_mapping; 3886 rmem->vmem_size = 0; 3887 3888 rxr = bnapi->rx_ring; 3889 if (!rxr) 3890 goto skip_rx; 3891 3892 ring = &rxr->rx_ring_struct; 3893 rmem = &ring->ring_mem; 3894 rmem->nr_pages = bp->rx_nr_pages; 3895 rmem->page_size = HW_RXBD_RING_SIZE; 3896 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3897 rmem->dma_arr = rxr->rx_desc_mapping; 3898 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3899 rmem->vmem = (void **)&rxr->rx_buf_ring; 3900 3901 ring = &rxr->rx_agg_ring_struct; 3902 rmem = &ring->ring_mem; 3903 rmem->nr_pages = bp->rx_agg_nr_pages; 3904 rmem->page_size = HW_RXBD_RING_SIZE; 3905 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3906 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3907 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3908 rmem->vmem = (void **)&rxr->rx_agg_ring; 3909 3910 skip_rx: 3911 bnxt_for_each_napi_tx(j, bnapi, txr) { 3912 ring = &txr->tx_ring_struct; 3913 rmem = &ring->ring_mem; 3914 rmem->nr_pages = bp->tx_nr_pages; 3915 rmem->page_size = HW_TXBD_RING_SIZE; 3916 rmem->pg_arr = (void **)txr->tx_desc_ring; 3917 rmem->dma_arr = txr->tx_desc_mapping; 3918 rmem->vmem_size = SW_TXBD_RING_SIZE * 
bp->tx_nr_pages; 3919 rmem->vmem = (void **)&txr->tx_buf_ring; 3920 } 3921 } 3922 } 3923 3924 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3925 { 3926 int i; 3927 u32 prod; 3928 struct rx_bd **rx_buf_ring; 3929 3930 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3931 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3932 int j; 3933 struct rx_bd *rxbd; 3934 3935 rxbd = rx_buf_ring[i]; 3936 if (!rxbd) 3937 continue; 3938 3939 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3940 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3941 rxbd->rx_bd_opaque = prod; 3942 } 3943 } 3944 } 3945 3946 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 3947 { 3948 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3949 struct net_device *dev = bp->dev; 3950 u32 prod; 3951 int i; 3952 3953 prod = rxr->rx_prod; 3954 for (i = 0; i < bp->rx_ring_size; i++) { 3955 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 3956 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3957 ring_nr, i, bp->rx_ring_size); 3958 break; 3959 } 3960 prod = NEXT_RX(prod); 3961 } 3962 rxr->rx_prod = prod; 3963 3964 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3965 return 0; 3966 3967 prod = rxr->rx_agg_prod; 3968 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3969 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 3970 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3971 ring_nr, i, bp->rx_ring_size); 3972 break; 3973 } 3974 prod = NEXT_RX_AGG(prod); 3975 } 3976 rxr->rx_agg_prod = prod; 3977 3978 if (rxr->rx_tpa) { 3979 dma_addr_t mapping; 3980 u8 *data; 3981 3982 for (i = 0; i < bp->max_tpa; i++) { 3983 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); 3984 if (!data) 3985 return -ENOMEM; 3986 3987 rxr->rx_tpa[i].data = data; 3988 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3989 rxr->rx_tpa[i].mapping = mapping; 3990 } 3991 } 3992 return 0; 3993 } 3994 3995 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3996 { 3997 struct bnxt_rx_ring_info *rxr; 3998 struct bnxt_ring_struct *ring; 3999 u32 type; 4000 4001 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4002 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 4003 4004 if (NET_IP_ALIGN == 2) 4005 type |= RX_BD_FLAGS_SOP; 4006 4007 rxr = &bp->rx_ring[ring_nr]; 4008 ring = &rxr->rx_ring_struct; 4009 bnxt_init_rxbd_pages(ring, type); 4010 4011 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4012 &rxr->bnapi->napi); 4013 4014 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4015 bpf_prog_add(bp->xdp_prog, 1); 4016 rxr->xdp_prog = bp->xdp_prog; 4017 } 4018 ring->fw_ring_id = INVALID_HW_RING_ID; 4019 4020 ring = &rxr->rx_agg_ring_struct; 4021 ring->fw_ring_id = INVALID_HW_RING_ID; 4022 4023 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4024 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4025 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4026 4027 bnxt_init_rxbd_pages(ring, type); 4028 } 4029 4030 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4031 } 4032 4033 static void bnxt_init_cp_rings(struct bnxt *bp) 4034 { 4035 int i, j; 4036 4037 for (i = 0; i < bp->cp_nr_rings; i++) { 4038 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4039 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4040 4041 ring->fw_ring_id = INVALID_HW_RING_ID; 4042 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4043 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4044 if (!cpr->cp_ring_arr) 4045 continue; 4046 for (j = 0; j < cpr->cp_ring_count; j++) { 4047 struct bnxt_cp_ring_info *cpr2 = 
&cpr->cp_ring_arr[j]; 4048 4049 ring = &cpr2->cp_ring_struct; 4050 ring->fw_ring_id = INVALID_HW_RING_ID; 4051 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4052 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4053 } 4054 } 4055 } 4056 4057 static int bnxt_init_rx_rings(struct bnxt *bp) 4058 { 4059 int i, rc = 0; 4060 4061 if (BNXT_RX_PAGE_MODE(bp)) { 4062 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4063 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4064 } else { 4065 bp->rx_offset = BNXT_RX_OFFSET; 4066 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4067 } 4068 4069 for (i = 0; i < bp->rx_nr_rings; i++) { 4070 rc = bnxt_init_one_rx_ring(bp, i); 4071 if (rc) 4072 break; 4073 } 4074 4075 return rc; 4076 } 4077 4078 static int bnxt_init_tx_rings(struct bnxt *bp) 4079 { 4080 u16 i; 4081 4082 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4083 BNXT_MIN_TX_DESC_CNT); 4084 4085 for (i = 0; i < bp->tx_nr_rings; i++) { 4086 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4087 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4088 4089 ring->fw_ring_id = INVALID_HW_RING_ID; 4090 4091 if (i >= bp->tx_nr_rings_xdp) 4092 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4093 NETDEV_QUEUE_TYPE_TX, 4094 &txr->bnapi->napi); 4095 } 4096 4097 return 0; 4098 } 4099 4100 static void bnxt_free_ring_grps(struct bnxt *bp) 4101 { 4102 kfree(bp->grp_info); 4103 bp->grp_info = NULL; 4104 } 4105 4106 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4107 { 4108 int i; 4109 4110 if (irq_re_init) { 4111 bp->grp_info = kcalloc(bp->cp_nr_rings, 4112 sizeof(struct bnxt_ring_grp_info), 4113 GFP_KERNEL); 4114 if (!bp->grp_info) 4115 return -ENOMEM; 4116 } 4117 for (i = 0; i < bp->cp_nr_rings; i++) { 4118 if (irq_re_init) 4119 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4120 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4121 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4122 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4123 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4124 } 4125 return 0; 4126 } 4127 4128 static void bnxt_free_vnics(struct bnxt *bp) 4129 { 4130 kfree(bp->vnic_info); 4131 bp->vnic_info = NULL; 4132 bp->nr_vnics = 0; 4133 } 4134 4135 static int bnxt_alloc_vnics(struct bnxt *bp) 4136 { 4137 int num_vnics = 1; 4138 4139 #ifdef CONFIG_RFS_ACCEL 4140 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) 4141 num_vnics += bp->rx_nr_rings; 4142 #endif 4143 4144 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4145 num_vnics++; 4146 4147 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4148 GFP_KERNEL); 4149 if (!bp->vnic_info) 4150 return -ENOMEM; 4151 4152 bp->nr_vnics = num_vnics; 4153 return 0; 4154 } 4155 4156 static void bnxt_init_vnics(struct bnxt *bp) 4157 { 4158 int i; 4159 4160 for (i = 0; i < bp->nr_vnics; i++) { 4161 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4162 int j; 4163 4164 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4165 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4166 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4167 4168 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4169 4170 if (bp->vnic_info[i].rss_hash_key) { 4171 if (i == 0) 4172 get_random_bytes(vnic->rss_hash_key, 4173 HW_HASH_KEY_SIZE); 4174 else 4175 memcpy(vnic->rss_hash_key, 4176 bp->vnic_info[0].rss_hash_key, 4177 HW_HASH_KEY_SIZE); 4178 } 4179 } 4180 } 4181 4182 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4183 { 4184 int pages; 4185 4186 pages = ring_size / desc_per_pg; 4187 4188 if (!pages) 4189 return 1; 4190 
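/* Note: the integer divide above rounds down.  The increment and the
 * while loop that follow round the page count up to the next power of
 * two; callers such as bnxt_set_ring_params() build ring masks as
 * (nr_pages * DESC_CNT - 1), which assumes a power-of-two total.
 */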
4191 pages++; 4192 4193 while (pages & (pages - 1)) 4194 pages++; 4195 4196 return pages; 4197 } 4198 4199 void bnxt_set_tpa_flags(struct bnxt *bp) 4200 { 4201 bp->flags &= ~BNXT_FLAG_TPA; 4202 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4203 return; 4204 if (bp->dev->features & NETIF_F_LRO) 4205 bp->flags |= BNXT_FLAG_LRO; 4206 else if (bp->dev->features & NETIF_F_GRO_HW) 4207 bp->flags |= BNXT_FLAG_GRO; 4208 } 4209 4210 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4211 * be set on entry. 4212 */ 4213 void bnxt_set_ring_params(struct bnxt *bp) 4214 { 4215 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4216 u32 agg_factor = 0, agg_ring_size = 0; 4217 4218 /* 8 for CRC and VLAN */ 4219 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4220 4221 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4222 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4223 4224 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 4225 ring_size = bp->rx_ring_size; 4226 bp->rx_agg_ring_size = 0; 4227 bp->rx_agg_nr_pages = 0; 4228 4229 if (bp->flags & BNXT_FLAG_TPA) 4230 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4231 4232 bp->flags &= ~BNXT_FLAG_JUMBO; 4233 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4234 u32 jumbo_factor; 4235 4236 bp->flags |= BNXT_FLAG_JUMBO; 4237 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4238 if (jumbo_factor > agg_factor) 4239 agg_factor = jumbo_factor; 4240 } 4241 if (agg_factor) { 4242 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4243 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4244 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4245 bp->rx_ring_size, ring_size); 4246 bp->rx_ring_size = ring_size; 4247 } 4248 agg_ring_size = ring_size * agg_factor; 4249 4250 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4251 RX_DESC_CNT); 4252 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4253 u32 tmp = agg_ring_size; 4254 4255 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4256 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4257 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4258 tmp, agg_ring_size); 4259 } 4260 bp->rx_agg_ring_size = agg_ring_size; 4261 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4262 4263 if (BNXT_RX_PAGE_MODE(bp)) { 4264 rx_space = PAGE_SIZE; 4265 rx_size = PAGE_SIZE - 4266 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4267 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4268 } else { 4269 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 4270 rx_space = rx_size + NET_SKB_PAD + 4271 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4272 } 4273 } 4274 4275 bp->rx_buf_use_size = rx_size; 4276 bp->rx_buf_size = rx_space; 4277 4278 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4279 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4280 4281 ring_size = bp->tx_ring_size; 4282 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4283 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4284 4285 max_rx_cmpl = bp->rx_ring_size; 4286 /* MAX TPA needs to be added because TPA_START completions are 4287 * immediately recycled, so the TPA completions are not bound by 4288 * the RX ring size. 
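 * Illustrative sizing only (numbers are not driver defaults): with
 * rx_ring_size 1024, max_tpa 64, an agg ring of 4096 and tx_ring_size
 * 1024, the completion ring computed below must hold
 * (1024 + 64) * 2 + 4096 + 1024 = 7296 entries; the factor of two
 * reflects that RX/TPA completions are twice the size of the other
 * entries.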
4289 */ 4290 if (bp->flags & BNXT_FLAG_TPA) 4291 max_rx_cmpl += bp->max_tpa; 4292 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4293 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4294 bp->cp_ring_size = ring_size; 4295 4296 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4297 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4298 bp->cp_nr_pages = MAX_CP_PAGES; 4299 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4300 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4301 ring_size, bp->cp_ring_size); 4302 } 4303 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4304 bp->cp_ring_mask = bp->cp_bit - 1; 4305 } 4306 4307 /* Changing allocation mode of RX rings. 4308 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4309 */ 4310 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4311 { 4312 struct net_device *dev = bp->dev; 4313 4314 if (page_mode) { 4315 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 4316 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4317 4318 if (bp->xdp_prog->aux->xdp_has_frags) 4319 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4320 else 4321 dev->max_mtu = 4322 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4323 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4324 bp->flags |= BNXT_FLAG_JUMBO; 4325 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4326 } else { 4327 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4328 bp->rx_skb_func = bnxt_rx_page_skb; 4329 } 4330 bp->rx_dir = DMA_BIDIRECTIONAL; 4331 /* Disable LRO or GRO_HW */ 4332 netdev_update_features(dev); 4333 } else { 4334 dev->max_mtu = bp->max_mtu; 4335 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4336 bp->rx_dir = DMA_FROM_DEVICE; 4337 bp->rx_skb_func = bnxt_rx_skb; 4338 } 4339 return 0; 4340 } 4341 4342 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4343 { 4344 int i; 4345 struct bnxt_vnic_info *vnic; 4346 struct pci_dev *pdev = bp->pdev; 4347 4348 if (!bp->vnic_info) 4349 return; 4350 4351 for (i = 0; i < bp->nr_vnics; i++) { 4352 vnic = &bp->vnic_info[i]; 4353 4354 kfree(vnic->fw_grp_ids); 4355 vnic->fw_grp_ids = NULL; 4356 4357 kfree(vnic->uc_list); 4358 vnic->uc_list = NULL; 4359 4360 if (vnic->mc_list) { 4361 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4362 vnic->mc_list, vnic->mc_list_mapping); 4363 vnic->mc_list = NULL; 4364 } 4365 4366 if (vnic->rss_table) { 4367 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4368 vnic->rss_table, 4369 vnic->rss_table_dma_addr); 4370 vnic->rss_table = NULL; 4371 } 4372 4373 vnic->rss_hash_key = NULL; 4374 vnic->flags = 0; 4375 } 4376 } 4377 4378 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4379 { 4380 int i, rc = 0, size; 4381 struct bnxt_vnic_info *vnic; 4382 struct pci_dev *pdev = bp->pdev; 4383 int max_rings; 4384 4385 for (i = 0; i < bp->nr_vnics; i++) { 4386 vnic = &bp->vnic_info[i]; 4387 4388 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4389 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4390 4391 if (mem_size > 0) { 4392 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4393 if (!vnic->uc_list) { 4394 rc = -ENOMEM; 4395 goto out; 4396 } 4397 } 4398 } 4399 4400 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4401 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4402 vnic->mc_list = 4403 dma_alloc_coherent(&pdev->dev, 4404 vnic->mc_list_size, 4405 &vnic->mc_list_mapping, 4406 GFP_KERNEL); 4407 if (!vnic->mc_list) { 4408 rc = -ENOMEM; 4409 goto out; 4410 } 4411 } 4412 4413 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4414 goto vnic_skip_grps; 4415 4416 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4417 max_rings = 
bp->rx_nr_rings; 4418 else 4419 max_rings = 1; 4420 4421 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4422 if (!vnic->fw_grp_ids) { 4423 rc = -ENOMEM; 4424 goto out; 4425 } 4426 vnic_skip_grps: 4427 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4428 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4429 continue; 4430 4431 /* Allocate rss table and hash key */ 4432 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4433 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4434 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4435 4436 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4437 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4438 vnic->rss_table_size, 4439 &vnic->rss_table_dma_addr, 4440 GFP_KERNEL); 4441 if (!vnic->rss_table) { 4442 rc = -ENOMEM; 4443 goto out; 4444 } 4445 4446 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4447 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4448 } 4449 return 0; 4450 4451 out: 4452 return rc; 4453 } 4454 4455 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4456 { 4457 struct bnxt_hwrm_wait_token *token; 4458 4459 dma_pool_destroy(bp->hwrm_dma_pool); 4460 bp->hwrm_dma_pool = NULL; 4461 4462 rcu_read_lock(); 4463 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4464 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4465 rcu_read_unlock(); 4466 } 4467 4468 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4469 { 4470 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4471 BNXT_HWRM_DMA_SIZE, 4472 BNXT_HWRM_DMA_ALIGN, 0); 4473 if (!bp->hwrm_dma_pool) 4474 return -ENOMEM; 4475 4476 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4477 4478 return 0; 4479 } 4480 4481 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4482 { 4483 kfree(stats->hw_masks); 4484 stats->hw_masks = NULL; 4485 kfree(stats->sw_stats); 4486 stats->sw_stats = NULL; 4487 if (stats->hw_stats) { 4488 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4489 stats->hw_stats_map); 4490 stats->hw_stats = NULL; 4491 } 4492 } 4493 4494 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4495 bool alloc_masks) 4496 { 4497 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4498 &stats->hw_stats_map, GFP_KERNEL); 4499 if (!stats->hw_stats) 4500 return -ENOMEM; 4501 4502 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4503 if (!stats->sw_stats) 4504 goto stats_mem_err; 4505 4506 if (alloc_masks) { 4507 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4508 if (!stats->hw_masks) 4509 goto stats_mem_err; 4510 } 4511 return 0; 4512 4513 stats_mem_err: 4514 bnxt_free_stats_mem(bp, stats); 4515 return -ENOMEM; 4516 } 4517 4518 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4519 { 4520 int i; 4521 4522 for (i = 0; i < count; i++) 4523 mask_arr[i] = mask; 4524 } 4525 4526 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4527 { 4528 int i; 4529 4530 for (i = 0; i < count; i++) 4531 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4532 } 4533 4534 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4535 struct bnxt_stats_mem *stats) 4536 { 4537 struct hwrm_func_qstats_ext_output *resp; 4538 struct hwrm_func_qstats_ext_input *req; 4539 __le64 *hw_masks; 4540 int rc; 4541 4542 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 4543 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4544 return -EOPNOTSUPP; 4545 4546 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 4547 if (rc) 4548 return rc; 4549 4550 req->fid = 
cpu_to_le16(0xffff); 4551 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4552 4553 resp = hwrm_req_hold(bp, req); 4554 rc = hwrm_req_send(bp, req); 4555 if (!rc) { 4556 hw_masks = &resp->rx_ucast_pkts; 4557 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 4558 } 4559 hwrm_req_drop(bp, req); 4560 return rc; 4561 } 4562 4563 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 4564 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 4565 4566 static void bnxt_init_stats(struct bnxt *bp) 4567 { 4568 struct bnxt_napi *bnapi = bp->bnapi[0]; 4569 struct bnxt_cp_ring_info *cpr; 4570 struct bnxt_stats_mem *stats; 4571 __le64 *rx_stats, *tx_stats; 4572 int rc, rx_count, tx_count; 4573 u64 *rx_masks, *tx_masks; 4574 u64 mask; 4575 u8 flags; 4576 4577 cpr = &bnapi->cp_ring; 4578 stats = &cpr->stats; 4579 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 4580 if (rc) { 4581 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4582 mask = (1ULL << 48) - 1; 4583 else 4584 mask = -1ULL; 4585 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 4586 } 4587 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4588 stats = &bp->port_stats; 4589 rx_stats = stats->hw_stats; 4590 rx_masks = stats->hw_masks; 4591 rx_count = sizeof(struct rx_port_stats) / 8; 4592 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4593 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4594 tx_count = sizeof(struct tx_port_stats) / 8; 4595 4596 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 4597 rc = bnxt_hwrm_port_qstats(bp, flags); 4598 if (rc) { 4599 mask = (1ULL << 40) - 1; 4600 4601 bnxt_fill_masks(rx_masks, mask, rx_count); 4602 bnxt_fill_masks(tx_masks, mask, tx_count); 4603 } else { 4604 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4605 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 4606 bnxt_hwrm_port_qstats(bp, 0); 4607 } 4608 } 4609 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 4610 stats = &bp->rx_port_stats_ext; 4611 rx_stats = stats->hw_stats; 4612 rx_masks = stats->hw_masks; 4613 rx_count = sizeof(struct rx_port_stats_ext) / 8; 4614 stats = &bp->tx_port_stats_ext; 4615 tx_stats = stats->hw_stats; 4616 tx_masks = stats->hw_masks; 4617 tx_count = sizeof(struct tx_port_stats_ext) / 8; 4618 4619 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4620 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 4621 if (rc) { 4622 mask = (1ULL << 40) - 1; 4623 4624 bnxt_fill_masks(rx_masks, mask, rx_count); 4625 if (tx_stats) 4626 bnxt_fill_masks(tx_masks, mask, tx_count); 4627 } else { 4628 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4629 if (tx_stats) 4630 bnxt_copy_hw_masks(tx_masks, tx_stats, 4631 tx_count); 4632 bnxt_hwrm_port_qstats_ext(bp, 0); 4633 } 4634 } 4635 } 4636 4637 static void bnxt_free_port_stats(struct bnxt *bp) 4638 { 4639 bp->flags &= ~BNXT_FLAG_PORT_STATS; 4640 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 4641 4642 bnxt_free_stats_mem(bp, &bp->port_stats); 4643 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 4644 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 4645 } 4646 4647 static void bnxt_free_ring_stats(struct bnxt *bp) 4648 { 4649 int i; 4650 4651 if (!bp->bnapi) 4652 return; 4653 4654 for (i = 0; i < bp->cp_nr_rings; i++) { 4655 struct bnxt_napi *bnapi = bp->bnapi[i]; 4656 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4657 4658 bnxt_free_stats_mem(bp, &cpr->stats); 4659 } 4660 } 4661 4662 static int bnxt_alloc_stats(struct bnxt *bp) 4663 { 4664 u32 size, i; 4665 int rc; 4666 4667 size = bp->hw_ring_stats_size; 4668 4669 for (i = 0; i < bp->cp_nr_rings; i++) { 4670 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 4671 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4672 4673 cpr->stats.len = size; 4674 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4675 if (rc) 4676 return rc; 4677 4678 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4679 } 4680 4681 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4682 return 0; 4683 4684 if (bp->port_stats.hw_stats) 4685 goto alloc_ext_stats; 4686 4687 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4688 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4689 if (rc) 4690 return rc; 4691 4692 bp->flags |= BNXT_FLAG_PORT_STATS; 4693 4694 alloc_ext_stats: 4695 /* Display extended statistics only if FW supports it */ 4696 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4697 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4698 return 0; 4699 4700 if (bp->rx_port_stats_ext.hw_stats) 4701 goto alloc_tx_ext_stats; 4702 4703 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4704 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4705 /* Extended stats are optional */ 4706 if (rc) 4707 return 0; 4708 4709 alloc_tx_ext_stats: 4710 if (bp->tx_port_stats_ext.hw_stats) 4711 return 0; 4712 4713 if (bp->hwrm_spec_code >= 0x10902 || 4714 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4715 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4716 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4717 /* Extended stats are optional */ 4718 if (rc) 4719 return 0; 4720 } 4721 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4722 return 0; 4723 } 4724 4725 static void bnxt_clear_ring_indices(struct bnxt *bp) 4726 { 4727 int i, j; 4728 4729 if (!bp->bnapi) 4730 return; 4731 4732 for (i = 0; i < bp->cp_nr_rings; i++) { 4733 struct bnxt_napi *bnapi = bp->bnapi[i]; 4734 struct bnxt_cp_ring_info *cpr; 4735 struct bnxt_rx_ring_info *rxr; 4736 struct bnxt_tx_ring_info *txr; 4737 4738 if (!bnapi) 4739 continue; 4740 4741 cpr = &bnapi->cp_ring; 4742 cpr->cp_raw_cons = 0; 4743 4744 bnxt_for_each_napi_tx(j, bnapi, txr) { 4745 txr->tx_prod = 0; 4746 txr->tx_cons = 0; 4747 txr->tx_hw_cons = 0; 4748 } 4749 4750 rxr = bnapi->rx_ring; 4751 if (rxr) { 4752 rxr->rx_prod = 0; 4753 rxr->rx_agg_prod = 0; 4754 rxr->rx_sw_agg_prod = 0; 4755 rxr->rx_next_cons = 0; 4756 } 4757 bnapi->events = 0; 4758 } 4759 } 4760 4761 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 4762 { 4763 #ifdef CONFIG_RFS_ACCEL 4764 int i; 4765 4766 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4767 * safe to delete the hash table. 
4768 */ 4769 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4770 struct hlist_head *head; 4771 struct hlist_node *tmp; 4772 struct bnxt_ntuple_filter *fltr; 4773 4774 head = &bp->ntp_fltr_hash_tbl[i]; 4775 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 4776 hlist_del(&fltr->hash); 4777 kfree(fltr); 4778 } 4779 } 4780 if (irq_reinit) { 4781 bitmap_free(bp->ntp_fltr_bmap); 4782 bp->ntp_fltr_bmap = NULL; 4783 } 4784 bp->ntp_fltr_count = 0; 4785 #endif 4786 } 4787 4788 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4789 { 4790 #ifdef CONFIG_RFS_ACCEL 4791 int i, rc = 0; 4792 4793 if (!(bp->flags & BNXT_FLAG_RFS)) 4794 return 0; 4795 4796 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4797 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4798 4799 bp->ntp_fltr_count = 0; 4800 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL); 4801 4802 if (!bp->ntp_fltr_bmap) 4803 rc = -ENOMEM; 4804 4805 return rc; 4806 #else 4807 return 0; 4808 #endif 4809 } 4810 4811 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 4812 { 4813 bnxt_free_vnic_attributes(bp); 4814 bnxt_free_tx_rings(bp); 4815 bnxt_free_rx_rings(bp); 4816 bnxt_free_cp_rings(bp); 4817 bnxt_free_all_cp_arrays(bp); 4818 bnxt_free_ntp_fltrs(bp, irq_re_init); 4819 if (irq_re_init) { 4820 bnxt_free_ring_stats(bp); 4821 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 4822 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 4823 bnxt_free_port_stats(bp); 4824 bnxt_free_ring_grps(bp); 4825 bnxt_free_vnics(bp); 4826 kfree(bp->tx_ring_map); 4827 bp->tx_ring_map = NULL; 4828 kfree(bp->tx_ring); 4829 bp->tx_ring = NULL; 4830 kfree(bp->rx_ring); 4831 bp->rx_ring = NULL; 4832 kfree(bp->bnapi); 4833 bp->bnapi = NULL; 4834 } else { 4835 bnxt_clear_ring_indices(bp); 4836 } 4837 } 4838 4839 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 4840 { 4841 int i, j, rc, size, arr_size; 4842 void *bnapi; 4843 4844 if (irq_re_init) { 4845 /* Allocate bnapi mem pointer array and mem block for 4846 * all queues 4847 */ 4848 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 4849 bp->cp_nr_rings); 4850 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 4851 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 4852 if (!bnapi) 4853 return -ENOMEM; 4854 4855 bp->bnapi = bnapi; 4856 bnapi += arr_size; 4857 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 4858 bp->bnapi[i] = bnapi; 4859 bp->bnapi[i]->index = i; 4860 bp->bnapi[i]->bp = bp; 4861 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4862 struct bnxt_cp_ring_info *cpr = 4863 &bp->bnapi[i]->cp_ring; 4864 4865 cpr->cp_ring_struct.ring_mem.flags = 4866 BNXT_RMEM_RING_PTE_FLAG; 4867 } 4868 } 4869 4870 bp->rx_ring = kcalloc(bp->rx_nr_rings, 4871 sizeof(struct bnxt_rx_ring_info), 4872 GFP_KERNEL); 4873 if (!bp->rx_ring) 4874 return -ENOMEM; 4875 4876 for (i = 0; i < bp->rx_nr_rings; i++) { 4877 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4878 4879 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4880 rxr->rx_ring_struct.ring_mem.flags = 4881 BNXT_RMEM_RING_PTE_FLAG; 4882 rxr->rx_agg_ring_struct.ring_mem.flags = 4883 BNXT_RMEM_RING_PTE_FLAG; 4884 } else { 4885 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 4886 } 4887 rxr->bnapi = bp->bnapi[i]; 4888 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4889 } 4890 4891 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4892 sizeof(struct bnxt_tx_ring_info), 4893 GFP_KERNEL); 4894 if (!bp->tx_ring) 4895 return -ENOMEM; 4896 4897 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4898 GFP_KERNEL); 4899 4900 if (!bp->tx_ring_map) 4901 return -ENOMEM; 
4902 4903 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4904 j = 0; 4905 else 4906 j = bp->rx_nr_rings; 4907 4908 for (i = 0; i < bp->tx_nr_rings; i++) { 4909 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4910 struct bnxt_napi *bnapi2; 4911 4912 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4913 txr->tx_ring_struct.ring_mem.flags = 4914 BNXT_RMEM_RING_PTE_FLAG; 4915 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4916 if (i >= bp->tx_nr_rings_xdp) { 4917 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 4918 4919 bnapi2 = bp->bnapi[k]; 4920 txr->txq_index = i - bp->tx_nr_rings_xdp; 4921 txr->tx_napi_idx = 4922 BNXT_RING_TO_TC(bp, txr->txq_index); 4923 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 4924 bnapi2->tx_int = bnxt_tx_int; 4925 } else { 4926 bnapi2 = bp->bnapi[j]; 4927 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 4928 bnapi2->tx_ring[0] = txr; 4929 bnapi2->tx_int = bnxt_tx_int_xdp; 4930 j++; 4931 } 4932 txr->bnapi = bnapi2; 4933 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4934 txr->tx_cpr = &bnapi2->cp_ring; 4935 } 4936 4937 rc = bnxt_alloc_stats(bp); 4938 if (rc) 4939 goto alloc_mem_err; 4940 bnxt_init_stats(bp); 4941 4942 rc = bnxt_alloc_ntp_fltrs(bp); 4943 if (rc) 4944 goto alloc_mem_err; 4945 4946 rc = bnxt_alloc_vnics(bp); 4947 if (rc) 4948 goto alloc_mem_err; 4949 } 4950 4951 rc = bnxt_alloc_all_cp_arrays(bp); 4952 if (rc) 4953 goto alloc_mem_err; 4954 4955 bnxt_init_ring_struct(bp); 4956 4957 rc = bnxt_alloc_rx_rings(bp); 4958 if (rc) 4959 goto alloc_mem_err; 4960 4961 rc = bnxt_alloc_tx_rings(bp); 4962 if (rc) 4963 goto alloc_mem_err; 4964 4965 rc = bnxt_alloc_cp_rings(bp); 4966 if (rc) 4967 goto alloc_mem_err; 4968 4969 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4970 BNXT_VNIC_UCAST_FLAG; 4971 rc = bnxt_alloc_vnic_attributes(bp); 4972 if (rc) 4973 goto alloc_mem_err; 4974 return 0; 4975 4976 alloc_mem_err: 4977 bnxt_free_mem(bp, true); 4978 return rc; 4979 } 4980 4981 static void bnxt_disable_int(struct bnxt *bp) 4982 { 4983 int i; 4984 4985 if (!bp->bnapi) 4986 return; 4987 4988 for (i = 0; i < bp->cp_nr_rings; i++) { 4989 struct bnxt_napi *bnapi = bp->bnapi[i]; 4990 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4991 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4992 4993 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4994 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4995 } 4996 } 4997 4998 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4999 { 5000 struct bnxt_napi *bnapi = bp->bnapi[n]; 5001 struct bnxt_cp_ring_info *cpr; 5002 5003 cpr = &bnapi->cp_ring; 5004 return cpr->cp_ring_struct.map_idx; 5005 } 5006 5007 static void bnxt_disable_int_sync(struct bnxt *bp) 5008 { 5009 int i; 5010 5011 if (!bp->irq_tbl) 5012 return; 5013 5014 atomic_inc(&bp->intr_sem); 5015 5016 bnxt_disable_int(bp); 5017 for (i = 0; i < bp->cp_nr_rings; i++) { 5018 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5019 5020 synchronize_irq(bp->irq_tbl[map_idx].vector); 5021 } 5022 } 5023 5024 static void bnxt_enable_int(struct bnxt *bp) 5025 { 5026 int i; 5027 5028 atomic_set(&bp->intr_sem, 0); 5029 for (i = 0; i < bp->cp_nr_rings; i++) { 5030 struct bnxt_napi *bnapi = bp->bnapi[i]; 5031 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5032 5033 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5034 } 5035 } 5036 5037 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5038 bool async_only) 5039 { 5040 DECLARE_BITMAP(async_events_bmap, 256); 5041 u32 *events = (u32 *)async_events_bmap; 5042 struct hwrm_func_drv_rgtr_output *resp; 5043 struct 
hwrm_func_drv_rgtr_input *req; 5044 u32 flags; 5045 int rc, i; 5046 5047 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5048 if (rc) 5049 return rc; 5050 5051 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5052 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5053 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5054 5055 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5056 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5057 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5058 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5059 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5060 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5061 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5062 req->flags = cpu_to_le32(flags); 5063 req->ver_maj_8b = DRV_VER_MAJ; 5064 req->ver_min_8b = DRV_VER_MIN; 5065 req->ver_upd_8b = DRV_VER_UPD; 5066 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5067 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5068 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5069 5070 if (BNXT_PF(bp)) { 5071 u32 data[8]; 5072 int i; 5073 5074 memset(data, 0, sizeof(data)); 5075 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5076 u16 cmd = bnxt_vf_req_snif[i]; 5077 unsigned int bit, idx; 5078 5079 idx = cmd / 32; 5080 bit = cmd % 32; 5081 data[idx] |= 1 << bit; 5082 } 5083 5084 for (i = 0; i < 8; i++) 5085 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5086 5087 req->enables |= 5088 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5089 } 5090 5091 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5092 req->flags |= cpu_to_le32( 5093 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5094 5095 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 5096 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5097 u16 event_id = bnxt_async_events_arr[i]; 5098 5099 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5100 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5101 continue; 5102 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5103 !bp->ptp_cfg) 5104 continue; 5105 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5106 } 5107 if (bmap && bmap_size) { 5108 for (i = 0; i < bmap_size; i++) { 5109 if (test_bit(i, bmap)) 5110 __set_bit(i, async_events_bmap); 5111 } 5112 } 5113 for (i = 0; i < 8; i++) 5114 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5115 5116 if (async_only) 5117 req->enables = 5118 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5119 5120 resp = hwrm_req_hold(bp, req); 5121 rc = hwrm_req_send(bp, req); 5122 if (!rc) { 5123 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5124 if (resp->flags & 5125 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5126 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5127 } 5128 hwrm_req_drop(bp, req); 5129 return rc; 5130 } 5131 5132 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5133 { 5134 struct hwrm_func_drv_unrgtr_input *req; 5135 int rc; 5136 5137 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5138 return 0; 5139 5140 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5141 if (rc) 5142 return rc; 5143 return hwrm_req_send(bp, req); 5144 } 5145 5146 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5147 { 5148 struct hwrm_tunnel_dst_port_free_input *req; 5149 int rc; 5150 5151 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5152 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5153 return 0; 5154 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5155 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5156 return 0; 5157 5158 rc = hwrm_req_init(bp, 
req, HWRM_TUNNEL_DST_PORT_FREE); 5159 if (rc) 5160 return rc; 5161 5162 req->tunnel_type = tunnel_type; 5163 5164 switch (tunnel_type) { 5165 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5166 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5167 bp->vxlan_port = 0; 5168 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5169 break; 5170 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5171 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5172 bp->nge_port = 0; 5173 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5174 break; 5175 default: 5176 break; 5177 } 5178 5179 rc = hwrm_req_send(bp, req); 5180 if (rc) 5181 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 5182 rc); 5183 return rc; 5184 } 5185 5186 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5187 u8 tunnel_type) 5188 { 5189 struct hwrm_tunnel_dst_port_alloc_output *resp; 5190 struct hwrm_tunnel_dst_port_alloc_input *req; 5191 int rc; 5192 5193 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5194 if (rc) 5195 return rc; 5196 5197 req->tunnel_type = tunnel_type; 5198 req->tunnel_dst_port_val = port; 5199 5200 resp = hwrm_req_hold(bp, req); 5201 rc = hwrm_req_send(bp, req); 5202 if (rc) { 5203 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5204 rc); 5205 goto err_out; 5206 } 5207 5208 switch (tunnel_type) { 5209 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5210 bp->vxlan_port = port; 5211 bp->vxlan_fw_dst_port_id = 5212 le16_to_cpu(resp->tunnel_dst_port_id); 5213 break; 5214 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5215 bp->nge_port = port; 5216 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5217 break; 5218 default: 5219 break; 5220 } 5221 5222 err_out: 5223 hwrm_req_drop(bp, req); 5224 return rc; 5225 } 5226 5227 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5228 { 5229 struct hwrm_cfa_l2_set_rx_mask_input *req; 5230 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5231 int rc; 5232 5233 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5234 if (rc) 5235 return rc; 5236 5237 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5238 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5239 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5240 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5241 } 5242 req->mask = cpu_to_le32(vnic->rx_mask); 5243 return hwrm_req_send_silent(bp, req); 5244 } 5245 5246 #ifdef CONFIG_RFS_ACCEL 5247 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 5248 struct bnxt_ntuple_filter *fltr) 5249 { 5250 struct hwrm_cfa_ntuple_filter_free_input *req; 5251 int rc; 5252 5253 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 5254 if (rc) 5255 return rc; 5256 5257 req->ntuple_filter_id = fltr->filter_id; 5258 return hwrm_req_send(bp, req); 5259 } 5260 5261 #define BNXT_NTP_FLTR_FLAGS \ 5262 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 5263 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 5264 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 5265 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 5266 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 5267 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 5268 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 5269 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 5270 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 5271 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 5272 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 5273 
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 5274 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 5275 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 5276 5277 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 5278 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 5279 5280 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5281 struct bnxt_ntuple_filter *fltr) 5282 { 5283 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 5284 struct hwrm_cfa_ntuple_filter_alloc_input *req; 5285 struct flow_keys *keys = &fltr->fkeys; 5286 struct bnxt_vnic_info *vnic; 5287 u32 flags = 0; 5288 int rc; 5289 5290 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 5291 if (rc) 5292 return rc; 5293 5294 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 5295 5296 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 5297 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5298 req->dst_id = cpu_to_le16(fltr->rxq); 5299 } else { 5300 vnic = &bp->vnic_info[fltr->rxq + 1]; 5301 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5302 } 5303 req->flags = cpu_to_le32(flags); 5304 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5305 5306 req->ethertype = htons(ETH_P_IP); 5307 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); 5308 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 5309 req->ip_protocol = keys->basic.ip_proto; 5310 5311 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 5312 int i; 5313 5314 req->ethertype = htons(ETH_P_IPV6); 5315 req->ip_addr_type = 5316 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 5317 *(struct in6_addr *)&req->src_ipaddr[0] = 5318 keys->addrs.v6addrs.src; 5319 *(struct in6_addr *)&req->dst_ipaddr[0] = 5320 keys->addrs.v6addrs.dst; 5321 for (i = 0; i < 4; i++) { 5322 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 5323 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 5324 } 5325 } else { 5326 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 5327 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5328 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 5329 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5330 } 5331 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 5332 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 5333 req->tunnel_type = 5334 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 5335 } 5336 5337 req->src_port = keys->ports.src; 5338 req->src_port_mask = cpu_to_be16(0xffff); 5339 req->dst_port = keys->ports.dst; 5340 req->dst_port_mask = cpu_to_be16(0xffff); 5341 5342 resp = hwrm_req_hold(bp, req); 5343 rc = hwrm_req_send(bp, req); 5344 if (!rc) 5345 fltr->filter_id = resp->ntuple_filter_id; 5346 hwrm_req_drop(bp, req); 5347 return rc; 5348 } 5349 #endif 5350 5351 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 5352 const u8 *mac_addr) 5353 { 5354 struct hwrm_cfa_l2_filter_alloc_output *resp; 5355 struct hwrm_cfa_l2_filter_alloc_input *req; 5356 int rc; 5357 5358 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 5359 if (rc) 5360 return rc; 5361 5362 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 5363 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 5364 req->flags |= 5365 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 5366 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 5367 req->enables = 5368 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 5369 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 5370 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 5371 memcpy(req->l2_addr, mac_addr, ETH_ALEN); 5372 req->l2_addr_mask[0] = 0xff; 5373 
req->l2_addr_mask[1] = 0xff; 5374 req->l2_addr_mask[2] = 0xff; 5375 req->l2_addr_mask[3] = 0xff; 5376 req->l2_addr_mask[4] = 0xff; 5377 req->l2_addr_mask[5] = 0xff; 5378 5379 resp = hwrm_req_hold(bp, req); 5380 rc = hwrm_req_send(bp, req); 5381 if (!rc) 5382 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 5383 resp->l2_filter_id; 5384 hwrm_req_drop(bp, req); 5385 return rc; 5386 } 5387 5388 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 5389 { 5390 struct hwrm_cfa_l2_filter_free_input *req; 5391 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 5392 int rc; 5393 5394 /* Any associated ntuple filters will also be cleared by firmware. */ 5395 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 5396 if (rc) 5397 return rc; 5398 hwrm_req_hold(bp, req); 5399 for (i = 0; i < num_of_vnics; i++) { 5400 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5401 5402 for (j = 0; j < vnic->uc_filter_count; j++) { 5403 req->l2_filter_id = vnic->fw_l2_filter_id[j]; 5404 5405 rc = hwrm_req_send(bp, req); 5406 } 5407 vnic->uc_filter_count = 0; 5408 } 5409 hwrm_req_drop(bp, req); 5410 return rc; 5411 } 5412 5413 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 5414 { 5415 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5416 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 5417 struct hwrm_vnic_tpa_cfg_input *req; 5418 int rc; 5419 5420 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 5421 return 0; 5422 5423 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 5424 if (rc) 5425 return rc; 5426 5427 if (tpa_flags) { 5428 u16 mss = bp->dev->mtu - 40; 5429 u32 nsegs, n, segs = 0, flags; 5430 5431 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 5432 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 5433 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 5434 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 5435 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 5436 if (tpa_flags & BNXT_FLAG_GRO) 5437 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 5438 5439 req->flags = cpu_to_le32(flags); 5440 5441 req->enables = 5442 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 5443 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 5444 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 5445 5446 /* Number of segs are log2 units, and first packet is not 5447 * included as part of this units. 
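 * For example, assuming a 4K BNXT_RX_PAGE_SIZE and an MSS of 1500,
 * n = 4096 / 1500 = 2 and nsegs = (MAX_SKB_FRAGS - 1) * 2; on pre-P5
 * chips the value programmed below is then ilog2(nsegs).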
5448 */ 5449 if (mss <= BNXT_RX_PAGE_SIZE) { 5450 n = BNXT_RX_PAGE_SIZE / mss; 5451 nsegs = (MAX_SKB_FRAGS - 1) * n; 5452 } else { 5453 n = mss / BNXT_RX_PAGE_SIZE; 5454 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 5455 n++; 5456 nsegs = (MAX_SKB_FRAGS - n) / n; 5457 } 5458 5459 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5460 segs = MAX_TPA_SEGS_P5; 5461 max_aggs = bp->max_tpa; 5462 } else { 5463 segs = ilog2(nsegs); 5464 } 5465 req->max_agg_segs = cpu_to_le16(segs); 5466 req->max_aggs = cpu_to_le16(max_aggs); 5467 5468 req->min_agg_len = cpu_to_le32(512); 5469 } 5470 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5471 5472 return hwrm_req_send(bp, req); 5473 } 5474 5475 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 5476 { 5477 struct bnxt_ring_grp_info *grp_info; 5478 5479 grp_info = &bp->grp_info[ring->grp_idx]; 5480 return grp_info->cp_fw_ring_id; 5481 } 5482 5483 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 5484 { 5485 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5486 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 5487 else 5488 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 5489 } 5490 5491 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 5492 { 5493 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5494 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 5495 else 5496 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 5497 } 5498 5499 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 5500 { 5501 int entries; 5502 5503 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5504 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 5505 else 5506 entries = HW_HASH_INDEX_SIZE; 5507 5508 bp->rss_indir_tbl_entries = entries; 5509 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), 5510 GFP_KERNEL); 5511 if (!bp->rss_indir_tbl) 5512 return -ENOMEM; 5513 return 0; 5514 } 5515 5516 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) 5517 { 5518 u16 max_rings, max_entries, pad, i; 5519 5520 if (!bp->rx_nr_rings) 5521 return; 5522 5523 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5524 max_rings = bp->rx_nr_rings - 1; 5525 else 5526 max_rings = bp->rx_nr_rings; 5527 5528 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 5529 5530 for (i = 0; i < max_entries; i++) 5531 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 5532 5533 pad = bp->rss_indir_tbl_entries - max_entries; 5534 if (pad) 5535 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 5536 } 5537 5538 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 5539 { 5540 u16 i, tbl_size, max_ring = 0; 5541 5542 if (!bp->rss_indir_tbl) 5543 return 0; 5544 5545 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5546 for (i = 0; i < tbl_size; i++) 5547 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 5548 return max_ring; 5549 } 5550 5551 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5552 { 5553 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5554 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5555 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5556 return 2; 5557 return 1; 5558 } 5559 5560 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5561 { 5562 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 5563 u16 i, j; 5564 5565 /* Fill the RSS indirection table with ring group ids */ 5566 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 5567 if (!no_rss) 5568 j = bp->rss_indir_tbl[i]; 5569 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 5570 } 5571 } 5572 5573 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 5574 struct bnxt_vnic_info 
*vnic) 5575 { 5576 __le16 *ring_tbl = vnic->rss_table; 5577 struct bnxt_rx_ring_info *rxr; 5578 u16 tbl_size, i; 5579 5580 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5581 5582 for (i = 0; i < tbl_size; i++) { 5583 u16 ring_id, j; 5584 5585 j = bp->rss_indir_tbl[i]; 5586 rxr = &bp->rx_ring[j]; 5587 5588 ring_id = rxr->rx_ring_struct.fw_ring_id; 5589 *ring_tbl++ = cpu_to_le16(ring_id); 5590 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5591 *ring_tbl++ = cpu_to_le16(ring_id); 5592 } 5593 } 5594 5595 static void 5596 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 5597 struct bnxt_vnic_info *vnic) 5598 { 5599 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5600 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 5601 else 5602 bnxt_fill_hw_rss_tbl(bp, vnic); 5603 5604 if (bp->rss_hash_delta) { 5605 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 5606 if (bp->rss_hash_cfg & bp->rss_hash_delta) 5607 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 5608 else 5609 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 5610 } else { 5611 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 5612 } 5613 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5614 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 5615 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 5616 } 5617 5618 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 5619 { 5620 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5621 struct hwrm_vnic_rss_cfg_input *req; 5622 int rc; 5623 5624 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 5625 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 5626 return 0; 5627 5628 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 5629 if (rc) 5630 return rc; 5631 5632 if (set_rss) 5633 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 5634 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5635 return hwrm_req_send(bp, req); 5636 } 5637 5638 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 5639 { 5640 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5641 struct hwrm_vnic_rss_cfg_input *req; 5642 dma_addr_t ring_tbl_map; 5643 u32 i, nr_ctxs; 5644 int rc; 5645 5646 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 5647 if (rc) 5648 return rc; 5649 5650 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5651 if (!set_rss) 5652 return hwrm_req_send(bp, req); 5653 5654 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 5655 ring_tbl_map = vnic->rss_table_dma_addr; 5656 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 5657 5658 hwrm_req_hold(bp, req); 5659 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 5660 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 5661 req->ring_table_pair_index = i; 5662 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 5663 rc = hwrm_req_send(bp, req); 5664 if (rc) 5665 goto exit; 5666 } 5667 5668 exit: 5669 hwrm_req_drop(bp, req); 5670 return rc; 5671 } 5672 5673 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 5674 { 5675 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5676 struct hwrm_vnic_rss_qcfg_output *resp; 5677 struct hwrm_vnic_rss_qcfg_input *req; 5678 5679 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 5680 return; 5681 5682 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5683 /* all contexts configured to same hash_type, zero always exists */ 5684 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5685 resp = hwrm_req_hold(bp, req); 5686 if (!hwrm_req_send(bp, req)) { 5687 bp->rss_hash_cfg = 
le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 5688 bp->rss_hash_delta = 0; 5689 } 5690 hwrm_req_drop(bp, req); 5691 } 5692 5693 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 5694 { 5695 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5696 struct hwrm_vnic_plcmodes_cfg_input *req; 5697 int rc; 5698 5699 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 5700 if (rc) 5701 return rc; 5702 5703 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 5704 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 5705 5706 if (BNXT_RX_PAGE_MODE(bp)) { 5707 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 5708 } else { 5709 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 5710 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 5711 req->enables |= 5712 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 5713 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 5714 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 5715 } 5716 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5717 return hwrm_req_send(bp, req); 5718 } 5719 5720 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 5721 u16 ctx_idx) 5722 { 5723 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 5724 5725 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 5726 return; 5727 5728 req->rss_cos_lb_ctx_id = 5729 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 5730 5731 hwrm_req_send(bp, req); 5732 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 5733 } 5734 5735 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 5736 { 5737 int i, j; 5738 5739 for (i = 0; i < bp->nr_vnics; i++) { 5740 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5741 5742 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 5743 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 5744 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 5745 } 5746 } 5747 bp->rsscos_nr_ctxs = 0; 5748 } 5749 5750 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 5751 { 5752 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 5753 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 5754 int rc; 5755 5756 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 5757 if (rc) 5758 return rc; 5759 5760 resp = hwrm_req_hold(bp, req); 5761 rc = hwrm_req_send(bp, req); 5762 if (!rc) 5763 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 5764 le16_to_cpu(resp->rss_cos_lb_ctx_id); 5765 hwrm_req_drop(bp, req); 5766 5767 return rc; 5768 } 5769 5770 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 5771 { 5772 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 5773 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 5774 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 5775 } 5776 5777 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 5778 { 5779 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5780 struct hwrm_vnic_cfg_input *req; 5781 unsigned int ring = 0, grp_idx; 5782 u16 def_vlan = 0; 5783 int rc; 5784 5785 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 5786 if (rc) 5787 return rc; 5788 5789 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5790 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5791 5792 req->default_rx_ring_id = 5793 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5794 req->default_cmpl_ring_id = 5795 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5796 req->enables = 5797 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5798 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5799 goto vnic_mru; 5800 } 5801 req->enables = 
cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5802 /* Only RSS support for now TBD: COS & LB */ 5803 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5804 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5805 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5806 VNIC_CFG_REQ_ENABLES_MRU); 5807 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5808 req->rss_rule = 5809 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5810 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5811 VNIC_CFG_REQ_ENABLES_MRU); 5812 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5813 } else { 5814 req->rss_rule = cpu_to_le16(0xffff); 5815 } 5816 5817 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5818 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5819 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5820 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5821 } else { 5822 req->cos_rule = cpu_to_le16(0xffff); 5823 } 5824 5825 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5826 ring = 0; 5827 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5828 ring = vnic_id - 1; 5829 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5830 ring = bp->rx_nr_rings - 1; 5831 5832 grp_idx = bp->rx_ring[ring].bnapi->index; 5833 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5834 req->lb_rule = cpu_to_le16(0xffff); 5835 vnic_mru: 5836 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 5837 5838 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5839 #ifdef CONFIG_BNXT_SRIOV 5840 if (BNXT_VF(bp)) 5841 def_vlan = bp->vf.vlan; 5842 #endif 5843 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5844 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5845 if (!vnic_id && bnxt_ulp_registered(bp->edev)) 5846 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5847 5848 return hwrm_req_send(bp, req); 5849 } 5850 5851 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5852 { 5853 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5854 struct hwrm_vnic_free_input *req; 5855 5856 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 5857 return; 5858 5859 req->vnic_id = 5860 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5861 5862 hwrm_req_send(bp, req); 5863 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5864 } 5865 } 5866 5867 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5868 { 5869 u16 i; 5870 5871 for (i = 0; i < bp->nr_vnics; i++) 5872 bnxt_hwrm_vnic_free_one(bp, i); 5873 } 5874 5875 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5876 unsigned int start_rx_ring_idx, 5877 unsigned int nr_rings) 5878 { 5879 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5880 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5881 struct hwrm_vnic_alloc_output *resp; 5882 struct hwrm_vnic_alloc_input *req; 5883 int rc; 5884 5885 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 5886 if (rc) 5887 return rc; 5888 5889 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5890 goto vnic_no_ring_grps; 5891 5892 /* map ring groups to this vnic */ 5893 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5894 grp_idx = bp->rx_ring[i].bnapi->index; 5895 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5896 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5897 j, nr_rings); 5898 break; 5899 } 5900 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5901 } 5902 5903 vnic_no_ring_grps: 5904 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5905 vnic->fw_rss_cos_lb_ctx[i] = 
INVALID_HW_RING_ID; 5906 if (vnic_id == 0) 5907 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5908 5909 resp = hwrm_req_hold(bp, req); 5910 rc = hwrm_req_send(bp, req); 5911 if (!rc) 5912 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5913 hwrm_req_drop(bp, req); 5914 return rc; 5915 } 5916 5917 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5918 { 5919 struct hwrm_vnic_qcaps_output *resp; 5920 struct hwrm_vnic_qcaps_input *req; 5921 int rc; 5922 5923 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5924 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 5925 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 5926 if (bp->hwrm_spec_code < 0x10600) 5927 return 0; 5928 5929 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 5930 if (rc) 5931 return rc; 5932 5933 resp = hwrm_req_hold(bp, req); 5934 rc = hwrm_req_send(bp, req); 5935 if (!rc) { 5936 u32 flags = le32_to_cpu(resp->flags); 5937 5938 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 5939 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5940 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 5941 if (flags & 5942 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5943 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5944 5945 /* Older P5 fw before EXT_HW_STATS support did not set 5946 * VLAN_STRIP_CAP properly. 5947 */ 5948 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 5949 (BNXT_CHIP_P5(bp) && 5950 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 5951 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 5952 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 5953 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 5954 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 5955 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 5956 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5957 if (bp->max_tpa_v2) { 5958 if (BNXT_CHIP_P5(bp)) 5959 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 5960 else 5961 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 5962 } 5963 } 5964 hwrm_req_drop(bp, req); 5965 return rc; 5966 } 5967 5968 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5969 { 5970 struct hwrm_ring_grp_alloc_output *resp; 5971 struct hwrm_ring_grp_alloc_input *req; 5972 int rc; 5973 u16 i; 5974 5975 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5976 return 0; 5977 5978 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 5979 if (rc) 5980 return rc; 5981 5982 resp = hwrm_req_hold(bp, req); 5983 for (i = 0; i < bp->rx_nr_rings; i++) { 5984 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5985 5986 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5987 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5988 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5989 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5990 5991 rc = hwrm_req_send(bp, req); 5992 5993 if (rc) 5994 break; 5995 5996 bp->grp_info[grp_idx].fw_grp_id = 5997 le32_to_cpu(resp->ring_group_id); 5998 } 5999 hwrm_req_drop(bp, req); 6000 return rc; 6001 } 6002 6003 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6004 { 6005 struct hwrm_ring_grp_free_input *req; 6006 u16 i; 6007 6008 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6009 return; 6010 6011 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6012 return; 6013 6014 hwrm_req_hold(bp, req); 6015 for (i = 0; i < bp->cp_nr_rings; i++) { 6016 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6017 continue; 6018 req->ring_group_id = 6019 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6020 6021 hwrm_req_send(bp, req); 6022 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6023 } 6024 
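/* Request reuse pattern: hwrm_req_hold() above keeps the request
 * buffer valid so it can be re-filled and re-sent once per ring
 * group, and the hwrm_req_drop() that follows releases it.
 */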
hwrm_req_drop(bp, req); 6025 } 6026 6027 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 6028 struct bnxt_ring_struct *ring, 6029 u32 ring_type, u32 map_index) 6030 { 6031 struct hwrm_ring_alloc_output *resp; 6032 struct hwrm_ring_alloc_input *req; 6033 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 6034 struct bnxt_ring_grp_info *grp_info; 6035 int rc, err = 0; 6036 u16 ring_id; 6037 6038 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 6039 if (rc) 6040 goto exit; 6041 6042 req->enables = 0; 6043 if (rmem->nr_pages > 1) { 6044 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 6045 /* Page size is in log2 units */ 6046 req->page_size = BNXT_PAGE_SHIFT; 6047 req->page_tbl_depth = 1; 6048 } else { 6049 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 6050 } 6051 req->fbo = 0; 6052 /* Association of ring index with doorbell index and MSIX number */ 6053 req->logical_id = cpu_to_le16(map_index); 6054 6055 switch (ring_type) { 6056 case HWRM_RING_ALLOC_TX: { 6057 struct bnxt_tx_ring_info *txr; 6058 6059 txr = container_of(ring, struct bnxt_tx_ring_info, 6060 tx_ring_struct); 6061 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 6062 /* Association of transmit ring with completion ring */ 6063 grp_info = &bp->grp_info[ring->grp_idx]; 6064 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 6065 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 6066 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6067 req->queue_id = cpu_to_le16(ring->queue_id); 6068 break; 6069 } 6070 case HWRM_RING_ALLOC_RX: 6071 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6072 req->length = cpu_to_le32(bp->rx_ring_mask + 1); 6073 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6074 u16 flags = 0; 6075 6076 /* Association of rx ring with stats context */ 6077 grp_info = &bp->grp_info[ring->grp_idx]; 6078 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6079 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6080 req->enables |= cpu_to_le32( 6081 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6082 if (NET_IP_ALIGN == 2) 6083 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 6084 req->flags = cpu_to_le16(flags); 6085 } 6086 break; 6087 case HWRM_RING_ALLOC_AGG: 6088 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6089 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6090 /* Association of agg ring with rx ring */ 6091 grp_info = &bp->grp_info[ring->grp_idx]; 6092 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6093 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6094 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6095 req->enables |= cpu_to_le32( 6096 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 6097 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 6098 } else { 6099 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 6100 } 6101 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 6102 break; 6103 case HWRM_RING_ALLOC_CMPL: 6104 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 6105 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6106 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6107 /* Association of cp ring with nq */ 6108 grp_info = &bp->grp_info[map_index]; 6109 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6110 req->cq_handle = cpu_to_le64(ring->handle); 6111 req->enables |= cpu_to_le32( 6112 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 6113 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 6114 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6115 } 6116 break; 6117 case HWRM_RING_ALLOC_NQ: 6118 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 6119 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 6120 if 
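/* HWRM_RING_ALLOC carries either a page-table address (rings larger
 * than one page) or the single page's DMA address, plus per-type
 * associations: TX rings name their completion ring and HW queue,
 * RX/AGG rings name buffer sizes and a stats context on P5+, and
 * completion rings name their NQ.
 */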
(bp->flags & BNXT_FLAG_USING_MSIX) 6121 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 6122 break; 6123 default: 6124 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 6125 ring_type); 6126 return -1; 6127 } 6128 6129 resp = hwrm_req_hold(bp, req); 6130 rc = hwrm_req_send(bp, req); 6131 err = le16_to_cpu(resp->error_code); 6132 ring_id = le16_to_cpu(resp->ring_id); 6133 hwrm_req_drop(bp, req); 6134 6135 exit: 6136 if (rc || err) { 6137 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 6138 ring_type, rc, err); 6139 return -EIO; 6140 } 6141 ring->fw_ring_id = ring_id; 6142 return rc; 6143 } 6144 6145 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 6146 { 6147 int rc; 6148 6149 if (BNXT_PF(bp)) { 6150 struct hwrm_func_cfg_input *req; 6151 6152 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 6153 if (rc) 6154 return rc; 6155 6156 req->fid = cpu_to_le16(0xffff); 6157 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6158 req->async_event_cr = cpu_to_le16(idx); 6159 return hwrm_req_send(bp, req); 6160 } else { 6161 struct hwrm_func_vf_cfg_input *req; 6162 6163 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 6164 if (rc) 6165 return rc; 6166 6167 req->enables = 6168 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 6169 req->async_event_cr = cpu_to_le16(idx); 6170 return hwrm_req_send(bp, req); 6171 } 6172 } 6173 6174 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 6175 u32 ring_type) 6176 { 6177 switch (ring_type) { 6178 case HWRM_RING_ALLOC_TX: 6179 db->db_ring_mask = bp->tx_ring_mask; 6180 break; 6181 case HWRM_RING_ALLOC_RX: 6182 db->db_ring_mask = bp->rx_ring_mask; 6183 break; 6184 case HWRM_RING_ALLOC_AGG: 6185 db->db_ring_mask = bp->rx_agg_ring_mask; 6186 break; 6187 case HWRM_RING_ALLOC_CMPL: 6188 case HWRM_RING_ALLOC_NQ: 6189 db->db_ring_mask = bp->cp_ring_mask; 6190 break; 6191 } 6192 if (bp->flags & BNXT_FLAG_CHIP_P7) { 6193 db->db_epoch_mask = db->db_ring_mask + 1; 6194 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 6195 } 6196 } 6197 6198 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 6199 u32 map_idx, u32 xid) 6200 { 6201 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6202 switch (ring_type) { 6203 case HWRM_RING_ALLOC_TX: 6204 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 6205 break; 6206 case HWRM_RING_ALLOC_RX: 6207 case HWRM_RING_ALLOC_AGG: 6208 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 6209 break; 6210 case HWRM_RING_ALLOC_CMPL: 6211 db->db_key64 = DBR_PATH_L2; 6212 break; 6213 case HWRM_RING_ALLOC_NQ: 6214 db->db_key64 = DBR_PATH_L2; 6215 break; 6216 } 6217 db->db_key64 |= (u64)xid << DBR_XID_SFT; 6218 6219 if (bp->flags & BNXT_FLAG_CHIP_P7) 6220 db->db_key64 |= DBR_VALID; 6221 6222 db->doorbell = bp->bar1 + bp->db_offset; 6223 } else { 6224 db->doorbell = bp->bar1 + map_idx * 0x80; 6225 switch (ring_type) { 6226 case HWRM_RING_ALLOC_TX: 6227 db->db_key32 = DB_KEY_TX; 6228 break; 6229 case HWRM_RING_ALLOC_RX: 6230 case HWRM_RING_ALLOC_AGG: 6231 db->db_key32 = DB_KEY_RX; 6232 break; 6233 case HWRM_RING_ALLOC_CMPL: 6234 db->db_key32 = DB_KEY_CP; 6235 break; 6236 } 6237 } 6238 bnxt_set_db_mask(bp, db, ring_type); 6239 } 6240 6241 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 6242 { 6243 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 6244 int i, rc = 0; 6245 u32 type; 6246 6247 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6248 type = HWRM_RING_ALLOC_NQ; 6249 else 6250 type = HWRM_RING_ALLOC_CMPL; 6251 for (i = 0; i < bp->cp_nr_rings; i++) { 6252 
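/* Allocation order: NQs (or the L2 completion rings on older chips)
 * come first so their doorbells can be set up and ring 0 can be
 * registered for async events, then TX rings with per-TX completion
 * rings on P5+, then RX rings, and finally the aggregation rings when
 * jumbo/LRO buffers are in use.
 */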
struct bnxt_napi *bnapi = bp->bnapi[i]; 6253 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6254 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 6255 u32 map_idx = ring->map_idx; 6256 unsigned int vector; 6257 6258 vector = bp->irq_tbl[map_idx].vector; 6259 disable_irq_nosync(vector); 6260 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6261 if (rc) { 6262 enable_irq(vector); 6263 goto err_out; 6264 } 6265 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 6266 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 6267 enable_irq(vector); 6268 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 6269 6270 if (!i) { 6271 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 6272 if (rc) 6273 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 6274 } 6275 } 6276 6277 type = HWRM_RING_ALLOC_TX; 6278 for (i = 0; i < bp->tx_nr_rings; i++) { 6279 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6280 struct bnxt_ring_struct *ring; 6281 u32 map_idx; 6282 6283 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6284 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; 6285 struct bnxt_napi *bnapi = txr->bnapi; 6286 u32 type2 = HWRM_RING_ALLOC_CMPL; 6287 6288 ring = &cpr2->cp_ring_struct; 6289 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6290 map_idx = bnapi->index; 6291 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6292 if (rc) 6293 goto err_out; 6294 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6295 ring->fw_ring_id); 6296 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6297 } 6298 ring = &txr->tx_ring_struct; 6299 map_idx = i; 6300 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6301 if (rc) 6302 goto err_out; 6303 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 6304 } 6305 6306 type = HWRM_RING_ALLOC_RX; 6307 for (i = 0; i < bp->rx_nr_rings; i++) { 6308 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6309 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6310 struct bnxt_napi *bnapi = rxr->bnapi; 6311 u32 map_idx = bnapi->index; 6312 6313 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6314 if (rc) 6315 goto err_out; 6316 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 6317 /* If we have agg rings, post agg buffers first. 
*/ 6318 if (!agg_rings) 6319 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6320 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 6321 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6322 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; 6323 u32 type2 = HWRM_RING_ALLOC_CMPL; 6324 6325 ring = &cpr2->cp_ring_struct; 6326 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6327 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6328 if (rc) 6329 goto err_out; 6330 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6331 ring->fw_ring_id); 6332 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6333 } 6334 } 6335 6336 if (agg_rings) { 6337 type = HWRM_RING_ALLOC_AGG; 6338 for (i = 0; i < bp->rx_nr_rings; i++) { 6339 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6340 struct bnxt_ring_struct *ring = 6341 &rxr->rx_agg_ring_struct; 6342 u32 grp_idx = ring->grp_idx; 6343 u32 map_idx = grp_idx + bp->rx_nr_rings; 6344 6345 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6346 if (rc) 6347 goto err_out; 6348 6349 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 6350 ring->fw_ring_id); 6351 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 6352 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6353 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 6354 } 6355 } 6356 err_out: 6357 return rc; 6358 } 6359 6360 static int hwrm_ring_free_send_msg(struct bnxt *bp, 6361 struct bnxt_ring_struct *ring, 6362 u32 ring_type, int cmpl_ring_id) 6363 { 6364 struct hwrm_ring_free_output *resp; 6365 struct hwrm_ring_free_input *req; 6366 u16 error_code = 0; 6367 int rc; 6368 6369 if (BNXT_NO_FW_ACCESS(bp)) 6370 return 0; 6371 6372 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 6373 if (rc) 6374 goto exit; 6375 6376 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 6377 req->ring_type = ring_type; 6378 req->ring_id = cpu_to_le16(ring->fw_ring_id); 6379 6380 resp = hwrm_req_hold(bp, req); 6381 rc = hwrm_req_send(bp, req); 6382 error_code = le16_to_cpu(resp->error_code); 6383 hwrm_req_drop(bp, req); 6384 exit: 6385 if (rc || error_code) { 6386 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 6387 ring_type, rc, error_code); 6388 return -EIO; 6389 } 6390 return 0; 6391 } 6392 6393 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 6394 { 6395 u32 type; 6396 int i; 6397 6398 if (!bp->bnapi) 6399 return; 6400 6401 for (i = 0; i < bp->tx_nr_rings; i++) { 6402 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6403 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 6404 6405 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6406 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 6407 6408 hwrm_ring_free_send_msg(bp, ring, 6409 RING_FREE_REQ_RING_TYPE_TX, 6410 close_path ? cmpl_ring_id : 6411 INVALID_HW_RING_ID); 6412 ring->fw_ring_id = INVALID_HW_RING_ID; 6413 } 6414 } 6415 6416 for (i = 0; i < bp->rx_nr_rings; i++) { 6417 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6418 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6419 u32 grp_idx = rxr->bnapi->index; 6420 6421 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6422 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6423 6424 hwrm_ring_free_send_msg(bp, ring, 6425 RING_FREE_REQ_RING_TYPE_RX, 6426 close_path ? 
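/* When close_path is true each RING_FREE request names the ring's
 * completion ring (instead of INVALID_HW_RING_ID), which appears to
 * let firmware order the free behind completions still outstanding on
 * that ring. The completion rings and NQs themselves are freed last,
 * only after interrupts are disabled further below.
 */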
cmpl_ring_id : 6427 INVALID_HW_RING_ID); 6428 ring->fw_ring_id = INVALID_HW_RING_ID; 6429 bp->grp_info[grp_idx].rx_fw_ring_id = 6430 INVALID_HW_RING_ID; 6431 } 6432 } 6433 6434 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6435 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 6436 else 6437 type = RING_FREE_REQ_RING_TYPE_RX; 6438 for (i = 0; i < bp->rx_nr_rings; i++) { 6439 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6440 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 6441 u32 grp_idx = rxr->bnapi->index; 6442 6443 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6444 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6445 6446 hwrm_ring_free_send_msg(bp, ring, type, 6447 close_path ? cmpl_ring_id : 6448 INVALID_HW_RING_ID); 6449 ring->fw_ring_id = INVALID_HW_RING_ID; 6450 bp->grp_info[grp_idx].agg_fw_ring_id = 6451 INVALID_HW_RING_ID; 6452 } 6453 } 6454 6455 /* The completion rings are about to be freed. After that the 6456 * IRQ doorbell will not work anymore. So we need to disable 6457 * IRQ here. 6458 */ 6459 bnxt_disable_int_sync(bp); 6460 6461 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6462 type = RING_FREE_REQ_RING_TYPE_NQ; 6463 else 6464 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 6465 for (i = 0; i < bp->cp_nr_rings; i++) { 6466 struct bnxt_napi *bnapi = bp->bnapi[i]; 6467 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6468 struct bnxt_ring_struct *ring; 6469 int j; 6470 6471 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { 6472 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 6473 6474 ring = &cpr2->cp_ring_struct; 6475 if (ring->fw_ring_id == INVALID_HW_RING_ID) 6476 continue; 6477 hwrm_ring_free_send_msg(bp, ring, 6478 RING_FREE_REQ_RING_TYPE_L2_CMPL, 6479 INVALID_HW_RING_ID); 6480 ring->fw_ring_id = INVALID_HW_RING_ID; 6481 } 6482 ring = &cpr->cp_ring_struct; 6483 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6484 hwrm_ring_free_send_msg(bp, ring, type, 6485 INVALID_HW_RING_ID); 6486 ring->fw_ring_id = INVALID_HW_RING_ID; 6487 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 6488 } 6489 } 6490 } 6491 6492 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 6493 bool shared); 6494 6495 static int bnxt_hwrm_get_rings(struct bnxt *bp) 6496 { 6497 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6498 struct hwrm_func_qcfg_output *resp; 6499 struct hwrm_func_qcfg_input *req; 6500 int rc; 6501 6502 if (bp->hwrm_spec_code < 0x10601) 6503 return 0; 6504 6505 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6506 if (rc) 6507 return rc; 6508 6509 req->fid = cpu_to_le16(0xffff); 6510 resp = hwrm_req_hold(bp, req); 6511 rc = hwrm_req_send(bp, req); 6512 if (rc) { 6513 hwrm_req_drop(bp, req); 6514 return rc; 6515 } 6516 6517 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6518 if (BNXT_NEW_RM(bp)) { 6519 u16 cp, stats; 6520 6521 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 6522 hw_resc->resv_hw_ring_grps = 6523 le32_to_cpu(resp->alloc_hw_ring_grps); 6524 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 6525 cp = le16_to_cpu(resp->alloc_cmpl_rings); 6526 stats = le16_to_cpu(resp->alloc_stat_ctx); 6527 hw_resc->resv_irqs = cp; 6528 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6529 int rx = hw_resc->resv_rx_rings; 6530 int tx = hw_resc->resv_tx_rings; 6531 6532 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6533 rx >>= 1; 6534 if (cp < (rx + tx)) { 6535 rx = cp / 2; 6536 tx = rx; 6537 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6538 rx <<= 1; 6539 hw_resc->resv_rx_rings = rx; 6540 hw_resc->resv_tx_rings = tx; 6541 } 6542 hw_resc->resv_irqs = 
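/* On P5+ the reserved RX count includes aggregation rings, so it is
 * halved to get usable RX rings and completion rings are shared
 * between RX and TX. Example: resv_rx_rings = 8 with agg rings is 4
 * usable RX rings; if tx is 4 but only cp = 6 completion rings were
 * granted, both are trimmed to 3 and rx is written back as 6.
 */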
le16_to_cpu(resp->alloc_msix); 6543 hw_resc->resv_hw_ring_grps = rx; 6544 } 6545 hw_resc->resv_cp_rings = cp; 6546 hw_resc->resv_stat_ctxs = stats; 6547 } 6548 hwrm_req_drop(bp, req); 6549 return 0; 6550 } 6551 6552 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 6553 { 6554 struct hwrm_func_qcfg_output *resp; 6555 struct hwrm_func_qcfg_input *req; 6556 int rc; 6557 6558 if (bp->hwrm_spec_code < 0x10601) 6559 return 0; 6560 6561 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6562 if (rc) 6563 return rc; 6564 6565 req->fid = cpu_to_le16(fid); 6566 resp = hwrm_req_hold(bp, req); 6567 rc = hwrm_req_send(bp, req); 6568 if (!rc) 6569 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6570 6571 hwrm_req_drop(bp, req); 6572 return rc; 6573 } 6574 6575 static bool bnxt_rfs_supported(struct bnxt *bp); 6576 6577 static struct hwrm_func_cfg_input * 6578 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6579 int ring_grps, int cp_rings, int stats, int vnics) 6580 { 6581 struct hwrm_func_cfg_input *req; 6582 u32 enables = 0; 6583 6584 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 6585 return NULL; 6586 6587 req->fid = cpu_to_le16(0xffff); 6588 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6589 req->num_tx_rings = cpu_to_le16(tx_rings); 6590 if (BNXT_NEW_RM(bp)) { 6591 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 6592 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6593 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6594 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 6595 enables |= tx_rings + ring_grps ? 6596 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6597 enables |= rx_rings ? 6598 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6599 } else { 6600 enables |= cp_rings ? 6601 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6602 enables |= ring_grps ? 6603 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 6604 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6605 } 6606 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 6607 6608 req->num_rx_rings = cpu_to_le16(rx_rings); 6609 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6610 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6611 req->num_msix = cpu_to_le16(cp_rings); 6612 req->num_rsscos_ctxs = 6613 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6614 } else { 6615 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6616 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6617 req->num_rsscos_ctxs = cpu_to_le16(1); 6618 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 6619 bnxt_rfs_supported(bp)) 6620 req->num_rsscos_ctxs = 6621 cpu_to_le16(ring_grps + 1); 6622 } 6623 req->num_stat_ctxs = cpu_to_le16(stats); 6624 req->num_vnics = cpu_to_le16(vnics); 6625 } 6626 req->enables = cpu_to_le32(enables); 6627 return req; 6628 } 6629 6630 static struct hwrm_func_vf_cfg_input * 6631 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6632 int ring_grps, int cp_rings, int stats, int vnics) 6633 { 6634 struct hwrm_func_vf_cfg_input *req; 6635 u32 enables = 0; 6636 6637 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 6638 return NULL; 6639 6640 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6641 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 6642 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6643 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6644 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6645 enables |= tx_rings + ring_grps ? 6646 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6647 } else { 6648 enables |= cp_rings ? 
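/* Same shaping as the PF request above: on P5+ the completion ring
 * count requested is tx_rings + ring_grps (roughly one per TX and per
 * RX ring) and one RSS context covers up to 64 ring groups, e.g.
 * DIV_ROUND_UP(8, 64) = 1 and DIV_ROUND_UP(100, 64) = 2; legacy chips
 * reserve explicit HW ring groups instead.
 */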
6649 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6650 enables |= ring_grps ? 6651 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 6652 } 6653 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 6654 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 6655 6656 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 6657 req->num_tx_rings = cpu_to_le16(tx_rings); 6658 req->num_rx_rings = cpu_to_le16(rx_rings); 6659 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6660 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6661 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6662 } else { 6663 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6664 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6665 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 6666 } 6667 req->num_stat_ctxs = cpu_to_le16(stats); 6668 req->num_vnics = cpu_to_le16(vnics); 6669 6670 req->enables = cpu_to_le32(enables); 6671 return req; 6672 } 6673 6674 static int 6675 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6676 int ring_grps, int cp_rings, int stats, int vnics) 6677 { 6678 struct hwrm_func_cfg_input *req; 6679 int rc; 6680 6681 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 6682 cp_rings, stats, vnics); 6683 if (!req) 6684 return -ENOMEM; 6685 6686 if (!req->enables) { 6687 hwrm_req_drop(bp, req); 6688 return 0; 6689 } 6690 6691 rc = hwrm_req_send(bp, req); 6692 if (rc) 6693 return rc; 6694 6695 if (bp->hwrm_spec_code < 0x10601) 6696 bp->hw_resc.resv_tx_rings = tx_rings; 6697 6698 return bnxt_hwrm_get_rings(bp); 6699 } 6700 6701 static int 6702 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6703 int ring_grps, int cp_rings, int stats, int vnics) 6704 { 6705 struct hwrm_func_vf_cfg_input *req; 6706 int rc; 6707 6708 if (!BNXT_NEW_RM(bp)) { 6709 bp->hw_resc.resv_tx_rings = tx_rings; 6710 return 0; 6711 } 6712 6713 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6714 cp_rings, stats, vnics); 6715 if (!req) 6716 return -ENOMEM; 6717 6718 rc = hwrm_req_send(bp, req); 6719 if (rc) 6720 return rc; 6721 6722 return bnxt_hwrm_get_rings(bp); 6723 } 6724 6725 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 6726 int cp, int stat, int vnic) 6727 { 6728 if (BNXT_PF(bp)) 6729 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 6730 vnic); 6731 else 6732 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 6733 vnic); 6734 } 6735 6736 int bnxt_nq_rings_in_use(struct bnxt *bp) 6737 { 6738 int cp = bp->cp_nr_rings; 6739 int ulp_msix, ulp_base; 6740 6741 ulp_msix = bnxt_get_ulp_msix_num(bp); 6742 if (ulp_msix) { 6743 ulp_base = bnxt_get_ulp_msix_base(bp); 6744 cp += ulp_msix; 6745 if ((ulp_base + ulp_msix) > cp) 6746 cp = ulp_base + ulp_msix; 6747 } 6748 return cp; 6749 } 6750 6751 static int bnxt_cp_rings_in_use(struct bnxt *bp) 6752 { 6753 int cp; 6754 6755 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6756 return bnxt_nq_rings_in_use(bp); 6757 6758 cp = bp->tx_nr_rings + bp->rx_nr_rings; 6759 return cp; 6760 } 6761 6762 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 6763 { 6764 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 6765 int cp = bp->cp_nr_rings; 6766 6767 if (!ulp_stat) 6768 return cp; 6769 6770 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 6771 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 6772 6773 return cp + ulp_stat; 6774 } 6775 6776 /* Check if a default RSS map needs to be setup. 
This function is only 6777 * used on older firmware that does not require reserving RX rings. 6778 */ 6779 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 6780 { 6781 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6782 6783 /* The RSS map is valid for RX rings set to resv_rx_rings */ 6784 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 6785 hw_resc->resv_rx_rings = bp->rx_nr_rings; 6786 if (!netif_is_rxfh_configured(bp->dev)) 6787 bnxt_set_dflt_rss_indir_tbl(bp); 6788 } 6789 } 6790 6791 static bool bnxt_need_reserve_rings(struct bnxt *bp) 6792 { 6793 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6794 int cp = bnxt_cp_rings_in_use(bp); 6795 int nq = bnxt_nq_rings_in_use(bp); 6796 int rx = bp->rx_nr_rings, stat; 6797 int vnic = 1, grp = rx; 6798 6799 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 6800 bp->hwrm_spec_code >= 0x10601) 6801 return true; 6802 6803 /* Old firmware does not need RX ring reservations but we still 6804 * need to setup a default RSS map when needed. With new firmware 6805 * we go through RX ring reservations first and then set up the 6806 * RSS map for the successfully reserved RX rings when needed. 6807 */ 6808 if (!BNXT_NEW_RM(bp)) { 6809 bnxt_check_rss_tbl_no_rmgr(bp); 6810 return false; 6811 } 6812 if ((bp->flags & BNXT_FLAG_RFS) && 6813 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6814 vnic = rx + 1; 6815 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6816 rx <<= 1; 6817 stat = bnxt_get_func_stat_ctxs(bp); 6818 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 6819 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 6820 (hw_resc->resv_hw_ring_grps != grp && 6821 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 6822 return true; 6823 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 6824 hw_resc->resv_irqs != nq) 6825 return true; 6826 return false; 6827 } 6828 6829 static int __bnxt_reserve_rings(struct bnxt *bp) 6830 { 6831 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6832 int cp = bnxt_nq_rings_in_use(bp); 6833 int tx = bp->tx_nr_rings; 6834 int rx = bp->rx_nr_rings; 6835 int grp, rx_rings, rc; 6836 int vnic = 1, stat; 6837 bool sh = false; 6838 int tx_cp; 6839 6840 if (!bnxt_need_reserve_rings(bp)) 6841 return 0; 6842 6843 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6844 sh = true; 6845 if ((bp->flags & BNXT_FLAG_RFS) && 6846 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6847 vnic = rx + 1; 6848 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6849 rx <<= 1; 6850 grp = bp->rx_nr_rings; 6851 stat = bnxt_get_func_stat_ctxs(bp); 6852 6853 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 6854 if (rc) 6855 return rc; 6856 6857 tx = hw_resc->resv_tx_rings; 6858 if (BNXT_NEW_RM(bp)) { 6859 rx = hw_resc->resv_rx_rings; 6860 cp = hw_resc->resv_irqs; 6861 grp = hw_resc->resv_hw_ring_grps; 6862 vnic = hw_resc->resv_vnics; 6863 stat = hw_resc->resv_stat_ctxs; 6864 } 6865 6866 rx_rings = rx; 6867 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6868 if (rx >= 2) { 6869 rx_rings = rx >> 1; 6870 } else { 6871 if (netif_running(bp->dev)) 6872 return -ENOMEM; 6873 6874 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 6875 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 6876 bp->dev->hw_features &= ~NETIF_F_LRO; 6877 bp->dev->features &= ~NETIF_F_LRO; 6878 bnxt_set_ring_params(bp); 6879 } 6880 } 6881 rx_rings = min_t(int, rx_rings, grp); 6882 cp = min_t(int, cp, bp->cp_nr_rings); 6883 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6884 stat -= bnxt_get_ulp_stat_ctxs(bp); 6885 cp = min_t(int, cp, stat); 6886 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6887 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6888 
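/* Summary of the reservation flow: the firmware-granted tx/rx/cp/
 * vnic/stat values replace the requested ones, aggregation and LRO
 * are dropped if not even one RX + agg ring pair was granted, and
 * bnxt_trim_rings() shrinks rx/tx to fit the available completion
 * rings before the new counts are committed below.
 */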
rx = rx_rings << 1; 6889 tx_cp = bnxt_num_tx_to_cp(bp, tx); 6890 cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 6891 bp->tx_nr_rings = tx; 6892 6893 /* If we cannot reserve all the RX rings, reset the RSS map only 6894 * if absolutely necessary 6895 */ 6896 if (rx_rings != bp->rx_nr_rings) { 6897 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 6898 rx_rings, bp->rx_nr_rings); 6899 if (netif_is_rxfh_configured(bp->dev) && 6900 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 6901 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 6902 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 6903 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 6904 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 6905 } 6906 } 6907 bp->rx_nr_rings = rx_rings; 6908 bp->cp_nr_rings = cp; 6909 6910 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6911 return -ENOMEM; 6912 6913 if (!netif_is_rxfh_configured(bp->dev)) 6914 bnxt_set_dflt_rss_indir_tbl(bp); 6915 6916 return rc; 6917 } 6918 6919 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6920 int ring_grps, int cp_rings, int stats, 6921 int vnics) 6922 { 6923 struct hwrm_func_vf_cfg_input *req; 6924 u32 flags; 6925 6926 if (!BNXT_NEW_RM(bp)) 6927 return 0; 6928 6929 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6930 cp_rings, stats, vnics); 6931 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6932 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6933 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6934 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6935 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6936 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6937 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6938 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6939 6940 req->flags = cpu_to_le32(flags); 6941 return hwrm_req_send_silent(bp, req); 6942 } 6943 6944 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6945 int ring_grps, int cp_rings, int stats, 6946 int vnics) 6947 { 6948 struct hwrm_func_cfg_input *req; 6949 u32 flags; 6950 6951 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 6952 cp_rings, stats, vnics); 6953 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6954 if (BNXT_NEW_RM(bp)) { 6955 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6956 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6957 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6958 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6959 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6960 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6961 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6962 else 6963 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6964 } 6965 6966 req->flags = cpu_to_le32(flags); 6967 return hwrm_req_send_silent(bp, req); 6968 } 6969 6970 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6971 int ring_grps, int cp_rings, int stats, 6972 int vnics) 6973 { 6974 if (bp->hwrm_spec_code < 0x10801) 6975 return 0; 6976 6977 if (BNXT_PF(bp)) 6978 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6979 ring_grps, cp_rings, stats, 6980 vnics); 6981 6982 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6983 cp_rings, stats, vnics); 6984 } 6985 6986 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6987 { 6988 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6989 struct hwrm_ring_aggint_qcaps_output *resp; 6990 struct hwrm_ring_aggint_qcaps_input *req; 6991 int rc; 6992 6993 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6994 coal_cap->num_cmpl_dma_aggr_max = 63; 6995 
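/* The defaults above and below are conservative fallbacks, used as-is
 * when the firmware is too old (HWRM spec < 0x10902) or
 * RING_AGGINT_QCAPS fails; otherwise the queried limits further down
 * overwrite them.
 */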
coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6996 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6997 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6998 coal_cap->int_lat_tmr_min_max = 65535; 6999 coal_cap->int_lat_tmr_max_max = 65535; 7000 coal_cap->num_cmpl_aggr_int_max = 65535; 7001 coal_cap->timer_units = 80; 7002 7003 if (bp->hwrm_spec_code < 0x10902) 7004 return; 7005 7006 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 7007 return; 7008 7009 resp = hwrm_req_hold(bp, req); 7010 rc = hwrm_req_send_silent(bp, req); 7011 if (!rc) { 7012 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 7013 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 7014 coal_cap->num_cmpl_dma_aggr_max = 7015 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 7016 coal_cap->num_cmpl_dma_aggr_during_int_max = 7017 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 7018 coal_cap->cmpl_aggr_dma_tmr_max = 7019 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 7020 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 7021 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 7022 coal_cap->int_lat_tmr_min_max = 7023 le16_to_cpu(resp->int_lat_tmr_min_max); 7024 coal_cap->int_lat_tmr_max_max = 7025 le16_to_cpu(resp->int_lat_tmr_max_max); 7026 coal_cap->num_cmpl_aggr_int_max = 7027 le16_to_cpu(resp->num_cmpl_aggr_int_max); 7028 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 7029 } 7030 hwrm_req_drop(bp, req); 7031 } 7032 7033 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 7034 { 7035 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7036 7037 return usec * 1000 / coal_cap->timer_units; 7038 } 7039 7040 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 7041 struct bnxt_coal *hw_coal, 7042 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7043 { 7044 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7045 u16 val, tmr, max, flags = hw_coal->flags; 7046 u32 cmpl_params = coal_cap->cmpl_params; 7047 7048 max = hw_coal->bufs_per_record * 128; 7049 if (hw_coal->budget) 7050 max = hw_coal->bufs_per_record * hw_coal->budget; 7051 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 7052 7053 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 7054 req->num_cmpl_aggr_int = cpu_to_le16(val); 7055 7056 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 7057 req->num_cmpl_dma_aggr = cpu_to_le16(val); 7058 7059 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 7060 coal_cap->num_cmpl_dma_aggr_during_int_max); 7061 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 7062 7063 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 7064 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 7065 req->int_lat_tmr_max = cpu_to_le16(tmr); 7066 7067 /* min timer set to 1/2 of interrupt timer */ 7068 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 7069 val = tmr / 2; 7070 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 7071 req->int_lat_tmr_min = cpu_to_le16(val); 7072 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7073 } 7074 7075 /* buf timer set to 1/4 of interrupt timer */ 7076 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 7077 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 7078 7079 if (cmpl_params & 7080 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 7081 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 7082 val = clamp_t(u16, tmr, 1, 7083 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 7084 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 7085 req->enables |= 7086 
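/* Worked example of the conversion used here, assuming the default
 * timer_units of 80 (apparently 80 ns per tick): coal_ticks = 30 usec
 * gives int_lat_tmr_max = 30 * 1000 / 80 = 375 units, a minimum timer
 * of 375 / 2 = 187 and a DMA aggregation timer of 375 / 4 = 93, each
 * clamped to the queried limits.
 */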
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 7087 } 7088 7089 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 7090 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 7091 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 7092 req->flags = cpu_to_le16(flags); 7093 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 7094 } 7095 7096 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 7097 struct bnxt_coal *hw_coal) 7098 { 7099 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 7100 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7101 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 7102 u32 nq_params = coal_cap->nq_params; 7103 u16 tmr; 7104 int rc; 7105 7106 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 7107 return 0; 7108 7109 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7110 if (rc) 7111 return rc; 7112 7113 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 7114 req->flags = 7115 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 7116 7117 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 7118 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 7119 req->int_lat_tmr_min = cpu_to_le16(tmr); 7120 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 7121 return hwrm_req_send(bp, req); 7122 } 7123 7124 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 7125 { 7126 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 7127 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7128 struct bnxt_coal coal; 7129 int rc; 7130 7131 /* Tick values in micro seconds. 7132 * 1 coal_buf x bufs_per_record = 1 completion record. 7133 */ 7134 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 7135 7136 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 7137 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 7138 7139 if (!bnapi->rx_ring) 7140 return -ENODEV; 7141 7142 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7143 if (rc) 7144 return rc; 7145 7146 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 7147 7148 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 7149 7150 return hwrm_req_send(bp, req_rx); 7151 } 7152 7153 static int 7154 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7155 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7156 { 7157 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 7158 7159 req->ring_id = cpu_to_le16(ring_id); 7160 return hwrm_req_send(bp, req); 7161 } 7162 7163 static int 7164 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 7165 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 7166 { 7167 struct bnxt_tx_ring_info *txr; 7168 int i, rc; 7169 7170 bnxt_for_each_napi_tx(i, bnapi, txr) { 7171 u16 ring_id; 7172 7173 ring_id = bnxt_cp_ring_for_tx(bp, txr); 7174 req->ring_id = cpu_to_le16(ring_id); 7175 rc = hwrm_req_send(bp, req); 7176 if (rc) 7177 return rc; 7178 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7179 return 0; 7180 } 7181 return 0; 7182 } 7183 7184 int bnxt_hwrm_set_coal(struct bnxt *bp) 7185 { 7186 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 7187 int i, rc; 7188 7189 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7190 if (rc) 7191 return rc; 7192 7193 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7194 if (rc) { 7195 hwrm_req_drop(bp, req_rx); 7196 return rc; 7197 } 7198 7199 bnxt_hwrm_set_coal_params(bp, 
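/* One request is filled from bp->rx_coal and one from bp->tx_coal;
 * both are held so the same buffers can be re-sent once per ring in
 * the loop below. On P5+ chips, the NQ additionally gets a minimum
 * interrupt timer via __bnxt_hwrm_set_coal_nq().
 */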
&bp->rx_coal, req_rx); 7200 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 7201 7202 hwrm_req_hold(bp, req_rx); 7203 hwrm_req_hold(bp, req_tx); 7204 for (i = 0; i < bp->cp_nr_rings; i++) { 7205 struct bnxt_napi *bnapi = bp->bnapi[i]; 7206 struct bnxt_coal *hw_coal; 7207 7208 if (!bnapi->rx_ring) 7209 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7210 else 7211 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 7212 if (rc) 7213 break; 7214 7215 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7216 continue; 7217 7218 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 7219 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7220 if (rc) 7221 break; 7222 } 7223 if (bnapi->rx_ring) 7224 hw_coal = &bp->rx_coal; 7225 else 7226 hw_coal = &bp->tx_coal; 7227 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 7228 } 7229 hwrm_req_drop(bp, req_rx); 7230 hwrm_req_drop(bp, req_tx); 7231 return rc; 7232 } 7233 7234 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 7235 { 7236 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 7237 struct hwrm_stat_ctx_free_input *req; 7238 int i; 7239 7240 if (!bp->bnapi) 7241 return; 7242 7243 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7244 return; 7245 7246 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 7247 return; 7248 if (BNXT_FW_MAJ(bp) <= 20) { 7249 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 7250 hwrm_req_drop(bp, req); 7251 return; 7252 } 7253 hwrm_req_hold(bp, req0); 7254 } 7255 hwrm_req_hold(bp, req); 7256 for (i = 0; i < bp->cp_nr_rings; i++) { 7257 struct bnxt_napi *bnapi = bp->bnapi[i]; 7258 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7259 7260 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 7261 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 7262 if (req0) { 7263 req0->stat_ctx_id = req->stat_ctx_id; 7264 hwrm_req_send(bp, req0); 7265 } 7266 hwrm_req_send(bp, req); 7267 7268 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 7269 } 7270 } 7271 hwrm_req_drop(bp, req); 7272 if (req0) 7273 hwrm_req_drop(bp, req0); 7274 } 7275 7276 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 7277 { 7278 struct hwrm_stat_ctx_alloc_output *resp; 7279 struct hwrm_stat_ctx_alloc_input *req; 7280 int rc, i; 7281 7282 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7283 return 0; 7284 7285 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 7286 if (rc) 7287 return rc; 7288 7289 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 7290 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 7291 7292 resp = hwrm_req_hold(bp, req); 7293 for (i = 0; i < bp->cp_nr_rings; i++) { 7294 struct bnxt_napi *bnapi = bp->bnapi[i]; 7295 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7296 7297 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 7298 7299 rc = hwrm_req_send(bp, req); 7300 if (rc) 7301 break; 7302 7303 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 7304 7305 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 7306 } 7307 hwrm_req_drop(bp, req); 7308 return rc; 7309 } 7310 7311 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 7312 { 7313 struct hwrm_func_qcfg_output *resp; 7314 struct hwrm_func_qcfg_input *req; 7315 u16 flags; 7316 int rc; 7317 7318 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7319 if (rc) 7320 return rc; 7321 7322 req->fid = cpu_to_le16(0xffff); 7323 resp = hwrm_req_hold(bp, req); 7324 rc = hwrm_req_send(bp, req); 7325 if (rc) 7326 goto func_qcfg_exit; 7327 7328 #ifdef CONFIG_BNXT_SRIOV 7329 if (BNXT_VF(bp)) { 7330 struct bnxt_vf_info *vf = &bp->vf; 7331 7332 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 7333 } else { 7334 
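/* FUNC_QCFG (fid 0xffff, i.e. the calling function) reports the VF
 * default VLAN or the PF's registered VF count, LLDP/DCBX agent and
 * ring monitor capabilities, the NPAR partition type, the bridge
 * (EVB) mode, the configured max MTU and the doorbell BAR layout
 * parsed below.
 */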
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 7335 } 7336 #endif 7337 flags = le16_to_cpu(resp->flags); 7338 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 7339 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 7340 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 7341 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 7342 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 7343 } 7344 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 7345 bp->flags |= BNXT_FLAG_MULTI_HOST; 7346 7347 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 7348 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 7349 7350 switch (resp->port_partition_type) { 7351 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 7352 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 7353 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 7354 bp->port_partition_type = resp->port_partition_type; 7355 break; 7356 } 7357 if (bp->hwrm_spec_code < 0x10707 || 7358 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 7359 bp->br_mode = BRIDGE_MODE_VEB; 7360 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 7361 bp->br_mode = BRIDGE_MODE_VEPA; 7362 else 7363 bp->br_mode = BRIDGE_MODE_UNDEF; 7364 7365 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 7366 if (!bp->max_mtu) 7367 bp->max_mtu = BNXT_MAX_MTU; 7368 7369 if (bp->db_size) 7370 goto func_qcfg_exit; 7371 7372 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 7373 if (BNXT_CHIP_P5(bp)) { 7374 if (BNXT_PF(bp)) 7375 bp->db_offset = DB_PF_OFFSET_P5; 7376 else 7377 bp->db_offset = DB_VF_OFFSET_P5; 7378 } 7379 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 7380 1024); 7381 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 7382 bp->db_size <= bp->db_offset) 7383 bp->db_size = pci_resource_len(bp->pdev, 2); 7384 7385 func_qcfg_exit: 7386 hwrm_req_drop(bp, req); 7387 return rc; 7388 } 7389 7390 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 7391 u8 init_val, u8 init_offset, 7392 bool init_mask_set) 7393 { 7394 ctxm->init_value = init_val; 7395 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 7396 if (init_mask_set) 7397 ctxm->init_offset = init_offset * 4; 7398 else 7399 ctxm->init_value = 0; 7400 } 7401 7402 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 7403 { 7404 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7405 u16 type; 7406 7407 for (type = 0; type < ctx_max; type++) { 7408 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7409 int n = 1; 7410 7411 if (!ctxm->max_entries) 7412 continue; 7413 7414 if (ctxm->instance_bmap) 7415 n = hweight32(ctxm->instance_bmap); 7416 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 7417 if (!ctxm->pg_info) 7418 return -ENOMEM; 7419 } 7420 return 0; 7421 } 7422 7423 #define BNXT_CTX_INIT_VALID(flags) \ 7424 (!!((flags) & \ 7425 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 7426 7427 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 7428 { 7429 struct hwrm_func_backing_store_qcaps_v2_output *resp; 7430 struct hwrm_func_backing_store_qcaps_v2_input *req; 7431 struct bnxt_ctx_mem_info *ctx; 7432 u16 type; 7433 int rc; 7434 7435 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 7436 if (rc) 7437 return rc; 7438 7439 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7440 if (!ctx) 7441 return -ENOMEM; 7442 bp->ctx = ctx; 7443 7444 resp = hwrm_req_hold(bp, req); 7445 7446 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 7447 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7448 u8 init_val, init_off, i; 
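/* Each FUNC_BACKING_STORE_QCAPS_V2 response describes one context
 * type (entry size, min/max entries, split entries, initializer) and
 * names the next type to query via next_valid_type, so the loop walks
 * the firmware's list of valid types rather than every index.
 */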
7449 __le32 *p; 7450 u32 flags; 7451 7452 req->type = cpu_to_le16(type); 7453 rc = hwrm_req_send(bp, req); 7454 if (rc) 7455 goto ctx_done; 7456 flags = le32_to_cpu(resp->flags); 7457 type = le16_to_cpu(resp->next_valid_type); 7458 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) 7459 continue; 7460 7461 ctxm->type = le16_to_cpu(resp->type); 7462 ctxm->entry_size = le16_to_cpu(resp->entry_size); 7463 ctxm->flags = flags; 7464 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 7465 ctxm->entry_multiple = resp->entry_multiple; 7466 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); 7467 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 7468 init_val = resp->ctx_init_value; 7469 init_off = resp->ctx_init_offset; 7470 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 7471 BNXT_CTX_INIT_VALID(flags)); 7472 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 7473 BNXT_MAX_SPLIT_ENTRY); 7474 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 7475 i++, p++) 7476 ctxm->split[i] = le32_to_cpu(*p); 7477 } 7478 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 7479 7480 ctx_done: 7481 hwrm_req_drop(bp, req); 7482 return rc; 7483 } 7484 7485 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 7486 { 7487 struct hwrm_func_backing_store_qcaps_output *resp; 7488 struct hwrm_func_backing_store_qcaps_input *req; 7489 int rc; 7490 7491 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 7492 return 0; 7493 7494 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 7495 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 7496 7497 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 7498 if (rc) 7499 return rc; 7500 7501 resp = hwrm_req_hold(bp, req); 7502 rc = hwrm_req_send_silent(bp, req); 7503 if (!rc) { 7504 struct bnxt_ctx_mem_type *ctxm; 7505 struct bnxt_ctx_mem_info *ctx; 7506 u8 init_val, init_idx = 0; 7507 u16 init_mask; 7508 7509 ctx = bp->ctx; 7510 if (!ctx) { 7511 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7512 if (!ctx) { 7513 rc = -ENOMEM; 7514 goto ctx_err; 7515 } 7516 bp->ctx = ctx; 7517 } 7518 init_val = resp->ctx_kind_initializer; 7519 init_mask = le16_to_cpu(resp->ctx_init_mask); 7520 7521 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7522 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 7523 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 7524 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 7525 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 7526 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 7527 (init_mask & (1 << init_idx++)) != 0); 7528 7529 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7530 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 7531 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 7532 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 7533 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 7534 (init_mask & (1 << init_idx++)) != 0); 7535 7536 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7537 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 7538 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 7539 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 7540 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 7541 (init_mask & (1 << init_idx++)) != 0); 7542 7543 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7544 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 7545 ctxm->max_entries = ctxm->vnic_entries + 7546 le16_to_cpu(resp->vnic_max_ring_table_entries); 7547 ctxm->entry_size = 
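/* The legacy (non-V2) QCAPS response is flat: each context type's
 * limits arrive in dedicated fields and are copied into the same
 * ctx_arr[] slots that the V2 path fills, with each type's
 * initializer derived from ctx_kind_initializer and one bit of
 * ctx_init_mask.
 */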
le16_to_cpu(resp->vnic_entry_size); 7548 bnxt_init_ctx_initializer(ctxm, init_val, 7549 resp->vnic_init_offset, 7550 (init_mask & (1 << init_idx++)) != 0); 7551 7552 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7553 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 7554 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 7555 bnxt_init_ctx_initializer(ctxm, init_val, 7556 resp->stat_init_offset, 7557 (init_mask & (1 << init_idx++)) != 0); 7558 7559 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7560 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 7561 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 7562 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 7563 ctxm->entry_multiple = resp->tqm_entries_multiple; 7564 if (!ctxm->entry_multiple) 7565 ctxm->entry_multiple = 1; 7566 7567 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 7568 7569 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7570 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 7571 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 7572 ctxm->mrav_num_entries_units = 7573 le16_to_cpu(resp->mrav_num_entries_units); 7574 bnxt_init_ctx_initializer(ctxm, init_val, 7575 resp->mrav_init_offset, 7576 (init_mask & (1 << init_idx++)) != 0); 7577 7578 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7579 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 7580 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 7581 7582 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 7583 if (!ctx->tqm_fp_rings_count) 7584 ctx->tqm_fp_rings_count = bp->max_q; 7585 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 7586 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 7587 7588 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 7589 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 7590 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 7591 7592 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 7593 } else { 7594 rc = 0; 7595 } 7596 ctx_err: 7597 hwrm_req_drop(bp, req); 7598 return rc; 7599 } 7600 7601 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 7602 __le64 *pg_dir) 7603 { 7604 if (!rmem->nr_pages) 7605 return; 7606 7607 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 7608 if (rmem->depth >= 1) { 7609 if (rmem->depth == 2) 7610 *pg_attr |= 2; 7611 else 7612 *pg_attr |= 1; 7613 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 7614 } else { 7615 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 7616 } 7617 } 7618 7619 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 7620 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 7621 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 7622 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 7623 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 7624 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 7625 7626 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 7627 { 7628 struct hwrm_func_backing_store_cfg_input *req; 7629 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7630 struct bnxt_ctx_pg_info *ctx_pg; 7631 struct bnxt_ctx_mem_type *ctxm; 7632 void **__req = (void **)&req; 7633 u32 req_len = sizeof(*req); 7634 __le32 *num_entries; 7635 __le64 *pg_dir; 7636 u32 flags = 0; 7637 u8 *pg_attr; 7638 u32 ena; 7639 int rc; 7640 int i; 7641 7642 if (!ctx) 7643 return 0; 7644 7645 if (req_len > bp->hwrm_max_ext_req_len) 7646 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 7647 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 7648 if (rc) 7649 return rc; 7650 7651 req->enables = cpu_to_le32(enables); 7652 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 7653 ctxm = 
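/* For each enabled type below, bnxt_hwrm_set_pg_attr() encodes the
 * ring memory's indirection depth into the *_pg_size_*_lvl field
 * (0 = single page, 1 = one page-table level, 2 = two levels) and
 * points the page directory at the page table or the lone data page.
 */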
&ctx->ctx_arr[BNXT_CTX_QP]; 7654 ctx_pg = ctxm->pg_info; 7655 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 7656 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 7657 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 7658 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 7659 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7660 &req->qpc_pg_size_qpc_lvl, 7661 &req->qpc_page_dir); 7662 } 7663 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 7664 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7665 ctx_pg = ctxm->pg_info; 7666 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 7667 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 7668 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 7669 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7670 &req->srq_pg_size_srq_lvl, 7671 &req->srq_page_dir); 7672 } 7673 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 7674 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7675 ctx_pg = ctxm->pg_info; 7676 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 7677 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 7678 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 7679 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7680 &req->cq_pg_size_cq_lvl, 7681 &req->cq_page_dir); 7682 } 7683 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 7684 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7685 ctx_pg = ctxm->pg_info; 7686 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 7687 req->vnic_num_ring_table_entries = 7688 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); 7689 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 7690 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7691 &req->vnic_pg_size_vnic_lvl, 7692 &req->vnic_page_dir); 7693 } 7694 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 7695 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7696 ctx_pg = ctxm->pg_info; 7697 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 7698 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 7699 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7700 &req->stat_pg_size_stat_lvl, 7701 &req->stat_page_dir); 7702 } 7703 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 7704 u32 units; 7705 7706 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7707 ctx_pg = ctxm->pg_info; 7708 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 7709 units = ctxm->mrav_num_entries_units; 7710 if (units) { 7711 u32 num_mr, num_ah = ctxm->mrav_av_entries; 7712 u32 entries; 7713 7714 num_mr = ctx_pg->entries - num_ah; 7715 entries = ((num_mr / units) << 16) | (num_ah / units); 7716 req->mrav_num_entries = cpu_to_le32(entries); 7717 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 7718 } 7719 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 7720 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7721 &req->mrav_pg_size_mrav_lvl, 7722 &req->mrav_page_dir); 7723 } 7724 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 7725 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7726 ctx_pg = ctxm->pg_info; 7727 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 7728 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 7729 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7730 &req->tim_pg_size_tim_lvl, 7731 &req->tim_page_dir); 7732 } 7733 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7734 for (i = 0, num_entries = &req->tqm_sp_num_entries, 7735 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 7736 pg_dir = &req->tqm_sp_page_dir, 7737 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 7738 ctx_pg = ctxm->pg_info; 7739 i < BNXT_MAX_TQM_RINGS; 7740 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 7741 
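/* This loop programs the slow-path TQM ring first (using the
 * BNXT_CTX_STQM page info set up in the for-header) and then the
 * fast-path TQM rings from BNXT_CTX_FTQM's per-instance pg_info[],
 * advancing the num_entries/pg_attr/pg_dir request fields and
 * shifting the enable bit left once per ring.
 */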
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 7742 if (!(enables & ena)) 7743 continue; 7744 7745 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 7746 *num_entries = cpu_to_le32(ctx_pg->entries); 7747 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 7748 } 7749 req->flags = cpu_to_le32(flags); 7750 return hwrm_req_send(bp, req); 7751 } 7752 7753 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 7754 struct bnxt_ctx_pg_info *ctx_pg) 7755 { 7756 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7757 7758 rmem->page_size = BNXT_PAGE_SIZE; 7759 rmem->pg_arr = ctx_pg->ctx_pg_arr; 7760 rmem->dma_arr = ctx_pg->ctx_dma_arr; 7761 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 7762 if (rmem->depth >= 1) 7763 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 7764 return bnxt_alloc_ring(bp, rmem); 7765 } 7766 7767 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 7768 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 7769 u8 depth, struct bnxt_ctx_mem_type *ctxm) 7770 { 7771 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7772 int rc; 7773 7774 if (!mem_size) 7775 return -EINVAL; 7776 7777 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7778 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 7779 ctx_pg->nr_pages = 0; 7780 return -EINVAL; 7781 } 7782 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 7783 int nr_tbls, i; 7784 7785 rmem->depth = 2; 7786 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 7787 GFP_KERNEL); 7788 if (!ctx_pg->ctx_pg_tbl) 7789 return -ENOMEM; 7790 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 7791 rmem->nr_pages = nr_tbls; 7792 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7793 if (rc) 7794 return rc; 7795 for (i = 0; i < nr_tbls; i++) { 7796 struct bnxt_ctx_pg_info *pg_tbl; 7797 7798 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 7799 if (!pg_tbl) 7800 return -ENOMEM; 7801 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 7802 rmem = &pg_tbl->ring_mem; 7803 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 7804 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 7805 rmem->depth = 1; 7806 rmem->nr_pages = MAX_CTX_PAGES; 7807 rmem->ctx_mem = ctxm; 7808 if (i == (nr_tbls - 1)) { 7809 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 7810 7811 if (rem) 7812 rmem->nr_pages = rem; 7813 } 7814 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 7815 if (rc) 7816 break; 7817 } 7818 } else { 7819 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7820 if (rmem->nr_pages > 1 || depth) 7821 rmem->depth = 1; 7822 rmem->ctx_mem = ctxm; 7823 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7824 } 7825 return rc; 7826 } 7827 7828 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 7829 struct bnxt_ctx_pg_info *ctx_pg) 7830 { 7831 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7832 7833 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 7834 ctx_pg->ctx_pg_tbl) { 7835 int i, nr_tbls = rmem->nr_pages; 7836 7837 for (i = 0; i < nr_tbls; i++) { 7838 struct bnxt_ctx_pg_info *pg_tbl; 7839 struct bnxt_ring_mem_info *rmem2; 7840 7841 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 7842 if (!pg_tbl) 7843 continue; 7844 rmem2 = &pg_tbl->ring_mem; 7845 bnxt_free_ring(bp, rmem2); 7846 ctx_pg->ctx_pg_arr[i] = NULL; 7847 kfree(pg_tbl); 7848 ctx_pg->ctx_pg_tbl[i] = NULL; 7849 } 7850 kfree(ctx_pg->ctx_pg_tbl); 7851 ctx_pg->ctx_pg_tbl = NULL; 7852 } 7853 bnxt_free_ring(bp, rmem); 7854 ctx_pg->nr_pages = 0; 7855 } 7856 7857 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 7858 struct bnxt_ctx_mem_type *ctxm, u32 entries, 7859 u8 pg_lvl) 7860 { 7861 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 7862 int i, rc = 0, n = 
1; 7863 u32 mem_size; 7864 7865 if (!ctxm->entry_size || !ctx_pg) 7866 return -EINVAL; 7867 if (ctxm->instance_bmap) 7868 n = hweight32(ctxm->instance_bmap); 7869 if (ctxm->entry_multiple) 7870 entries = roundup(entries, ctxm->entry_multiple); 7871 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 7872 mem_size = entries * ctxm->entry_size; 7873 for (i = 0; i < n && !rc; i++) { 7874 ctx_pg[i].entries = entries; 7875 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 7876 ctxm->init_value ? ctxm : NULL); 7877 } 7878 return rc; 7879 } 7880 7881 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 7882 struct bnxt_ctx_mem_type *ctxm, 7883 bool last) 7884 { 7885 struct hwrm_func_backing_store_cfg_v2_input *req; 7886 u32 instance_bmap = ctxm->instance_bmap; 7887 int i, j, rc = 0, n = 1; 7888 __le32 *p; 7889 7890 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 7891 return 0; 7892 7893 if (instance_bmap) 7894 n = hweight32(ctxm->instance_bmap); 7895 else 7896 instance_bmap = 1; 7897 7898 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 7899 if (rc) 7900 return rc; 7901 hwrm_req_hold(bp, req); 7902 req->type = cpu_to_le16(ctxm->type); 7903 req->entry_size = cpu_to_le16(ctxm->entry_size); 7904 req->subtype_valid_cnt = ctxm->split_entry_cnt; 7905 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 7906 p[i] = cpu_to_le32(ctxm->split[i]); 7907 for (i = 0, j = 0; j < n && !rc; i++) { 7908 struct bnxt_ctx_pg_info *ctx_pg; 7909 7910 if (!(instance_bmap & (1 << i))) 7911 continue; 7912 req->instance = cpu_to_le16(i); 7913 ctx_pg = &ctxm->pg_info[j++]; 7914 if (!ctx_pg->entries) 7915 continue; 7916 req->num_entries = cpu_to_le32(ctx_pg->entries); 7917 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7918 &req->page_size_pbl_level, 7919 &req->page_dir); 7920 if (last && j == n) 7921 req->flags = 7922 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 7923 rc = hwrm_req_send(bp, req); 7924 } 7925 hwrm_req_drop(bp, req); 7926 return rc; 7927 } 7928 7929 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 7930 { 7931 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7932 struct bnxt_ctx_mem_type *ctxm; 7933 u16 last_type; 7934 int rc = 0; 7935 u16 type; 7936 7937 if (!ena) 7938 return 0; 7939 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 7940 last_type = BNXT_CTX_MAX - 1; 7941 else 7942 last_type = BNXT_CTX_L2_MAX - 1; 7943 ctx->ctx_arr[last_type].last = 1; 7944 7945 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 7946 ctxm = &ctx->ctx_arr[type]; 7947 7948 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 7949 if (rc) 7950 return rc; 7951 } 7952 return 0; 7953 } 7954 7955 void bnxt_free_ctx_mem(struct bnxt *bp) 7956 { 7957 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7958 u16 type; 7959 7960 if (!ctx) 7961 return; 7962 7963 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { 7964 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7965 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 7966 int i, n = 1; 7967 7968 if (!ctx_pg) 7969 continue; 7970 if (ctxm->instance_bmap) 7971 n = hweight32(ctxm->instance_bmap); 7972 for (i = 0; i < n; i++) 7973 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 7974 7975 kfree(ctx_pg); 7976 ctxm->pg_info = NULL; 7977 } 7978 7979 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 7980 kfree(ctx); 7981 bp->ctx = NULL; 7982 } 7983 7984 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 7985 { 7986 struct bnxt_ctx_mem_type *ctxm; 7987 struct bnxt_ctx_mem_info *ctx; 7988 u32 l2_qps, qp1_qps, 
max_qps; 7989 u32 ena, entries_sp, entries; 7990 u32 srqs, max_srqs, min; 7991 u32 num_mr, num_ah; 7992 u32 extra_srqs = 0; 7993 u32 extra_qps = 0; 7994 u8 pg_lvl = 1; 7995 int i, rc; 7996 7997 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 7998 if (rc) { 7999 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 8000 rc); 8001 return rc; 8002 } 8003 ctx = bp->ctx; 8004 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 8005 return 0; 8006 8007 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8008 l2_qps = ctxm->qp_l2_entries; 8009 qp1_qps = ctxm->qp_qp1_entries; 8010 max_qps = ctxm->max_entries; 8011 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8012 srqs = ctxm->srq_l2_entries; 8013 max_srqs = ctxm->max_entries; 8014 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 8015 pg_lvl = 2; 8016 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); 8017 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 8018 } 8019 8020 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8021 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 8022 pg_lvl); 8023 if (rc) 8024 return rc; 8025 8026 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8027 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 8028 if (rc) 8029 return rc; 8030 8031 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8032 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 8033 extra_qps * 2, pg_lvl); 8034 if (rc) 8035 return rc; 8036 8037 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8038 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8039 if (rc) 8040 return rc; 8041 8042 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8043 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 8044 if (rc) 8045 return rc; 8046 8047 ena = 0; 8048 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 8049 goto skip_rdma; 8050 8051 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8052 /* 128K extra is needed to accommodate static AH context 8053 * allocation by f/w. 
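 * The code below sizes those pools accordingly: num_mr is capped at
 * 256K entries or half of the MRAV pool, and num_ah at 128K entries
 * or num_mr, whichever is smaller, before the backing store pages
 * are allocated.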
8054 */ 8055 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 8056 num_ah = min_t(u32, num_mr, 1024 * 128); 8057 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 8058 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 8059 ctxm->mrav_av_entries = num_ah; 8060 8061 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 8062 if (rc) 8063 return rc; 8064 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 8065 8066 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8067 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 8068 if (rc) 8069 return rc; 8070 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 8071 8072 skip_rdma: 8073 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8074 min = ctxm->min_entries; 8075 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 8076 2 * (extra_qps + qp1_qps) + min; 8077 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 8078 if (rc) 8079 return rc; 8080 8081 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8082 entries = l2_qps + 2 * (extra_qps + qp1_qps); 8083 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 8084 if (rc) 8085 return rc; 8086 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 8087 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 8088 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 8089 8090 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8091 rc = bnxt_backing_store_cfg_v2(bp, ena); 8092 else 8093 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 8094 if (rc) { 8095 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 8096 rc); 8097 return rc; 8098 } 8099 ctx->flags |= BNXT_CTX_FLAG_INITED; 8100 return 0; 8101 } 8102 8103 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 8104 { 8105 struct hwrm_func_resource_qcaps_output *resp; 8106 struct hwrm_func_resource_qcaps_input *req; 8107 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8108 int rc; 8109 8110 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 8111 if (rc) 8112 return rc; 8113 8114 req->fid = cpu_to_le16(0xffff); 8115 resp = hwrm_req_hold(bp, req); 8116 rc = hwrm_req_send_silent(bp, req); 8117 if (rc) 8118 goto hwrm_func_resc_qcaps_exit; 8119 8120 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 8121 if (!all) 8122 goto hwrm_func_resc_qcaps_exit; 8123 8124 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 8125 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8126 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 8127 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8128 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 8129 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8130 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 8131 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8132 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 8133 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 8134 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 8135 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8136 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 8137 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8138 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 8139 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8140 8141 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8142 u16 max_msix = le16_to_cpu(resp->max_msix); 8143 8144 hw_resc->max_nqs = max_msix; 8145 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 8146 } 8147 8148 if (BNXT_PF(bp)) { 8149 struct bnxt_pf_info *pf = &bp->pf; 8150 
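		/* The firmware-reported VF reservation strategy is clamped
		 * below: any value above BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC
		 * is treated as BNXT_VF_RESV_STRATEGY_MAXIMAL, so an
		 * out-of-range strategy falls back to maximal reservations.
		 */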
8151 pf->vf_resv_strategy = 8152 le16_to_cpu(resp->vf_reservation_strategy); 8153 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 8154 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 8155 } 8156 hwrm_func_resc_qcaps_exit: 8157 hwrm_req_drop(bp, req); 8158 return rc; 8159 } 8160 8161 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 8162 { 8163 struct hwrm_port_mac_ptp_qcfg_output *resp; 8164 struct hwrm_port_mac_ptp_qcfg_input *req; 8165 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 8166 bool phc_cfg; 8167 u8 flags; 8168 int rc; 8169 8170 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { 8171 rc = -ENODEV; 8172 goto no_ptp; 8173 } 8174 8175 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 8176 if (rc) 8177 goto no_ptp; 8178 8179 req->port_id = cpu_to_le16(bp->pf.port_id); 8180 resp = hwrm_req_hold(bp, req); 8181 rc = hwrm_req_send(bp, req); 8182 if (rc) 8183 goto exit; 8184 8185 flags = resp->flags; 8186 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 8187 rc = -ENODEV; 8188 goto exit; 8189 } 8190 if (!ptp) { 8191 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 8192 if (!ptp) { 8193 rc = -ENOMEM; 8194 goto exit; 8195 } 8196 ptp->bp = bp; 8197 bp->ptp_cfg = ptp; 8198 } 8199 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { 8200 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); 8201 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 8202 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8203 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 8204 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 8205 } else { 8206 rc = -ENODEV; 8207 goto exit; 8208 } 8209 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 8210 rc = bnxt_ptp_init(bp, phc_cfg); 8211 if (rc) 8212 netdev_warn(bp->dev, "PTP initialization failed.\n"); 8213 exit: 8214 hwrm_req_drop(bp, req); 8215 if (!rc) 8216 return 0; 8217 8218 no_ptp: 8219 bnxt_ptp_clear(bp); 8220 kfree(ptp); 8221 bp->ptp_cfg = NULL; 8222 return rc; 8223 } 8224 8225 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 8226 { 8227 struct hwrm_func_qcaps_output *resp; 8228 struct hwrm_func_qcaps_input *req; 8229 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8230 u32 flags, flags_ext, flags_ext2; 8231 int rc; 8232 8233 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 8234 if (rc) 8235 return rc; 8236 8237 req->fid = cpu_to_le16(0xffff); 8238 resp = hwrm_req_hold(bp, req); 8239 rc = hwrm_req_send(bp, req); 8240 if (rc) 8241 goto hwrm_func_qcaps_exit; 8242 8243 flags = le32_to_cpu(resp->flags); 8244 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 8245 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 8246 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 8247 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 8248 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 8249 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 8250 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 8251 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 8252 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 8253 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 8254 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 8255 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 8256 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 8257 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 8258 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 8259 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 8260 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 8261 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 8262 8263 flags_ext 
= le32_to_cpu(resp->flags_ext); 8264 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 8265 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 8266 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 8267 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 8268 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 8269 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 8270 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 8271 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 8272 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 8273 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 8274 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 8275 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 8276 8277 flags_ext2 = le32_to_cpu(resp->flags_ext2); 8278 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 8279 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 8280 8281 bp->tx_push_thresh = 0; 8282 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 8283 BNXT_FW_MAJ(bp) > 217) 8284 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 8285 8286 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8287 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8288 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8289 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8290 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 8291 if (!hw_resc->max_hw_ring_grps) 8292 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 8293 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8294 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8295 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8296 8297 if (BNXT_PF(bp)) { 8298 struct bnxt_pf_info *pf = &bp->pf; 8299 8300 pf->fw_fid = le16_to_cpu(resp->fid); 8301 pf->port_id = le16_to_cpu(resp->port_id); 8302 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 8303 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 8304 pf->max_vfs = le16_to_cpu(resp->max_vfs); 8305 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 8306 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 8307 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 8308 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 8309 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 8310 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 8311 bp->flags &= ~BNXT_FLAG_WOL_CAP; 8312 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 8313 bp->flags |= BNXT_FLAG_WOL_CAP; 8314 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 8315 bp->fw_cap |= BNXT_FW_CAP_PTP; 8316 } else { 8317 bnxt_ptp_clear(bp); 8318 kfree(bp->ptp_cfg); 8319 bp->ptp_cfg = NULL; 8320 } 8321 } else { 8322 #ifdef CONFIG_BNXT_SRIOV 8323 struct bnxt_vf_info *vf = &bp->vf; 8324 8325 vf->fw_fid = le16_to_cpu(resp->fid); 8326 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 8327 #endif 8328 } 8329 8330 hwrm_func_qcaps_exit: 8331 hwrm_req_drop(bp, req); 8332 return rc; 8333 } 8334 8335 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 8336 { 8337 struct hwrm_dbg_qcaps_output *resp; 8338 struct hwrm_dbg_qcaps_input *req; 8339 int rc; 8340 8341 bp->fw_dbg_cap = 0; 8342 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 8343 return; 8344 8345 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 8346 if (rc) 8347 return; 8348 8349 req->fid = cpu_to_le16(0xffff); 8350 resp = hwrm_req_hold(bp, req); 8351 rc = hwrm_req_send(bp, req); 8352 if (rc) 8353 goto hwrm_dbg_qcaps_exit; 8354 8355 
bp->fw_dbg_cap = le32_to_cpu(resp->flags); 8356 8357 hwrm_dbg_qcaps_exit: 8358 hwrm_req_drop(bp, req); 8359 } 8360 8361 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 8362 8363 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 8364 { 8365 int rc; 8366 8367 rc = __bnxt_hwrm_func_qcaps(bp); 8368 if (rc) 8369 return rc; 8370 8371 bnxt_hwrm_dbg_qcaps(bp); 8372 8373 rc = bnxt_hwrm_queue_qportcfg(bp); 8374 if (rc) { 8375 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 8376 return rc; 8377 } 8378 if (bp->hwrm_spec_code >= 0x10803) { 8379 rc = bnxt_alloc_ctx_mem(bp); 8380 if (rc) 8381 return rc; 8382 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8383 if (!rc) 8384 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 8385 } 8386 return 0; 8387 } 8388 8389 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 8390 { 8391 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 8392 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 8393 u32 flags; 8394 int rc; 8395 8396 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 8397 return 0; 8398 8399 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 8400 if (rc) 8401 return rc; 8402 8403 resp = hwrm_req_hold(bp, req); 8404 rc = hwrm_req_send(bp, req); 8405 if (rc) 8406 goto hwrm_cfa_adv_qcaps_exit; 8407 8408 flags = le32_to_cpu(resp->flags); 8409 if (flags & 8410 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 8411 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 8412 8413 hwrm_cfa_adv_qcaps_exit: 8414 hwrm_req_drop(bp, req); 8415 return rc; 8416 } 8417 8418 static int __bnxt_alloc_fw_health(struct bnxt *bp) 8419 { 8420 if (bp->fw_health) 8421 return 0; 8422 8423 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 8424 if (!bp->fw_health) 8425 return -ENOMEM; 8426 8427 mutex_init(&bp->fw_health->lock); 8428 return 0; 8429 } 8430 8431 static int bnxt_alloc_fw_health(struct bnxt *bp) 8432 { 8433 int rc; 8434 8435 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 8436 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8437 return 0; 8438 8439 rc = __bnxt_alloc_fw_health(bp); 8440 if (rc) { 8441 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 8442 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 8443 return rc; 8444 } 8445 8446 return 0; 8447 } 8448 8449 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 8450 { 8451 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 8452 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8453 BNXT_FW_HEALTH_WIN_MAP_OFF); 8454 } 8455 8456 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 8457 { 8458 struct bnxt_fw_health *fw_health = bp->fw_health; 8459 u32 reg_type; 8460 8461 if (!fw_health) 8462 return; 8463 8464 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 8465 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8466 fw_health->status_reliable = false; 8467 8468 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 8469 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8470 fw_health->resets_reliable = false; 8471 } 8472 8473 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 8474 { 8475 void __iomem *hs; 8476 u32 status_loc; 8477 u32 reg_type; 8478 u32 sig; 8479 8480 if (bp->fw_health) 8481 bp->fw_health->status_reliable = false; 8482 8483 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 8484 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 8485 8486 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 8487 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 8488 if (!bp->chip_num) { 8489 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 8490 
bp->chip_num = readl(bp->bar0 + 8491 BNXT_FW_HEALTH_WIN_BASE + 8492 BNXT_GRC_REG_CHIP_NUM); 8493 } 8494 if (!BNXT_CHIP_P5(bp)) 8495 return; 8496 8497 status_loc = BNXT_GRC_REG_STATUS_P5 | 8498 BNXT_FW_HEALTH_REG_TYPE_BAR0; 8499 } else { 8500 status_loc = readl(hs + offsetof(struct hcomm_status, 8501 fw_status_loc)); 8502 } 8503 8504 if (__bnxt_alloc_fw_health(bp)) { 8505 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 8506 return; 8507 } 8508 8509 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 8510 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 8511 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 8512 __bnxt_map_fw_health_reg(bp, status_loc); 8513 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 8514 BNXT_FW_HEALTH_WIN_OFF(status_loc); 8515 } 8516 8517 bp->fw_health->status_reliable = true; 8518 } 8519 8520 static int bnxt_map_fw_health_regs(struct bnxt *bp) 8521 { 8522 struct bnxt_fw_health *fw_health = bp->fw_health; 8523 u32 reg_base = 0xffffffff; 8524 int i; 8525 8526 bp->fw_health->status_reliable = false; 8527 bp->fw_health->resets_reliable = false; 8528 /* Only pre-map the monitoring GRC registers using window 3 */ 8529 for (i = 0; i < 4; i++) { 8530 u32 reg = fw_health->regs[i]; 8531 8532 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 8533 continue; 8534 if (reg_base == 0xffffffff) 8535 reg_base = reg & BNXT_GRC_BASE_MASK; 8536 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 8537 return -ERANGE; 8538 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 8539 } 8540 bp->fw_health->status_reliable = true; 8541 bp->fw_health->resets_reliable = true; 8542 if (reg_base == 0xffffffff) 8543 return 0; 8544 8545 __bnxt_map_fw_health_reg(bp, reg_base); 8546 return 0; 8547 } 8548 8549 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 8550 { 8551 if (!bp->fw_health) 8552 return; 8553 8554 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 8555 bp->fw_health->status_reliable = true; 8556 bp->fw_health->resets_reliable = true; 8557 } else { 8558 bnxt_try_map_fw_health_reg(bp); 8559 } 8560 } 8561 8562 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 8563 { 8564 struct bnxt_fw_health *fw_health = bp->fw_health; 8565 struct hwrm_error_recovery_qcfg_output *resp; 8566 struct hwrm_error_recovery_qcfg_input *req; 8567 int rc, i; 8568 8569 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8570 return 0; 8571 8572 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 8573 if (rc) 8574 return rc; 8575 8576 resp = hwrm_req_hold(bp, req); 8577 rc = hwrm_req_send(bp, req); 8578 if (rc) 8579 goto err_recovery_out; 8580 fw_health->flags = le32_to_cpu(resp->flags); 8581 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 8582 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 8583 rc = -EINVAL; 8584 goto err_recovery_out; 8585 } 8586 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 8587 fw_health->master_func_wait_dsecs = 8588 le32_to_cpu(resp->master_func_wait_period); 8589 fw_health->normal_func_wait_dsecs = 8590 le32_to_cpu(resp->normal_func_wait_period); 8591 fw_health->post_reset_wait_dsecs = 8592 le32_to_cpu(resp->master_func_wait_period_after_reset); 8593 fw_health->post_reset_max_wait_dsecs = 8594 le32_to_cpu(resp->max_bailout_time_after_reset); 8595 fw_health->regs[BNXT_FW_HEALTH_REG] = 8596 le32_to_cpu(resp->fw_health_status_reg); 8597 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 8598 le32_to_cpu(resp->fw_heartbeat_reg); 8599 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 8600 le32_to_cpu(resp->fw_reset_cnt_reg); 8601 
fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 8602 le32_to_cpu(resp->reset_inprogress_reg); 8603 fw_health->fw_reset_inprog_reg_mask = 8604 le32_to_cpu(resp->reset_inprogress_reg_mask); 8605 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 8606 if (fw_health->fw_reset_seq_cnt >= 16) { 8607 rc = -EINVAL; 8608 goto err_recovery_out; 8609 } 8610 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 8611 fw_health->fw_reset_seq_regs[i] = 8612 le32_to_cpu(resp->reset_reg[i]); 8613 fw_health->fw_reset_seq_vals[i] = 8614 le32_to_cpu(resp->reset_reg_val[i]); 8615 fw_health->fw_reset_seq_delay_msec[i] = 8616 resp->delay_after_reset[i]; 8617 } 8618 err_recovery_out: 8619 hwrm_req_drop(bp, req); 8620 if (!rc) 8621 rc = bnxt_map_fw_health_regs(bp); 8622 if (rc) 8623 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 8624 return rc; 8625 } 8626 8627 static int bnxt_hwrm_func_reset(struct bnxt *bp) 8628 { 8629 struct hwrm_func_reset_input *req; 8630 int rc; 8631 8632 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 8633 if (rc) 8634 return rc; 8635 8636 req->enables = 0; 8637 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 8638 return hwrm_req_send(bp, req); 8639 } 8640 8641 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 8642 { 8643 struct hwrm_nvm_get_dev_info_output nvm_info; 8644 8645 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 8646 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 8647 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 8648 nvm_info.nvm_cfg_ver_upd); 8649 } 8650 8651 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 8652 { 8653 struct hwrm_queue_qportcfg_output *resp; 8654 struct hwrm_queue_qportcfg_input *req; 8655 u8 i, j, *qptr; 8656 bool no_rdma; 8657 int rc = 0; 8658 8659 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 8660 if (rc) 8661 return rc; 8662 8663 resp = hwrm_req_hold(bp, req); 8664 rc = hwrm_req_send(bp, req); 8665 if (rc) 8666 goto qportcfg_exit; 8667 8668 if (!resp->max_configurable_queues) { 8669 rc = -EINVAL; 8670 goto qportcfg_exit; 8671 } 8672 bp->max_tc = resp->max_configurable_queues; 8673 bp->max_lltc = resp->max_configurable_lossless_queues; 8674 if (bp->max_tc > BNXT_MAX_QUEUE) 8675 bp->max_tc = BNXT_MAX_QUEUE; 8676 8677 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 8678 qptr = &resp->queue_id0; 8679 for (i = 0, j = 0; i < bp->max_tc; i++) { 8680 bp->q_info[j].queue_id = *qptr; 8681 bp->q_ids[i] = *qptr++; 8682 bp->q_info[j].queue_profile = *qptr++; 8683 bp->tc_to_qidx[j] = j; 8684 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 8685 (no_rdma && BNXT_PF(bp))) 8686 j++; 8687 } 8688 bp->max_q = bp->max_tc; 8689 bp->max_tc = max_t(u8, j, 1); 8690 8691 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 8692 bp->max_tc = 1; 8693 8694 if (bp->max_lltc > bp->max_tc) 8695 bp->max_lltc = bp->max_tc; 8696 8697 qportcfg_exit: 8698 hwrm_req_drop(bp, req); 8699 return rc; 8700 } 8701 8702 static int bnxt_hwrm_poll(struct bnxt *bp) 8703 { 8704 struct hwrm_ver_get_input *req; 8705 int rc; 8706 8707 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 8708 if (rc) 8709 return rc; 8710 8711 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 8712 req->hwrm_intf_min = HWRM_VERSION_MINOR; 8713 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 8714 8715 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 8716 rc = hwrm_req_send(bp, req); 8717 return rc; 8718 } 8719 8720 static int bnxt_hwrm_ver_get(struct bnxt *bp) 8721 { 8722 struct hwrm_ver_get_output *resp; 8723 struct hwrm_ver_get_input *req; 8724 u16 fw_maj, fw_min, fw_bld, fw_rsv; 8725 u32 dev_caps_cfg, 
hwrm_ver; 8726 int rc, len; 8727 8728 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 8729 if (rc) 8730 return rc; 8731 8732 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 8733 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 8734 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 8735 req->hwrm_intf_min = HWRM_VERSION_MINOR; 8736 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 8737 8738 resp = hwrm_req_hold(bp, req); 8739 rc = hwrm_req_send(bp, req); 8740 if (rc) 8741 goto hwrm_ver_get_exit; 8742 8743 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 8744 8745 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 8746 resp->hwrm_intf_min_8b << 8 | 8747 resp->hwrm_intf_upd_8b; 8748 if (resp->hwrm_intf_maj_8b < 1) { 8749 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 8750 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 8751 resp->hwrm_intf_upd_8b); 8752 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 8753 } 8754 8755 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 8756 HWRM_VERSION_UPDATE; 8757 8758 if (bp->hwrm_spec_code > hwrm_ver) 8759 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 8760 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 8761 HWRM_VERSION_UPDATE); 8762 else 8763 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 8764 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 8765 resp->hwrm_intf_upd_8b); 8766 8767 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 8768 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 8769 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 8770 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 8771 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 8772 len = FW_VER_STR_LEN; 8773 } else { 8774 fw_maj = resp->hwrm_fw_maj_8b; 8775 fw_min = resp->hwrm_fw_min_8b; 8776 fw_bld = resp->hwrm_fw_bld_8b; 8777 fw_rsv = resp->hwrm_fw_rsvd_8b; 8778 len = BC_HWRM_STR_LEN; 8779 } 8780 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 8781 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 8782 fw_rsv); 8783 8784 if (strlen(resp->active_pkg_name)) { 8785 int fw_ver_len = strlen(bp->fw_ver_str); 8786 8787 snprintf(bp->fw_ver_str + fw_ver_len, 8788 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 8789 resp->active_pkg_name); 8790 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 8791 } 8792 8793 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 8794 if (!bp->hwrm_cmd_timeout) 8795 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 8796 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 8797 if (!bp->hwrm_cmd_max_timeout) 8798 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 8799 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 8800 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 8801 bp->hwrm_cmd_max_timeout / 1000); 8802 8803 if (resp->hwrm_intf_maj_8b >= 1) { 8804 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 8805 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 8806 } 8807 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 8808 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 8809 8810 bp->chip_num = le16_to_cpu(resp->chip_num); 8811 bp->chip_rev = resp->chip_rev; 8812 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 8813 !resp->chip_metal) 8814 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 8815 8816 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 8817 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 8818 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 8819 bp->fw_cap |= 
BNXT_FW_CAP_SHORT_CMD; 8820 8821 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 8822 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 8823 8824 if (dev_caps_cfg & 8825 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 8826 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 8827 8828 if (dev_caps_cfg & 8829 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 8830 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 8831 8832 if (dev_caps_cfg & 8833 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 8834 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 8835 8836 hwrm_ver_get_exit: 8837 hwrm_req_drop(bp, req); 8838 return rc; 8839 } 8840 8841 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 8842 { 8843 struct hwrm_fw_set_time_input *req; 8844 struct tm tm; 8845 time64_t now = ktime_get_real_seconds(); 8846 int rc; 8847 8848 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 8849 bp->hwrm_spec_code < 0x10400) 8850 return -EOPNOTSUPP; 8851 8852 time64_to_tm(now, 0, &tm); 8853 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 8854 if (rc) 8855 return rc; 8856 8857 req->year = cpu_to_le16(1900 + tm.tm_year); 8858 req->month = 1 + tm.tm_mon; 8859 req->day = tm.tm_mday; 8860 req->hour = tm.tm_hour; 8861 req->minute = tm.tm_min; 8862 req->second = tm.tm_sec; 8863 return hwrm_req_send(bp, req); 8864 } 8865 8866 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 8867 { 8868 u64 sw_tmp; 8869 8870 hw &= mask; 8871 sw_tmp = (*sw & ~mask) | hw; 8872 if (hw < (*sw & mask)) 8873 sw_tmp += mask + 1; 8874 WRITE_ONCE(*sw, sw_tmp); 8875 } 8876 8877 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 8878 int count, bool ignore_zero) 8879 { 8880 int i; 8881 8882 for (i = 0; i < count; i++) { 8883 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 8884 8885 if (ignore_zero && !hw) 8886 continue; 8887 8888 if (masks[i] == -1ULL) 8889 sw_stats[i] = hw; 8890 else 8891 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 8892 } 8893 } 8894 8895 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 8896 { 8897 if (!stats->hw_stats) 8898 return; 8899 8900 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 8901 stats->hw_masks, stats->len / 8, false); 8902 } 8903 8904 static void bnxt_accumulate_all_stats(struct bnxt *bp) 8905 { 8906 struct bnxt_stats_mem *ring0_stats; 8907 bool ignore_zero = false; 8908 int i; 8909 8910 /* Chip bug. Counter intermittently becomes 0. 
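 * On P5_PLUS chips a hardware counter read can transiently return 0,
 * so zero samples are skipped (ignore_zero) rather than being folded
 * into the 64-bit software counters, where a spurious 0 would look
 * like a wraparound.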
*/ 8911 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8912 ignore_zero = true; 8913 8914 for (i = 0; i < bp->cp_nr_rings; i++) { 8915 struct bnxt_napi *bnapi = bp->bnapi[i]; 8916 struct bnxt_cp_ring_info *cpr; 8917 struct bnxt_stats_mem *stats; 8918 8919 cpr = &bnapi->cp_ring; 8920 stats = &cpr->stats; 8921 if (!i) 8922 ring0_stats = stats; 8923 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 8924 ring0_stats->hw_masks, 8925 ring0_stats->len / 8, ignore_zero); 8926 } 8927 if (bp->flags & BNXT_FLAG_PORT_STATS) { 8928 struct bnxt_stats_mem *stats = &bp->port_stats; 8929 __le64 *hw_stats = stats->hw_stats; 8930 u64 *sw_stats = stats->sw_stats; 8931 u64 *masks = stats->hw_masks; 8932 int cnt; 8933 8934 cnt = sizeof(struct rx_port_stats) / 8; 8935 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8936 8937 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8938 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8939 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8940 cnt = sizeof(struct tx_port_stats) / 8; 8941 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8942 } 8943 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 8944 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 8945 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 8946 } 8947 } 8948 8949 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 8950 { 8951 struct hwrm_port_qstats_input *req; 8952 struct bnxt_pf_info *pf = &bp->pf; 8953 int rc; 8954 8955 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 8956 return 0; 8957 8958 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8959 return -EOPNOTSUPP; 8960 8961 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 8962 if (rc) 8963 return rc; 8964 8965 req->flags = flags; 8966 req->port_id = cpu_to_le16(pf->port_id); 8967 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 8968 BNXT_TX_PORT_STATS_BYTE_OFFSET); 8969 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 8970 return hwrm_req_send(bp, req); 8971 } 8972 8973 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 8974 { 8975 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 8976 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 8977 struct hwrm_port_qstats_ext_output *resp_qs; 8978 struct hwrm_port_qstats_ext_input *req_qs; 8979 struct bnxt_pf_info *pf = &bp->pf; 8980 u32 tx_stat_size; 8981 int rc; 8982 8983 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 8984 return 0; 8985 8986 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8987 return -EOPNOTSUPP; 8988 8989 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 8990 if (rc) 8991 return rc; 8992 8993 req_qs->flags = flags; 8994 req_qs->port_id = cpu_to_le16(pf->port_id); 8995 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 8996 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 8997 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 8998 sizeof(struct tx_port_stats_ext) : 0; 8999 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 9000 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 9001 resp_qs = hwrm_req_hold(bp, req_qs); 9002 rc = hwrm_req_send(bp, req_qs); 9003 if (!rc) { 9004 bp->fw_rx_stats_ext_size = 9005 le16_to_cpu(resp_qs->rx_stat_size) / 8; 9006 if (BNXT_FW_MAJ(bp) < 220 && 9007 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 9008 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 9009 9010 bp->fw_tx_stats_ext_size = tx_stat_size ? 
9011 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 9012 } else { 9013 bp->fw_rx_stats_ext_size = 0; 9014 bp->fw_tx_stats_ext_size = 0; 9015 } 9016 hwrm_req_drop(bp, req_qs); 9017 9018 if (flags) 9019 return rc; 9020 9021 if (bp->fw_tx_stats_ext_size <= 9022 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 9023 bp->pri2cos_valid = 0; 9024 return rc; 9025 } 9026 9027 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 9028 if (rc) 9029 return rc; 9030 9031 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 9032 9033 resp_qc = hwrm_req_hold(bp, req_qc); 9034 rc = hwrm_req_send(bp, req_qc); 9035 if (!rc) { 9036 u8 *pri2cos; 9037 int i, j; 9038 9039 pri2cos = &resp_qc->pri0_cos_queue_id; 9040 for (i = 0; i < 8; i++) { 9041 u8 queue_id = pri2cos[i]; 9042 u8 queue_idx; 9043 9044 /* Per port queue IDs start from 0, 10, 20, etc */ 9045 queue_idx = queue_id % 10; 9046 if (queue_idx > BNXT_MAX_QUEUE) { 9047 bp->pri2cos_valid = false; 9048 hwrm_req_drop(bp, req_qc); 9049 return rc; 9050 } 9051 for (j = 0; j < bp->max_q; j++) { 9052 if (bp->q_ids[j] == queue_id) 9053 bp->pri2cos_idx[i] = queue_idx; 9054 } 9055 } 9056 bp->pri2cos_valid = true; 9057 } 9058 hwrm_req_drop(bp, req_qc); 9059 9060 return rc; 9061 } 9062 9063 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 9064 { 9065 bnxt_hwrm_tunnel_dst_port_free(bp, 9066 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 9067 bnxt_hwrm_tunnel_dst_port_free(bp, 9068 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 9069 } 9070 9071 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 9072 { 9073 int rc, i; 9074 u32 tpa_flags = 0; 9075 9076 if (set_tpa) 9077 tpa_flags = bp->flags & BNXT_FLAG_TPA; 9078 else if (BNXT_NO_FW_ACCESS(bp)) 9079 return 0; 9080 for (i = 0; i < bp->nr_vnics; i++) { 9081 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 9082 if (rc) { 9083 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 9084 i, rc); 9085 return rc; 9086 } 9087 } 9088 return 0; 9089 } 9090 9091 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 9092 { 9093 int i; 9094 9095 for (i = 0; i < bp->nr_vnics; i++) 9096 bnxt_hwrm_vnic_set_rss(bp, i, false); 9097 } 9098 9099 static void bnxt_clear_vnic(struct bnxt *bp) 9100 { 9101 if (!bp->vnic_info) 9102 return; 9103 9104 bnxt_hwrm_clear_vnic_filter(bp); 9105 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 9106 /* clear all RSS setting before free vnic ctx */ 9107 bnxt_hwrm_clear_vnic_rss(bp); 9108 bnxt_hwrm_vnic_ctx_free(bp); 9109 } 9110 /* before free the vnic, undo the vnic tpa settings */ 9111 if (bp->flags & BNXT_FLAG_TPA) 9112 bnxt_set_tpa(bp, false); 9113 bnxt_hwrm_vnic_free(bp); 9114 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9115 bnxt_hwrm_vnic_ctx_free(bp); 9116 } 9117 9118 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 9119 bool irq_re_init) 9120 { 9121 bnxt_clear_vnic(bp); 9122 bnxt_hwrm_ring_free(bp, close_path); 9123 bnxt_hwrm_ring_grp_free(bp); 9124 if (irq_re_init) { 9125 bnxt_hwrm_stat_ctx_free(bp); 9126 bnxt_hwrm_free_tunnel_ports(bp); 9127 } 9128 } 9129 9130 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 9131 { 9132 struct hwrm_func_cfg_input *req; 9133 u8 evb_mode; 9134 int rc; 9135 9136 if (br_mode == BRIDGE_MODE_VEB) 9137 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 9138 else if (br_mode == BRIDGE_MODE_VEPA) 9139 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 9140 else 9141 return -EINVAL; 9142 9143 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9144 if (rc) 9145 return rc; 9146 9147 req->fid = cpu_to_le16(0xffff); 9148 
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 9149 req->evb_mode = evb_mode; 9150 return hwrm_req_send(bp, req); 9151 } 9152 9153 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 9154 { 9155 struct hwrm_func_cfg_input *req; 9156 int rc; 9157 9158 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 9159 return 0; 9160 9161 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 9162 if (rc) 9163 return rc; 9164 9165 req->fid = cpu_to_le16(0xffff); 9166 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 9167 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 9168 if (size == 128) 9169 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 9170 9171 return hwrm_req_send(bp, req); 9172 } 9173 9174 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 9175 { 9176 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 9177 int rc; 9178 9179 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 9180 goto skip_rss_ctx; 9181 9182 /* allocate context for vnic */ 9183 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 9184 if (rc) { 9185 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9186 vnic_id, rc); 9187 goto vnic_setup_err; 9188 } 9189 bp->rsscos_nr_ctxs++; 9190 9191 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9192 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 9193 if (rc) { 9194 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 9195 vnic_id, rc); 9196 goto vnic_setup_err; 9197 } 9198 bp->rsscos_nr_ctxs++; 9199 } 9200 9201 skip_rss_ctx: 9202 /* configure default vnic, ring grp */ 9203 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 9204 if (rc) { 9205 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9206 vnic_id, rc); 9207 goto vnic_setup_err; 9208 } 9209 9210 /* Enable RSS hashing on vnic */ 9211 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 9212 if (rc) { 9213 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 9214 vnic_id, rc); 9215 goto vnic_setup_err; 9216 } 9217 9218 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9219 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9220 if (rc) { 9221 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9222 vnic_id, rc); 9223 } 9224 } 9225 9226 vnic_setup_err: 9227 return rc; 9228 } 9229 9230 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 9231 { 9232 int rc, i, nr_ctxs; 9233 9234 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 9235 for (i = 0; i < nr_ctxs; i++) { 9236 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 9237 if (rc) { 9238 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 9239 vnic_id, i, rc); 9240 break; 9241 } 9242 bp->rsscos_nr_ctxs++; 9243 } 9244 if (i < nr_ctxs) 9245 return -ENOMEM; 9246 9247 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 9248 if (rc) { 9249 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 9250 vnic_id, rc); 9251 return rc; 9252 } 9253 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 9254 if (rc) { 9255 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9256 vnic_id, rc); 9257 return rc; 9258 } 9259 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9260 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9261 if (rc) { 9262 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9263 vnic_id, rc); 9264 } 9265 } 9266 return rc; 9267 } 9268 9269 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 9270 { 9271 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9272 return __bnxt_setup_vnic_p5(bp, vnic_id); 9273 else 9274 return __bnxt_setup_vnic(bp, vnic_id); 9275 } 9276 9277 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 9278 { 
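	/* P5_PLUS chips handle aRFS without per-ring VNICs and return
	 * early below.  Older chips use one extra VNIC per RX ring:
	 * RX ring i is served by vnic_id i + 1, leaving vnic 0 as the
	 * default VNIC.
	 */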
9279 #ifdef CONFIG_RFS_ACCEL 9280 int i, rc = 0; 9281 9282 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9283 return 0; 9284 9285 for (i = 0; i < bp->rx_nr_rings; i++) { 9286 struct bnxt_vnic_info *vnic; 9287 u16 vnic_id = i + 1; 9288 u16 ring_id = i; 9289 9290 if (vnic_id >= bp->nr_vnics) 9291 break; 9292 9293 vnic = &bp->vnic_info[vnic_id]; 9294 vnic->flags |= BNXT_VNIC_RFS_FLAG; 9295 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 9296 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 9297 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 9298 if (rc) { 9299 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9300 vnic_id, rc); 9301 break; 9302 } 9303 rc = bnxt_setup_vnic(bp, vnic_id); 9304 if (rc) 9305 break; 9306 } 9307 return rc; 9308 #else 9309 return 0; 9310 #endif 9311 } 9312 9313 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 9314 static bool bnxt_promisc_ok(struct bnxt *bp) 9315 { 9316 #ifdef CONFIG_BNXT_SRIOV 9317 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 9318 return false; 9319 #endif 9320 return true; 9321 } 9322 9323 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 9324 { 9325 unsigned int rc = 0; 9326 9327 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 9328 if (rc) { 9329 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9330 rc); 9331 return rc; 9332 } 9333 9334 rc = bnxt_hwrm_vnic_cfg(bp, 1); 9335 if (rc) { 9336 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9337 rc); 9338 return rc; 9339 } 9340 return rc; 9341 } 9342 9343 static int bnxt_cfg_rx_mode(struct bnxt *); 9344 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 9345 9346 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 9347 { 9348 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9349 int rc = 0; 9350 unsigned int rx_nr_rings = bp->rx_nr_rings; 9351 9352 if (irq_re_init) { 9353 rc = bnxt_hwrm_stat_ctx_alloc(bp); 9354 if (rc) { 9355 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 9356 rc); 9357 goto err_out; 9358 } 9359 } 9360 9361 rc = bnxt_hwrm_ring_alloc(bp); 9362 if (rc) { 9363 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 9364 goto err_out; 9365 } 9366 9367 rc = bnxt_hwrm_ring_grp_alloc(bp); 9368 if (rc) { 9369 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 9370 goto err_out; 9371 } 9372 9373 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 9374 rx_nr_rings--; 9375 9376 /* default vnic 0 */ 9377 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 9378 if (rc) { 9379 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 9380 goto err_out; 9381 } 9382 9383 if (BNXT_VF(bp)) 9384 bnxt_hwrm_func_qcfg(bp); 9385 9386 rc = bnxt_setup_vnic(bp, 0); 9387 if (rc) 9388 goto err_out; 9389 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 9390 bnxt_hwrm_update_rss_hash_cfg(bp); 9391 9392 if (bp->flags & BNXT_FLAG_RFS) { 9393 rc = bnxt_alloc_rfs_vnics(bp); 9394 if (rc) 9395 goto err_out; 9396 } 9397 9398 if (bp->flags & BNXT_FLAG_TPA) { 9399 rc = bnxt_set_tpa(bp, true); 9400 if (rc) 9401 goto err_out; 9402 } 9403 9404 if (BNXT_VF(bp)) 9405 bnxt_update_vf_mac(bp); 9406 9407 /* Filter for default vnic 0 */ 9408 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 9409 if (rc) { 9410 if (BNXT_VF(bp) && rc == -ENODEV) 9411 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 9412 else 9413 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 9414 goto err_out; 9415 } 9416 vnic->uc_filter_count = 1; 9417 9418 vnic->rx_mask = 0; 9419 if 
(test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 9420 goto skip_rx_mask; 9421 9422 if (bp->dev->flags & IFF_BROADCAST) 9423 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9424 9425 if (bp->dev->flags & IFF_PROMISC) 9426 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9427 9428 if (bp->dev->flags & IFF_ALLMULTI) { 9429 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9430 vnic->mc_list_count = 0; 9431 } else if (bp->dev->flags & IFF_MULTICAST) { 9432 u32 mask = 0; 9433 9434 bnxt_mc_list_updated(bp, &mask); 9435 vnic->rx_mask |= mask; 9436 } 9437 9438 rc = bnxt_cfg_rx_mode(bp); 9439 if (rc) 9440 goto err_out; 9441 9442 skip_rx_mask: 9443 rc = bnxt_hwrm_set_coal(bp); 9444 if (rc) 9445 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 9446 rc); 9447 9448 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9449 rc = bnxt_setup_nitroa0_vnic(bp); 9450 if (rc) 9451 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 9452 rc); 9453 } 9454 9455 if (BNXT_VF(bp)) { 9456 bnxt_hwrm_func_qcfg(bp); 9457 netdev_update_features(bp->dev); 9458 } 9459 9460 return 0; 9461 9462 err_out: 9463 bnxt_hwrm_resource_free(bp, 0, true); 9464 9465 return rc; 9466 } 9467 9468 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 9469 { 9470 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 9471 return 0; 9472 } 9473 9474 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 9475 { 9476 bnxt_init_cp_rings(bp); 9477 bnxt_init_rx_rings(bp); 9478 bnxt_init_tx_rings(bp); 9479 bnxt_init_ring_grps(bp, irq_re_init); 9480 bnxt_init_vnics(bp); 9481 9482 return bnxt_init_chip(bp, irq_re_init); 9483 } 9484 9485 static int bnxt_set_real_num_queues(struct bnxt *bp) 9486 { 9487 int rc; 9488 struct net_device *dev = bp->dev; 9489 9490 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 9491 bp->tx_nr_rings_xdp); 9492 if (rc) 9493 return rc; 9494 9495 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 9496 if (rc) 9497 return rc; 9498 9499 #ifdef CONFIG_RFS_ACCEL 9500 if (bp->flags & BNXT_FLAG_RFS) 9501 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 9502 #endif 9503 9504 return rc; 9505 } 9506 9507 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9508 bool shared) 9509 { 9510 int _rx = *rx, _tx = *tx; 9511 9512 if (shared) { 9513 *rx = min_t(int, _rx, max); 9514 *tx = min_t(int, _tx, max); 9515 } else { 9516 if (max < 2) 9517 return -ENOMEM; 9518 9519 while (_rx + _tx > max) { 9520 if (_rx > _tx && _rx > 1) 9521 _rx--; 9522 else if (_tx > 1) 9523 _tx--; 9524 } 9525 *rx = _rx; 9526 *tx = _tx; 9527 } 9528 return 0; 9529 } 9530 9531 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 9532 { 9533 return (tx - tx_xdp) / tx_sets + tx_xdp; 9534 } 9535 9536 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 9537 { 9538 int tcs = netdev_get_num_tc(bp->dev); 9539 9540 if (!tcs) 9541 tcs = 1; 9542 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 9543 } 9544 9545 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 9546 { 9547 int tcs = netdev_get_num_tc(bp->dev); 9548 9549 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 9550 bp->tx_nr_rings_xdp; 9551 } 9552 9553 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9554 bool sh) 9555 { 9556 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 9557 9558 if (tx_cp != *tx) { 9559 int tx_saved = tx_cp, rc; 9560 9561 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 9562 if (rc) 9563 return rc; 9564 if (tx_cp != tx_saved) 9565 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 9566 return 0; 
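		/* Worked example of the mapping above (hypothetical numbers):
		 * with 8 TX rings, 2 TCs and 2 XDP rings, bnxt_num_tx_to_cp()
		 * yields (8 - 2) / 2 + 2 = 5 TX completion rings, and
		 * bnxt_num_cp_to_tx() converts 5 back to (5 - 2) * 2 + 2 = 8.
		 */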
9567 } 9568 return __bnxt_trim_rings(bp, rx, tx, max, sh); 9569 } 9570 9571 static void bnxt_setup_msix(struct bnxt *bp) 9572 { 9573 const int len = sizeof(bp->irq_tbl[0].name); 9574 struct net_device *dev = bp->dev; 9575 int tcs, i; 9576 9577 tcs = netdev_get_num_tc(dev); 9578 if (tcs) { 9579 int i, off, count; 9580 9581 for (i = 0; i < tcs; i++) { 9582 count = bp->tx_nr_rings_per_tc; 9583 off = BNXT_TC_TO_RING_BASE(bp, i); 9584 netdev_set_tc_queue(dev, i, count, off); 9585 } 9586 } 9587 9588 for (i = 0; i < bp->cp_nr_rings; i++) { 9589 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9590 char *attr; 9591 9592 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 9593 attr = "TxRx"; 9594 else if (i < bp->rx_nr_rings) 9595 attr = "rx"; 9596 else 9597 attr = "tx"; 9598 9599 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 9600 attr, i); 9601 bp->irq_tbl[map_idx].handler = bnxt_msix; 9602 } 9603 } 9604 9605 static void bnxt_setup_inta(struct bnxt *bp) 9606 { 9607 const int len = sizeof(bp->irq_tbl[0].name); 9608 9609 if (netdev_get_num_tc(bp->dev)) 9610 netdev_reset_tc(bp->dev); 9611 9612 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 9613 0); 9614 bp->irq_tbl[0].handler = bnxt_inta; 9615 } 9616 9617 static int bnxt_init_int_mode(struct bnxt *bp); 9618 9619 static int bnxt_setup_int_mode(struct bnxt *bp) 9620 { 9621 int rc; 9622 9623 if (!bp->irq_tbl) { 9624 rc = bnxt_init_int_mode(bp); 9625 if (rc || !bp->irq_tbl) 9626 return rc ?: -ENODEV; 9627 } 9628 9629 if (bp->flags & BNXT_FLAG_USING_MSIX) 9630 bnxt_setup_msix(bp); 9631 else 9632 bnxt_setup_inta(bp); 9633 9634 rc = bnxt_set_real_num_queues(bp); 9635 return rc; 9636 } 9637 9638 #ifdef CONFIG_RFS_ACCEL 9639 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 9640 { 9641 return bp->hw_resc.max_rsscos_ctxs; 9642 } 9643 9644 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 9645 { 9646 return bp->hw_resc.max_vnics; 9647 } 9648 #endif 9649 9650 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 9651 { 9652 return bp->hw_resc.max_stat_ctxs; 9653 } 9654 9655 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 9656 { 9657 return bp->hw_resc.max_cp_rings; 9658 } 9659 9660 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 9661 { 9662 unsigned int cp = bp->hw_resc.max_cp_rings; 9663 9664 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 9665 cp -= bnxt_get_ulp_msix_num(bp); 9666 9667 return cp; 9668 } 9669 9670 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 9671 { 9672 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9673 9674 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9675 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 9676 9677 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 9678 } 9679 9680 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 9681 { 9682 bp->hw_resc.max_irqs = max_irqs; 9683 } 9684 9685 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 9686 { 9687 unsigned int cp; 9688 9689 cp = bnxt_get_max_func_cp_rings_for_en(bp); 9690 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9691 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 9692 else 9693 return cp - bp->cp_nr_rings; 9694 } 9695 9696 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 9697 { 9698 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 9699 } 9700 9701 int bnxt_get_avail_msix(struct bnxt *bp, int num) 9702 { 9703 int max_cp = bnxt_get_max_func_cp_rings(bp); 9704 int max_irq = bnxt_get_max_func_irqs(bp); 
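	/* avail_msix below is what remains after the rings already in
	 * use; on chips other than P5_PLUS the usable vectors are
	 * additionally capped by max_cp.  With the new resource manager,
	 * a shortfall against max_irq trims the request instead of
	 * failing it outright.
	 */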
9705 int total_req = bp->cp_nr_rings + num; 9706 int max_idx, avail_msix; 9707 9708 max_idx = bp->total_irqs; 9709 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 9710 max_idx = min_t(int, bp->total_irqs, max_cp); 9711 avail_msix = max_idx - bp->cp_nr_rings; 9712 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 9713 return avail_msix; 9714 9715 if (max_irq < total_req) { 9716 num = max_irq - bp->cp_nr_rings; 9717 if (num <= 0) 9718 return 0; 9719 } 9720 return num; 9721 } 9722 9723 static int bnxt_get_num_msix(struct bnxt *bp) 9724 { 9725 if (!BNXT_NEW_RM(bp)) 9726 return bnxt_get_max_func_irqs(bp); 9727 9728 return bnxt_nq_rings_in_use(bp); 9729 } 9730 9731 static int bnxt_init_msix(struct bnxt *bp) 9732 { 9733 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; 9734 struct msix_entry *msix_ent; 9735 9736 total_vecs = bnxt_get_num_msix(bp); 9737 max = bnxt_get_max_func_irqs(bp); 9738 if (total_vecs > max) 9739 total_vecs = max; 9740 9741 if (!total_vecs) 9742 return 0; 9743 9744 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 9745 if (!msix_ent) 9746 return -ENOMEM; 9747 9748 for (i = 0; i < total_vecs; i++) { 9749 msix_ent[i].entry = i; 9750 msix_ent[i].vector = 0; 9751 } 9752 9753 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 9754 min = 2; 9755 9756 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 9757 ulp_msix = bnxt_get_ulp_msix_num(bp); 9758 if (total_vecs < 0 || total_vecs < ulp_msix) { 9759 rc = -ENODEV; 9760 goto msix_setup_exit; 9761 } 9762 9763 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 9764 if (bp->irq_tbl) { 9765 for (i = 0; i < total_vecs; i++) 9766 bp->irq_tbl[i].vector = msix_ent[i].vector; 9767 9768 bp->total_irqs = total_vecs; 9769 /* Trim rings based upon num of vectors allocated */ 9770 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 9771 total_vecs - ulp_msix, min == 1); 9772 if (rc) 9773 goto msix_setup_exit; 9774 9775 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 9776 bp->cp_nr_rings = (min == 1) ? 
9777 max_t(int, tx_cp, bp->rx_nr_rings) : 9778 tx_cp + bp->rx_nr_rings; 9779 9780 } else { 9781 rc = -ENOMEM; 9782 goto msix_setup_exit; 9783 } 9784 bp->flags |= BNXT_FLAG_USING_MSIX; 9785 kfree(msix_ent); 9786 return 0; 9787 9788 msix_setup_exit: 9789 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 9790 kfree(bp->irq_tbl); 9791 bp->irq_tbl = NULL; 9792 pci_disable_msix(bp->pdev); 9793 kfree(msix_ent); 9794 return rc; 9795 } 9796 9797 static int bnxt_init_inta(struct bnxt *bp) 9798 { 9799 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); 9800 if (!bp->irq_tbl) 9801 return -ENOMEM; 9802 9803 bp->total_irqs = 1; 9804 bp->rx_nr_rings = 1; 9805 bp->tx_nr_rings = 1; 9806 bp->cp_nr_rings = 1; 9807 bp->flags |= BNXT_FLAG_SHARED_RINGS; 9808 bp->irq_tbl[0].vector = bp->pdev->irq; 9809 return 0; 9810 } 9811 9812 static int bnxt_init_int_mode(struct bnxt *bp) 9813 { 9814 int rc = -ENODEV; 9815 9816 if (bp->flags & BNXT_FLAG_MSIX_CAP) 9817 rc = bnxt_init_msix(bp); 9818 9819 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 9820 /* fallback to INTA */ 9821 rc = bnxt_init_inta(bp); 9822 } 9823 return rc; 9824 } 9825 9826 static void bnxt_clear_int_mode(struct bnxt *bp) 9827 { 9828 if (bp->flags & BNXT_FLAG_USING_MSIX) 9829 pci_disable_msix(bp->pdev); 9830 9831 kfree(bp->irq_tbl); 9832 bp->irq_tbl = NULL; 9833 bp->flags &= ~BNXT_FLAG_USING_MSIX; 9834 } 9835 9836 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 9837 { 9838 int tcs = netdev_get_num_tc(bp->dev); 9839 bool irq_cleared = false; 9840 int rc; 9841 9842 if (!bnxt_need_reserve_rings(bp)) 9843 return 0; 9844 9845 if (irq_re_init && BNXT_NEW_RM(bp) && 9846 bnxt_get_num_msix(bp) != bp->total_irqs) { 9847 bnxt_ulp_irq_stop(bp); 9848 bnxt_clear_int_mode(bp); 9849 irq_cleared = true; 9850 } 9851 rc = __bnxt_reserve_rings(bp); 9852 if (irq_cleared) { 9853 if (!rc) 9854 rc = bnxt_init_int_mode(bp); 9855 bnxt_ulp_irq_restart(bp, rc); 9856 } 9857 if (rc) { 9858 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 9859 return rc; 9860 } 9861 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 9862 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 9863 netdev_err(bp->dev, "tx ring reservation failure\n"); 9864 netdev_reset_tc(bp->dev); 9865 if (bp->tx_nr_rings_xdp) 9866 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 9867 else 9868 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 9869 return -ENOMEM; 9870 } 9871 return 0; 9872 } 9873 9874 static void bnxt_free_irq(struct bnxt *bp) 9875 { 9876 struct bnxt_irq *irq; 9877 int i; 9878 9879 #ifdef CONFIG_RFS_ACCEL 9880 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 9881 bp->dev->rx_cpu_rmap = NULL; 9882 #endif 9883 if (!bp->irq_tbl || !bp->bnapi) 9884 return; 9885 9886 for (i = 0; i < bp->cp_nr_rings; i++) { 9887 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9888 9889 irq = &bp->irq_tbl[map_idx]; 9890 if (irq->requested) { 9891 if (irq->have_cpumask) { 9892 irq_set_affinity_hint(irq->vector, NULL); 9893 free_cpumask_var(irq->cpu_mask); 9894 irq->have_cpumask = 0; 9895 } 9896 free_irq(irq->vector, bp->bnapi[i]); 9897 } 9898 9899 irq->requested = 0; 9900 } 9901 } 9902 9903 static int bnxt_request_irq(struct bnxt *bp) 9904 { 9905 int i, j, rc = 0; 9906 unsigned long flags = 0; 9907 #ifdef CONFIG_RFS_ACCEL 9908 struct cpu_rmap *rmap; 9909 #endif 9910 9911 rc = bnxt_setup_int_mode(bp); 9912 if (rc) { 9913 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 9914 rc); 9915 return rc; 9916 } 9917 #ifdef CONFIG_RFS_ACCEL 9918 rmap = bp->dev->rx_cpu_rmap; 9919 #endif 9920 if (!(bp->flags & 
BNXT_FLAG_USING_MSIX)) 9921 flags = IRQF_SHARED; 9922 9923 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 9924 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9925 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 9926 9927 #ifdef CONFIG_RFS_ACCEL 9928 if (rmap && bp->bnapi[i]->rx_ring) { 9929 rc = irq_cpu_rmap_add(rmap, irq->vector); 9930 if (rc) 9931 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 9932 j); 9933 j++; 9934 } 9935 #endif 9936 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 9937 bp->bnapi[i]); 9938 if (rc) 9939 break; 9940 9941 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); 9942 irq->requested = 1; 9943 9944 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 9945 int numa_node = dev_to_node(&bp->pdev->dev); 9946 9947 irq->have_cpumask = 1; 9948 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 9949 irq->cpu_mask); 9950 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 9951 if (rc) { 9952 netdev_warn(bp->dev, 9953 "Set affinity failed, IRQ = %d\n", 9954 irq->vector); 9955 break; 9956 } 9957 } 9958 } 9959 return rc; 9960 } 9961 9962 static void bnxt_del_napi(struct bnxt *bp) 9963 { 9964 int i; 9965 9966 if (!bp->bnapi) 9967 return; 9968 9969 for (i = 0; i < bp->rx_nr_rings; i++) 9970 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 9971 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 9972 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 9973 9974 for (i = 0; i < bp->cp_nr_rings; i++) { 9975 struct bnxt_napi *bnapi = bp->bnapi[i]; 9976 9977 __netif_napi_del(&bnapi->napi); 9978 } 9979 /* We called __netif_napi_del(), we need 9980 * to respect an RCU grace period before freeing napi structures. 9981 */ 9982 synchronize_net(); 9983 } 9984 9985 static void bnxt_init_napi(struct bnxt *bp) 9986 { 9987 int i; 9988 unsigned int cp_nr_rings = bp->cp_nr_rings; 9989 struct bnxt_napi *bnapi; 9990 9991 if (bp->flags & BNXT_FLAG_USING_MSIX) { 9992 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 9993 9994 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9995 poll_fn = bnxt_poll_p5; 9996 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 9997 cp_nr_rings--; 9998 for (i = 0; i < cp_nr_rings; i++) { 9999 bnapi = bp->bnapi[i]; 10000 netif_napi_add(bp->dev, &bnapi->napi, poll_fn); 10001 } 10002 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10003 bnapi = bp->bnapi[cp_nr_rings]; 10004 netif_napi_add(bp->dev, &bnapi->napi, 10005 bnxt_poll_nitroa0); 10006 } 10007 } else { 10008 bnapi = bp->bnapi[0]; 10009 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); 10010 } 10011 } 10012 10013 static void bnxt_disable_napi(struct bnxt *bp) 10014 { 10015 int i; 10016 10017 if (!bp->bnapi || 10018 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 10019 return; 10020 10021 for (i = 0; i < bp->cp_nr_rings; i++) { 10022 struct bnxt_napi *bnapi = bp->bnapi[i]; 10023 struct bnxt_cp_ring_info *cpr; 10024 10025 cpr = &bnapi->cp_ring; 10026 if (bnapi->tx_fault) 10027 cpr->sw_stats.tx.tx_resets++; 10028 if (bnapi->in_reset) 10029 cpr->sw_stats.rx.rx_resets++; 10030 napi_disable(&bnapi->napi); 10031 if (bnapi->rx_ring) 10032 cancel_work_sync(&cpr->dim.work); 10033 } 10034 } 10035 10036 static void bnxt_enable_napi(struct bnxt *bp) 10037 { 10038 int i; 10039 10040 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 10041 for (i = 0; i < bp->cp_nr_rings; i++) { 10042 struct bnxt_napi *bnapi = bp->bnapi[i]; 10043 struct bnxt_cp_ring_info *cpr; 10044 10045 bnapi->tx_fault = 0; 10046 10047 cpr = &bnapi->cp_ring; 10048 bnapi->in_reset = false; 10049 10050 if (bnapi->rx_ring) { 
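			/* Rearm the DIM (dynamic interrupt moderation) work and
			 * restart coalescing from the EQE profile before NAPI
			 * is enabled on this RX ring.
			 */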
10051 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 10052 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 10053 } 10054 napi_enable(&bnapi->napi); 10055 } 10056 } 10057 10058 void bnxt_tx_disable(struct bnxt *bp) 10059 { 10060 int i; 10061 struct bnxt_tx_ring_info *txr; 10062 10063 if (bp->tx_ring) { 10064 for (i = 0; i < bp->tx_nr_rings; i++) { 10065 txr = &bp->tx_ring[i]; 10066 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 10067 } 10068 } 10069 /* Make sure napi polls see @dev_state change */ 10070 synchronize_net(); 10071 /* Drop carrier first to prevent TX timeout */ 10072 netif_carrier_off(bp->dev); 10073 /* Stop all TX queues */ 10074 netif_tx_disable(bp->dev); 10075 } 10076 10077 void bnxt_tx_enable(struct bnxt *bp) 10078 { 10079 int i; 10080 struct bnxt_tx_ring_info *txr; 10081 10082 for (i = 0; i < bp->tx_nr_rings; i++) { 10083 txr = &bp->tx_ring[i]; 10084 WRITE_ONCE(txr->dev_state, 0); 10085 } 10086 /* Make sure napi polls see @dev_state change */ 10087 synchronize_net(); 10088 netif_tx_wake_all_queues(bp->dev); 10089 if (BNXT_LINK_IS_UP(bp)) 10090 netif_carrier_on(bp->dev); 10091 } 10092 10093 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 10094 { 10095 u8 active_fec = link_info->active_fec_sig_mode & 10096 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 10097 10098 switch (active_fec) { 10099 default: 10100 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 10101 return "None"; 10102 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 10103 return "Clause 74 BaseR"; 10104 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 10105 return "Clause 91 RS(528,514)"; 10106 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 10107 return "Clause 91 RS544_1XN"; 10108 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 10109 return "Clause 91 RS(544,514)"; 10110 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 10111 return "Clause 91 RS272_1XN"; 10112 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 10113 return "Clause 91 RS(272,257)"; 10114 } 10115 } 10116 10117 void bnxt_report_link(struct bnxt *bp) 10118 { 10119 if (BNXT_LINK_IS_UP(bp)) { 10120 const char *signal = ""; 10121 const char *flow_ctrl; 10122 const char *duplex; 10123 u32 speed; 10124 u16 fec; 10125 10126 netif_carrier_on(bp->dev); 10127 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 10128 if (speed == SPEED_UNKNOWN) { 10129 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 10130 return; 10131 } 10132 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 10133 duplex = "full"; 10134 else 10135 duplex = "half"; 10136 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 10137 flow_ctrl = "ON - receive & transmit"; 10138 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 10139 flow_ctrl = "ON - transmit"; 10140 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 10141 flow_ctrl = "ON - receive"; 10142 else 10143 flow_ctrl = "none"; 10144 if (bp->link_info.phy_qcfg_resp.option_flags & 10145 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 10146 u8 sig_mode = bp->link_info.active_fec_sig_mode & 10147 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 10148 switch (sig_mode) { 10149 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 10150 signal = "(NRZ) "; 10151 break; 10152 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 10153 signal = "(PAM4 56Gbps) "; 10154 break; 10155 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 10156 signal = "(PAM4 112Gbps) "; 10157 break; 10158 default: 10159 break; 10160 } 10161 } 10162 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 
10163 speed, signal, duplex, flow_ctrl); 10164 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 10165 netdev_info(bp->dev, "EEE is %s\n", 10166 bp->eee.eee_active ? "active" : 10167 "not active"); 10168 fec = bp->link_info.fec_cfg; 10169 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 10170 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 10171 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 10172 bnxt_report_fec(&bp->link_info)); 10173 } else { 10174 netif_carrier_off(bp->dev); 10175 netdev_err(bp->dev, "NIC Link is Down\n"); 10176 } 10177 } 10178 10179 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 10180 { 10181 if (!resp->supported_speeds_auto_mode && 10182 !resp->supported_speeds_force_mode && 10183 !resp->supported_pam4_speeds_auto_mode && 10184 !resp->supported_pam4_speeds_force_mode && 10185 !resp->supported_speeds2_auto_mode && 10186 !resp->supported_speeds2_force_mode) 10187 return true; 10188 return false; 10189 } 10190 10191 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 10192 { 10193 struct bnxt_link_info *link_info = &bp->link_info; 10194 struct hwrm_port_phy_qcaps_output *resp; 10195 struct hwrm_port_phy_qcaps_input *req; 10196 int rc = 0; 10197 10198 if (bp->hwrm_spec_code < 0x10201) 10199 return 0; 10200 10201 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 10202 if (rc) 10203 return rc; 10204 10205 resp = hwrm_req_hold(bp, req); 10206 rc = hwrm_req_send(bp, req); 10207 if (rc) 10208 goto hwrm_phy_qcaps_exit; 10209 10210 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 10211 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 10212 struct ethtool_eee *eee = &bp->eee; 10213 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 10214 10215 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10216 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 10217 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 10218 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 10219 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 10220 } 10221 10222 if (bp->hwrm_spec_code >= 0x10a01) { 10223 if (bnxt_phy_qcaps_no_speed(resp)) { 10224 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 10225 netdev_warn(bp->dev, "Ethernet link disabled\n"); 10226 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 10227 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 10228 netdev_info(bp->dev, "Ethernet link enabled\n"); 10229 /* Phy re-enabled, reprobe the speeds */ 10230 link_info->support_auto_speeds = 0; 10231 link_info->support_pam4_auto_speeds = 0; 10232 link_info->support_auto_speeds2 = 0; 10233 } 10234 } 10235 if (resp->supported_speeds_auto_mode) 10236 link_info->support_auto_speeds = 10237 le16_to_cpu(resp->supported_speeds_auto_mode); 10238 if (resp->supported_pam4_speeds_auto_mode) 10239 link_info->support_pam4_auto_speeds = 10240 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 10241 if (resp->supported_speeds2_auto_mode) 10242 link_info->support_auto_speeds2 = 10243 le16_to_cpu(resp->supported_speeds2_auto_mode); 10244 10245 bp->port_count = resp->port_cnt; 10246 10247 hwrm_phy_qcaps_exit: 10248 hwrm_req_drop(bp, req); 10249 return rc; 10250 } 10251 10252 static bool bnxt_support_dropped(u16 advertising, u16 supported) 10253 { 10254 u16 diff = advertising ^ supported; 10255 10256 return ((supported | diff) != supported); 10257 } 10258 10259 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) 10260 { 10261 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 10262 10263 /* Check 
if any advertised speeds are no longer supported. The caller 10264 * holds the link_lock mutex, so we can modify link_info settings. 10265 */ 10266 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10267 if (bnxt_support_dropped(link_info->advertising, 10268 link_info->support_auto_speeds2)) { 10269 link_info->advertising = link_info->support_auto_speeds2; 10270 return true; 10271 } 10272 return false; 10273 } 10274 if (bnxt_support_dropped(link_info->advertising, 10275 link_info->support_auto_speeds)) { 10276 link_info->advertising = link_info->support_auto_speeds; 10277 return true; 10278 } 10279 if (bnxt_support_dropped(link_info->advertising_pam4, 10280 link_info->support_pam4_auto_speeds)) { 10281 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 10282 return true; 10283 } 10284 return false; 10285 } 10286 10287 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 10288 { 10289 struct bnxt_link_info *link_info = &bp->link_info; 10290 struct hwrm_port_phy_qcfg_output *resp; 10291 struct hwrm_port_phy_qcfg_input *req; 10292 u8 link_state = link_info->link_state; 10293 bool support_changed; 10294 int rc; 10295 10296 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 10297 if (rc) 10298 return rc; 10299 10300 resp = hwrm_req_hold(bp, req); 10301 rc = hwrm_req_send(bp, req); 10302 if (rc) { 10303 hwrm_req_drop(bp, req); 10304 if (BNXT_VF(bp) && rc == -ENODEV) { 10305 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 10306 rc = 0; 10307 } 10308 return rc; 10309 } 10310 10311 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 10312 link_info->phy_link_status = resp->link; 10313 link_info->duplex = resp->duplex_cfg; 10314 if (bp->hwrm_spec_code >= 0x10800) 10315 link_info->duplex = resp->duplex_state; 10316 link_info->pause = resp->pause; 10317 link_info->auto_mode = resp->auto_mode; 10318 link_info->auto_pause_setting = resp->auto_pause; 10319 link_info->lp_pause = resp->link_partner_adv_pause; 10320 link_info->force_pause_setting = resp->force_pause; 10321 link_info->duplex_setting = resp->duplex_cfg; 10322 if (link_info->phy_link_status == BNXT_LINK_LINK) { 10323 link_info->link_speed = le16_to_cpu(resp->link_speed); 10324 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 10325 link_info->active_lanes = resp->active_lanes; 10326 } else { 10327 link_info->link_speed = 0; 10328 link_info->active_lanes = 0; 10329 } 10330 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 10331 link_info->force_pam4_link_speed = 10332 le16_to_cpu(resp->force_pam4_link_speed); 10333 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 10334 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 10335 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 10336 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 10337 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 10338 link_info->auto_pam4_link_speeds = 10339 le16_to_cpu(resp->auto_pam4_link_speed_mask); 10340 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 10341 link_info->lp_auto_link_speeds = 10342 le16_to_cpu(resp->link_partner_adv_speeds); 10343 link_info->lp_auto_pam4_link_speeds = 10344 resp->link_partner_pam4_adv_speeds; 10345 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 10346 link_info->phy_ver[0] = resp->phy_maj; 10347 link_info->phy_ver[1] = resp->phy_min; 10348 link_info->phy_ver[2] = resp->phy_bld; 10349 link_info->media_type = resp->media_type; 10350 link_info->phy_type = 
resp->phy_type; 10351 link_info->transceiver = resp->xcvr_pkg_type; 10352 link_info->phy_addr = resp->eee_config_phy_addr & 10353 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 10354 link_info->module_status = resp->module_status; 10355 10356 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 10357 struct ethtool_eee *eee = &bp->eee; 10358 u16 fw_speeds; 10359 10360 eee->eee_active = 0; 10361 if (resp->eee_config_phy_addr & 10362 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 10363 eee->eee_active = 1; 10364 fw_speeds = le16_to_cpu( 10365 resp->link_partner_adv_eee_link_speed_mask); 10366 eee->lp_advertised = 10367 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10368 } 10369 10370 /* Pull initial EEE config */ 10371 if (!chng_link_state) { 10372 if (resp->eee_config_phy_addr & 10373 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 10374 eee->eee_enabled = 1; 10375 10376 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 10377 eee->advertised = 10378 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10379 10380 if (resp->eee_config_phy_addr & 10381 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 10382 __le32 tmr; 10383 10384 eee->tx_lpi_enabled = 1; 10385 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 10386 eee->tx_lpi_timer = le32_to_cpu(tmr) & 10387 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 10388 } 10389 } 10390 } 10391 10392 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 10393 if (bp->hwrm_spec_code >= 0x10504) { 10394 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 10395 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 10396 } 10397 /* TODO: need to add more logic to report VF link */ 10398 if (chng_link_state) { 10399 if (link_info->phy_link_status == BNXT_LINK_LINK) 10400 link_info->link_state = BNXT_LINK_STATE_UP; 10401 else 10402 link_info->link_state = BNXT_LINK_STATE_DOWN; 10403 if (link_state != link_info->link_state) 10404 bnxt_report_link(bp); 10405 } else { 10406 /* always link down if not require to update link state */ 10407 link_info->link_state = BNXT_LINK_STATE_DOWN; 10408 } 10409 hwrm_req_drop(bp, req); 10410 10411 if (!BNXT_PHY_CFG_ABLE(bp)) 10412 return 0; 10413 10414 support_changed = bnxt_support_speed_dropped(link_info); 10415 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 10416 bnxt_hwrm_set_link_setting(bp, true, false); 10417 return 0; 10418 } 10419 10420 static void bnxt_get_port_module_status(struct bnxt *bp) 10421 { 10422 struct bnxt_link_info *link_info = &bp->link_info; 10423 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 10424 u8 module_status; 10425 10426 if (bnxt_update_link(bp, true)) 10427 return; 10428 10429 module_status = link_info->module_status; 10430 switch (module_status) { 10431 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 10432 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 10433 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 10434 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 10435 bp->pf.port_id); 10436 if (bp->hwrm_spec_code >= 0x10201) { 10437 netdev_warn(bp->dev, "Module part number %s\n", 10438 resp->phy_vendor_partnumber); 10439 } 10440 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 10441 netdev_warn(bp->dev, "TX is disabled\n"); 10442 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 10443 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 10444 } 10445 } 10446 10447 static void 10448 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 10449 { 10450 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 10451 if 
(bp->hwrm_spec_code >= 0x10201) 10452 req->auto_pause = 10453 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 10454 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 10455 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 10456 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 10457 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 10458 req->enables |= 10459 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 10460 } else { 10461 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 10462 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 10463 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 10464 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 10465 req->enables |= 10466 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 10467 if (bp->hwrm_spec_code >= 0x10201) { 10468 req->auto_pause = req->force_pause; 10469 req->enables |= cpu_to_le32( 10470 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 10471 } 10472 } 10473 } 10474 10475 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 10476 { 10477 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 10478 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 10479 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10480 req->enables |= 10481 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 10482 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 10483 } else if (bp->link_info.advertising) { 10484 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 10485 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 10486 } 10487 if (bp->link_info.advertising_pam4) { 10488 req->enables |= 10489 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 10490 req->auto_link_pam4_speed_mask = 10491 cpu_to_le16(bp->link_info.advertising_pam4); 10492 } 10493 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 10494 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 10495 } else { 10496 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 10497 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 10498 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 10499 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 10500 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 10501 (u32)bp->link_info.req_link_speed); 10502 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 10503 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10504 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 10505 } else { 10506 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10507 } 10508 } 10509 10510 /* tell chimp that the setting takes effect immediately */ 10511 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 10512 } 10513 10514 int bnxt_hwrm_set_pause(struct bnxt *bp) 10515 { 10516 struct hwrm_port_phy_cfg_input *req; 10517 int rc; 10518 10519 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10520 if (rc) 10521 return rc; 10522 10523 bnxt_hwrm_set_pause_common(bp, req); 10524 10525 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 10526 bp->link_info.force_link_chng) 10527 bnxt_hwrm_set_link_common(bp, req); 10528 10529 rc = hwrm_req_send(bp, req); 10530 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 10531 /* since changing of pause setting doesn't trigger any link 10532 * change event, the driver needs to update the current pause 10533 * result upon successfully return of the phy_cfg command 
10534 */ 10535 bp->link_info.pause = 10536 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 10537 bp->link_info.auto_pause_setting = 0; 10538 if (!bp->link_info.force_link_chng) 10539 bnxt_report_link(bp); 10540 } 10541 bp->link_info.force_link_chng = false; 10542 return rc; 10543 } 10544 10545 static void bnxt_hwrm_set_eee(struct bnxt *bp, 10546 struct hwrm_port_phy_cfg_input *req) 10547 { 10548 struct ethtool_eee *eee = &bp->eee; 10549 10550 if (eee->eee_enabled) { 10551 u16 eee_speeds; 10552 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 10553 10554 if (eee->tx_lpi_enabled) 10555 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 10556 else 10557 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 10558 10559 req->flags |= cpu_to_le32(flags); 10560 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 10561 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 10562 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 10563 } else { 10564 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 10565 } 10566 } 10567 10568 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 10569 { 10570 struct hwrm_port_phy_cfg_input *req; 10571 int rc; 10572 10573 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10574 if (rc) 10575 return rc; 10576 10577 if (set_pause) 10578 bnxt_hwrm_set_pause_common(bp, req); 10579 10580 bnxt_hwrm_set_link_common(bp, req); 10581 10582 if (set_eee) 10583 bnxt_hwrm_set_eee(bp, req); 10584 return hwrm_req_send(bp, req); 10585 } 10586 10587 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 10588 { 10589 struct hwrm_port_phy_cfg_input *req; 10590 int rc; 10591 10592 if (!BNXT_SINGLE_PF(bp)) 10593 return 0; 10594 10595 if (pci_num_vf(bp->pdev) && 10596 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 10597 return 0; 10598 10599 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10600 if (rc) 10601 return rc; 10602 10603 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 10604 rc = hwrm_req_send(bp, req); 10605 if (!rc) { 10606 mutex_lock(&bp->link_lock); 10607 /* Device is not obliged link down in certain scenarios, even 10608 * when forced. Setting the state unknown is consistent with 10609 * driver startup and will force link state to be reported 10610 * during subsequent open based on PORT_PHY_QCFG. 
10611 */ 10612 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 10613 mutex_unlock(&bp->link_lock); 10614 } 10615 return rc; 10616 } 10617 10618 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 10619 { 10620 #ifdef CONFIG_TEE_BNXT_FW 10621 int rc = tee_bnxt_fw_load(); 10622 10623 if (rc) 10624 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 10625 10626 return rc; 10627 #else 10628 netdev_err(bp->dev, "OP-TEE not supported\n"); 10629 return -ENODEV; 10630 #endif 10631 } 10632 10633 static int bnxt_try_recover_fw(struct bnxt *bp) 10634 { 10635 if (bp->fw_health && bp->fw_health->status_reliable) { 10636 int retry = 0, rc; 10637 u32 sts; 10638 10639 do { 10640 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10641 rc = bnxt_hwrm_poll(bp); 10642 if (!BNXT_FW_IS_BOOTING(sts) && 10643 !BNXT_FW_IS_RECOVERING(sts)) 10644 break; 10645 retry++; 10646 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 10647 10648 if (!BNXT_FW_IS_HEALTHY(sts)) { 10649 netdev_err(bp->dev, 10650 "Firmware not responding, status: 0x%x\n", 10651 sts); 10652 rc = -ENODEV; 10653 } 10654 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 10655 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 10656 return bnxt_fw_reset_via_optee(bp); 10657 } 10658 return rc; 10659 } 10660 10661 return -ENODEV; 10662 } 10663 10664 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 10665 { 10666 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10667 10668 if (!BNXT_NEW_RM(bp)) 10669 return; /* no resource reservations required */ 10670 10671 hw_resc->resv_cp_rings = 0; 10672 hw_resc->resv_stat_ctxs = 0; 10673 hw_resc->resv_irqs = 0; 10674 hw_resc->resv_tx_rings = 0; 10675 hw_resc->resv_rx_rings = 0; 10676 hw_resc->resv_hw_ring_grps = 0; 10677 hw_resc->resv_vnics = 0; 10678 if (!fw_reset) { 10679 bp->tx_nr_rings = 0; 10680 bp->rx_nr_rings = 0; 10681 } 10682 } 10683 10684 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 10685 { 10686 int rc; 10687 10688 if (!BNXT_NEW_RM(bp)) 10689 return 0; /* no resource reservations required */ 10690 10691 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 10692 if (rc) 10693 netdev_err(bp->dev, "resc_qcaps failed\n"); 10694 10695 bnxt_clear_reservations(bp, fw_reset); 10696 10697 return rc; 10698 } 10699 10700 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 10701 { 10702 struct hwrm_func_drv_if_change_output *resp; 10703 struct hwrm_func_drv_if_change_input *req; 10704 bool fw_reset = !bp->irq_tbl; 10705 bool resc_reinit = false; 10706 int rc, retry = 0; 10707 u32 flags = 0; 10708 10709 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 10710 return 0; 10711 10712 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 10713 if (rc) 10714 return rc; 10715 10716 if (up) 10717 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 10718 resp = hwrm_req_hold(bp, req); 10719 10720 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 10721 while (retry < BNXT_FW_IF_RETRY) { 10722 rc = hwrm_req_send(bp, req); 10723 if (rc != -EAGAIN) 10724 break; 10725 10726 msleep(50); 10727 retry++; 10728 } 10729 10730 if (rc == -EAGAIN) { 10731 hwrm_req_drop(bp, req); 10732 return rc; 10733 } else if (!rc) { 10734 flags = le32_to_cpu(resp->flags); 10735 } else if (up) { 10736 rc = bnxt_try_recover_fw(bp); 10737 fw_reset = true; 10738 } 10739 hwrm_req_drop(bp, req); 10740 if (rc) 10741 return rc; 10742 10743 if (!up) { 10744 bnxt_inv_fw_health_reg(bp); 10745 return 0; 10746 } 10747 10748 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 10749 resc_reinit = true; 10750 if (flags & 
FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 10751 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 10752 fw_reset = true; 10753 else 10754 bnxt_remap_fw_health_regs(bp); 10755 10756 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 10757 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 10758 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10759 return -ENODEV; 10760 } 10761 if (resc_reinit || fw_reset) { 10762 if (fw_reset) { 10763 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10764 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10765 bnxt_ulp_stop(bp); 10766 bnxt_free_ctx_mem(bp); 10767 bnxt_dcb_free(bp); 10768 rc = bnxt_fw_init_one(bp); 10769 if (rc) { 10770 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10771 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10772 return rc; 10773 } 10774 bnxt_clear_int_mode(bp); 10775 rc = bnxt_init_int_mode(bp); 10776 if (rc) { 10777 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10778 netdev_err(bp->dev, "init int mode failed\n"); 10779 return rc; 10780 } 10781 } 10782 rc = bnxt_cancel_reservations(bp, fw_reset); 10783 } 10784 return rc; 10785 } 10786 10787 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 10788 { 10789 struct hwrm_port_led_qcaps_output *resp; 10790 struct hwrm_port_led_qcaps_input *req; 10791 struct bnxt_pf_info *pf = &bp->pf; 10792 int rc; 10793 10794 bp->num_leds = 0; 10795 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 10796 return 0; 10797 10798 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 10799 if (rc) 10800 return rc; 10801 10802 req->port_id = cpu_to_le16(pf->port_id); 10803 resp = hwrm_req_hold(bp, req); 10804 rc = hwrm_req_send(bp, req); 10805 if (rc) { 10806 hwrm_req_drop(bp, req); 10807 return rc; 10808 } 10809 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 10810 int i; 10811 10812 bp->num_leds = resp->num_leds; 10813 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 10814 bp->num_leds); 10815 for (i = 0; i < bp->num_leds; i++) { 10816 struct bnxt_led_info *led = &bp->leds[i]; 10817 __le16 caps = led->led_state_caps; 10818 10819 if (!led->led_group_id || 10820 !BNXT_LED_ALT_BLINK_CAP(caps)) { 10821 bp->num_leds = 0; 10822 break; 10823 } 10824 } 10825 } 10826 hwrm_req_drop(bp, req); 10827 return 0; 10828 } 10829 10830 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 10831 { 10832 struct hwrm_wol_filter_alloc_output *resp; 10833 struct hwrm_wol_filter_alloc_input *req; 10834 int rc; 10835 10836 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 10837 if (rc) 10838 return rc; 10839 10840 req->port_id = cpu_to_le16(bp->pf.port_id); 10841 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 10842 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 10843 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 10844 10845 resp = hwrm_req_hold(bp, req); 10846 rc = hwrm_req_send(bp, req); 10847 if (!rc) 10848 bp->wol_filter_id = resp->wol_filter_id; 10849 hwrm_req_drop(bp, req); 10850 return rc; 10851 } 10852 10853 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 10854 { 10855 struct hwrm_wol_filter_free_input *req; 10856 int rc; 10857 10858 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 10859 if (rc) 10860 return rc; 10861 10862 req->port_id = cpu_to_le16(bp->pf.port_id); 10863 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 10864 req->wol_filter_id = bp->wol_filter_id; 10865 10866 return hwrm_req_send(bp, req); 10867 } 10868 10869 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 10870 { 10871 struct 
hwrm_wol_filter_qcfg_output *resp; 10872 struct hwrm_wol_filter_qcfg_input *req; 10873 u16 next_handle = 0; 10874 int rc; 10875 10876 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 10877 if (rc) 10878 return rc; 10879 10880 req->port_id = cpu_to_le16(bp->pf.port_id); 10881 req->handle = cpu_to_le16(handle); 10882 resp = hwrm_req_hold(bp, req); 10883 rc = hwrm_req_send(bp, req); 10884 if (!rc) { 10885 next_handle = le16_to_cpu(resp->next_handle); 10886 if (next_handle != 0) { 10887 if (resp->wol_type == 10888 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 10889 bp->wol = 1; 10890 bp->wol_filter_id = resp->wol_filter_id; 10891 } 10892 } 10893 } 10894 hwrm_req_drop(bp, req); 10895 return next_handle; 10896 } 10897 10898 static void bnxt_get_wol_settings(struct bnxt *bp) 10899 { 10900 u16 handle = 0; 10901 10902 bp->wol = 0; 10903 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 10904 return; 10905 10906 do { 10907 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 10908 } while (handle && handle != 0xffff); 10909 } 10910 10911 static bool bnxt_eee_config_ok(struct bnxt *bp) 10912 { 10913 struct ethtool_eee *eee = &bp->eee; 10914 struct bnxt_link_info *link_info = &bp->link_info; 10915 10916 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 10917 return true; 10918 10919 if (eee->eee_enabled) { 10920 u32 advertising = 10921 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 10922 10923 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 10924 eee->eee_enabled = 0; 10925 return false; 10926 } 10927 if (eee->advertised & ~advertising) { 10928 eee->advertised = advertising & eee->supported; 10929 return false; 10930 } 10931 } 10932 return true; 10933 } 10934 10935 static int bnxt_update_phy_setting(struct bnxt *bp) 10936 { 10937 int rc; 10938 bool update_link = false; 10939 bool update_pause = false; 10940 bool update_eee = false; 10941 struct bnxt_link_info *link_info = &bp->link_info; 10942 10943 rc = bnxt_update_link(bp, true); 10944 if (rc) { 10945 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 10946 rc); 10947 return rc; 10948 } 10949 if (!BNXT_SINGLE_PF(bp)) 10950 return 0; 10951 10952 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 10953 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 10954 link_info->req_flow_ctrl) 10955 update_pause = true; 10956 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 10957 link_info->force_pause_setting != link_info->req_flow_ctrl) 10958 update_pause = true; 10959 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 10960 if (BNXT_AUTO_MODE(link_info->auto_mode)) 10961 update_link = true; 10962 if (bnxt_force_speed_updated(link_info)) 10963 update_link = true; 10964 if (link_info->req_duplex != link_info->duplex_setting) 10965 update_link = true; 10966 } else { 10967 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 10968 update_link = true; 10969 if (bnxt_auto_speed_updated(link_info)) 10970 update_link = true; 10971 } 10972 10973 /* The last close may have shutdown the link, so need to call 10974 * PHY_CFG to bring it back up. 
10975 */ 10976 if (!BNXT_LINK_IS_UP(bp)) 10977 update_link = true; 10978 10979 if (!bnxt_eee_config_ok(bp)) 10980 update_eee = true; 10981 10982 if (update_link) 10983 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 10984 else if (update_pause) 10985 rc = bnxt_hwrm_set_pause(bp); 10986 if (rc) { 10987 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 10988 rc); 10989 return rc; 10990 } 10991 10992 return rc; 10993 } 10994 10995 /* Common routine to pre-map certain register block to different GRC window. 10996 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 10997 * in PF and 3 windows in VF that can be customized to map in different 10998 * register blocks. 10999 */ 11000 static void bnxt_preset_reg_win(struct bnxt *bp) 11001 { 11002 if (BNXT_PF(bp)) { 11003 /* CAG registers map to GRC window #4 */ 11004 writel(BNXT_CAG_REG_BASE, 11005 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 11006 } 11007 } 11008 11009 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 11010 11011 static int bnxt_reinit_after_abort(struct bnxt *bp) 11012 { 11013 int rc; 11014 11015 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11016 return -EBUSY; 11017 11018 if (bp->dev->reg_state == NETREG_UNREGISTERED) 11019 return -ENODEV; 11020 11021 rc = bnxt_fw_init_one(bp); 11022 if (!rc) { 11023 bnxt_clear_int_mode(bp); 11024 rc = bnxt_init_int_mode(bp); 11025 if (!rc) { 11026 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11027 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 11028 } 11029 } 11030 return rc; 11031 } 11032 11033 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11034 { 11035 int rc = 0; 11036 11037 bnxt_preset_reg_win(bp); 11038 netif_carrier_off(bp->dev); 11039 if (irq_re_init) { 11040 /* Reserve rings now if none were reserved at driver probe. 
*/ 11041 rc = bnxt_init_dflt_ring_mode(bp); 11042 if (rc) { 11043 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 11044 return rc; 11045 } 11046 } 11047 rc = bnxt_reserve_rings(bp, irq_re_init); 11048 if (rc) 11049 return rc; 11050 if ((bp->flags & BNXT_FLAG_RFS) && 11051 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 11052 /* disable RFS if falling back to INTA */ 11053 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 11054 bp->flags &= ~BNXT_FLAG_RFS; 11055 } 11056 11057 rc = bnxt_alloc_mem(bp, irq_re_init); 11058 if (rc) { 11059 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11060 goto open_err_free_mem; 11061 } 11062 11063 if (irq_re_init) { 11064 bnxt_init_napi(bp); 11065 rc = bnxt_request_irq(bp); 11066 if (rc) { 11067 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 11068 goto open_err_irq; 11069 } 11070 } 11071 11072 rc = bnxt_init_nic(bp, irq_re_init); 11073 if (rc) { 11074 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11075 goto open_err_irq; 11076 } 11077 11078 bnxt_enable_napi(bp); 11079 bnxt_debug_dev_init(bp); 11080 11081 if (link_re_init) { 11082 mutex_lock(&bp->link_lock); 11083 rc = bnxt_update_phy_setting(bp); 11084 mutex_unlock(&bp->link_lock); 11085 if (rc) { 11086 netdev_warn(bp->dev, "failed to update phy settings\n"); 11087 if (BNXT_SINGLE_PF(bp)) { 11088 bp->link_info.phy_retry = true; 11089 bp->link_info.phy_retry_expires = 11090 jiffies + 5 * HZ; 11091 } 11092 } 11093 } 11094 11095 if (irq_re_init) 11096 udp_tunnel_nic_reset_ntf(bp->dev); 11097 11098 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 11099 if (!static_key_enabled(&bnxt_xdp_locking_key)) 11100 static_branch_enable(&bnxt_xdp_locking_key); 11101 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 11102 static_branch_disable(&bnxt_xdp_locking_key); 11103 } 11104 set_bit(BNXT_STATE_OPEN, &bp->state); 11105 bnxt_enable_int(bp); 11106 /* Enable TX queues */ 11107 bnxt_tx_enable(bp); 11108 mod_timer(&bp->timer, jiffies + bp->current_interval); 11109 /* Poll link status and check for SFP+ module status */ 11110 mutex_lock(&bp->link_lock); 11111 bnxt_get_port_module_status(bp); 11112 mutex_unlock(&bp->link_lock); 11113 11114 /* VF-reps may need to be re-opened after the PF is re-opened */ 11115 if (BNXT_PF(bp)) 11116 bnxt_vf_reps_open(bp); 11117 bnxt_ptp_init_rtc(bp, true); 11118 bnxt_ptp_cfg_tstamp_filters(bp); 11119 return 0; 11120 11121 open_err_irq: 11122 bnxt_del_napi(bp); 11123 11124 open_err_free_mem: 11125 bnxt_free_skbs(bp); 11126 bnxt_free_irq(bp); 11127 bnxt_free_mem(bp, true); 11128 return rc; 11129 } 11130 11131 /* rtnl_lock held */ 11132 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11133 { 11134 int rc = 0; 11135 11136 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 11137 rc = -EIO; 11138 if (!rc) 11139 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 11140 if (rc) { 11141 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 11142 dev_close(bp->dev); 11143 } 11144 return rc; 11145 } 11146 11147 /* rtnl_lock held, open the NIC half way by allocating all resources, but 11148 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 11149 * self tests. 
11150 */ 11151 int bnxt_half_open_nic(struct bnxt *bp) 11152 { 11153 int rc = 0; 11154 11155 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 11156 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 11157 rc = -ENODEV; 11158 goto half_open_err; 11159 } 11160 11161 rc = bnxt_alloc_mem(bp, true); 11162 if (rc) { 11163 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11164 goto half_open_err; 11165 } 11166 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11167 rc = bnxt_init_nic(bp, true); 11168 if (rc) { 11169 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11170 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11171 goto half_open_err; 11172 } 11173 return 0; 11174 11175 half_open_err: 11176 bnxt_free_skbs(bp); 11177 bnxt_free_mem(bp, true); 11178 dev_close(bp->dev); 11179 return rc; 11180 } 11181 11182 /* rtnl_lock held, this call can only be made after a previous successful 11183 * call to bnxt_half_open_nic(). 11184 */ 11185 void bnxt_half_close_nic(struct bnxt *bp) 11186 { 11187 bnxt_hwrm_resource_free(bp, false, true); 11188 bnxt_free_skbs(bp); 11189 bnxt_free_mem(bp, true); 11190 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11191 } 11192 11193 void bnxt_reenable_sriov(struct bnxt *bp) 11194 { 11195 if (BNXT_PF(bp)) { 11196 struct bnxt_pf_info *pf = &bp->pf; 11197 int n = pf->active_vfs; 11198 11199 if (n) 11200 bnxt_cfg_hw_sriov(bp, &n, true); 11201 } 11202 } 11203 11204 static int bnxt_open(struct net_device *dev) 11205 { 11206 struct bnxt *bp = netdev_priv(dev); 11207 int rc; 11208 11209 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 11210 rc = bnxt_reinit_after_abort(bp); 11211 if (rc) { 11212 if (rc == -EBUSY) 11213 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 11214 else 11215 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 11216 return -ENODEV; 11217 } 11218 } 11219 11220 rc = bnxt_hwrm_if_change(bp, true); 11221 if (rc) 11222 return rc; 11223 11224 rc = __bnxt_open_nic(bp, true, true); 11225 if (rc) { 11226 bnxt_hwrm_if_change(bp, false); 11227 } else { 11228 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 11229 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11230 bnxt_ulp_start(bp, 0); 11231 bnxt_reenable_sriov(bp); 11232 } 11233 } 11234 } 11235 11236 return rc; 11237 } 11238 11239 static bool bnxt_drv_busy(struct bnxt *bp) 11240 { 11241 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 11242 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 11243 } 11244 11245 static void bnxt_get_ring_stats(struct bnxt *bp, 11246 struct rtnl_link_stats64 *stats); 11247 11248 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 11249 bool link_re_init) 11250 { 11251 /* Close the VF-reps before closing PF */ 11252 if (BNXT_PF(bp)) 11253 bnxt_vf_reps_close(bp); 11254 11255 /* Change device state to avoid TX queue wake up's */ 11256 bnxt_tx_disable(bp); 11257 11258 clear_bit(BNXT_STATE_OPEN, &bp->state); 11259 smp_mb__after_atomic(); 11260 while (bnxt_drv_busy(bp)) 11261 msleep(20); 11262 11263 /* Flush rings and disable interrupts */ 11264 bnxt_shutdown_nic(bp, irq_re_init); 11265 11266 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 11267 11268 bnxt_debug_dev_exit(bp); 11269 bnxt_disable_napi(bp); 11270 del_timer_sync(&bp->timer); 11271 bnxt_free_skbs(bp); 11272 11273 /* Save ring stats before shutdown */ 11274 if (bp->bnapi && irq_re_init) { 11275 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 11276 bnxt_get_ring_err_stats(bp, 
&bp->ring_err_stats_prev); 11277 } 11278 if (irq_re_init) { 11279 bnxt_free_irq(bp); 11280 bnxt_del_napi(bp); 11281 } 11282 bnxt_free_mem(bp, irq_re_init); 11283 } 11284 11285 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11286 { 11287 int rc = 0; 11288 11289 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11290 /* If we get here, it means firmware reset is in progress 11291 * while we are trying to close. We can safely proceed with 11292 * the close because we are holding rtnl_lock(). Some firmware 11293 * messages may fail as we proceed to close. We set the 11294 * ABORT_ERR flag here so that the FW reset thread will later 11295 * abort when it gets the rtnl_lock() and sees the flag. 11296 */ 11297 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 11298 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11299 } 11300 11301 #ifdef CONFIG_BNXT_SRIOV 11302 if (bp->sriov_cfg) { 11303 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 11304 !bp->sriov_cfg, 11305 BNXT_SRIOV_CFG_WAIT_TMO); 11306 if (rc) 11307 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 11308 } 11309 #endif 11310 __bnxt_close_nic(bp, irq_re_init, link_re_init); 11311 return rc; 11312 } 11313 11314 static int bnxt_close(struct net_device *dev) 11315 { 11316 struct bnxt *bp = netdev_priv(dev); 11317 11318 bnxt_close_nic(bp, true, true); 11319 bnxt_hwrm_shutdown_link(bp); 11320 bnxt_hwrm_if_change(bp, false); 11321 return 0; 11322 } 11323 11324 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 11325 u16 *val) 11326 { 11327 struct hwrm_port_phy_mdio_read_output *resp; 11328 struct hwrm_port_phy_mdio_read_input *req; 11329 int rc; 11330 11331 if (bp->hwrm_spec_code < 0x10a00) 11332 return -EOPNOTSUPP; 11333 11334 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 11335 if (rc) 11336 return rc; 11337 11338 req->port_id = cpu_to_le16(bp->pf.port_id); 11339 req->phy_addr = phy_addr; 11340 req->reg_addr = cpu_to_le16(reg & 0x1f); 11341 if (mdio_phy_id_is_c45(phy_addr)) { 11342 req->cl45_mdio = 1; 11343 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11344 req->dev_addr = mdio_phy_id_devad(phy_addr); 11345 req->reg_addr = cpu_to_le16(reg); 11346 } 11347 11348 resp = hwrm_req_hold(bp, req); 11349 rc = hwrm_req_send(bp, req); 11350 if (!rc) 11351 *val = le16_to_cpu(resp->reg_data); 11352 hwrm_req_drop(bp, req); 11353 return rc; 11354 } 11355 11356 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 11357 u16 val) 11358 { 11359 struct hwrm_port_phy_mdio_write_input *req; 11360 int rc; 11361 11362 if (bp->hwrm_spec_code < 0x10a00) 11363 return -EOPNOTSUPP; 11364 11365 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 11366 if (rc) 11367 return rc; 11368 11369 req->port_id = cpu_to_le16(bp->pf.port_id); 11370 req->phy_addr = phy_addr; 11371 req->reg_addr = cpu_to_le16(reg & 0x1f); 11372 if (mdio_phy_id_is_c45(phy_addr)) { 11373 req->cl45_mdio = 1; 11374 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11375 req->dev_addr = mdio_phy_id_devad(phy_addr); 11376 req->reg_addr = cpu_to_le16(reg); 11377 } 11378 req->reg_data = cpu_to_le16(val); 11379 11380 return hwrm_req_send(bp, req); 11381 } 11382 11383 /* rtnl_lock held */ 11384 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11385 { 11386 struct mii_ioctl_data *mdio = if_mii(ifr); 11387 struct bnxt *bp = netdev_priv(dev); 11388 int rc; 11389 11390 switch (cmd) { 11391 case SIOCGMIIPHY: 11392 mdio->phy_id = 
bp->link_info.phy_addr; 11393 11394 fallthrough; 11395 case SIOCGMIIREG: { 11396 u16 mii_regval = 0; 11397 11398 if (!netif_running(dev)) 11399 return -EAGAIN; 11400 11401 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 11402 &mii_regval); 11403 mdio->val_out = mii_regval; 11404 return rc; 11405 } 11406 11407 case SIOCSMIIREG: 11408 if (!netif_running(dev)) 11409 return -EAGAIN; 11410 11411 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 11412 mdio->val_in); 11413 11414 case SIOCSHWTSTAMP: 11415 return bnxt_hwtstamp_set(dev, ifr); 11416 11417 case SIOCGHWTSTAMP: 11418 return bnxt_hwtstamp_get(dev, ifr); 11419 11420 default: 11421 /* do nothing */ 11422 break; 11423 } 11424 return -EOPNOTSUPP; 11425 } 11426 11427 static void bnxt_get_ring_stats(struct bnxt *bp, 11428 struct rtnl_link_stats64 *stats) 11429 { 11430 int i; 11431 11432 for (i = 0; i < bp->cp_nr_rings; i++) { 11433 struct bnxt_napi *bnapi = bp->bnapi[i]; 11434 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 11435 u64 *sw = cpr->stats.sw_stats; 11436 11437 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 11438 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11439 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 11440 11441 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 11442 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 11443 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 11444 11445 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 11446 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 11447 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 11448 11449 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 11450 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 11451 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 11452 11453 stats->rx_missed_errors += 11454 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 11455 11456 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11457 11458 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 11459 11460 stats->rx_dropped += 11461 cpr->sw_stats.rx.rx_netpoll_discards + 11462 cpr->sw_stats.rx.rx_oom_discards; 11463 } 11464 } 11465 11466 static void bnxt_add_prev_stats(struct bnxt *bp, 11467 struct rtnl_link_stats64 *stats) 11468 { 11469 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 11470 11471 stats->rx_packets += prev_stats->rx_packets; 11472 stats->tx_packets += prev_stats->tx_packets; 11473 stats->rx_bytes += prev_stats->rx_bytes; 11474 stats->tx_bytes += prev_stats->tx_bytes; 11475 stats->rx_missed_errors += prev_stats->rx_missed_errors; 11476 stats->multicast += prev_stats->multicast; 11477 stats->rx_dropped += prev_stats->rx_dropped; 11478 stats->tx_dropped += prev_stats->tx_dropped; 11479 } 11480 11481 static void 11482 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 11483 { 11484 struct bnxt *bp = netdev_priv(dev); 11485 11486 set_bit(BNXT_STATE_READ_STATS, &bp->state); 11487 /* Make sure bnxt_close_nic() sees that we are reading stats before 11488 * we check the BNXT_STATE_OPEN flag. 
11489 */ 11490 smp_mb__after_atomic(); 11491 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11492 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11493 *stats = bp->net_stats_prev; 11494 return; 11495 } 11496 11497 bnxt_get_ring_stats(bp, stats); 11498 bnxt_add_prev_stats(bp, stats); 11499 11500 if (bp->flags & BNXT_FLAG_PORT_STATS) { 11501 u64 *rx = bp->port_stats.sw_stats; 11502 u64 *tx = bp->port_stats.sw_stats + 11503 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 11504 11505 stats->rx_crc_errors = 11506 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 11507 stats->rx_frame_errors = 11508 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 11509 stats->rx_length_errors = 11510 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 11511 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 11512 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 11513 stats->rx_errors = 11514 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 11515 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 11516 stats->collisions = 11517 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 11518 stats->tx_fifo_errors = 11519 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 11520 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 11521 } 11522 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11523 } 11524 11525 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 11526 struct bnxt_total_ring_err_stats *stats, 11527 struct bnxt_cp_ring_info *cpr) 11528 { 11529 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; 11530 u64 *hw_stats = cpr->stats.sw_stats; 11531 11532 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 11533 stats->rx_total_resets += sw_stats->rx.rx_resets; 11534 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 11535 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 11536 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 11537 stats->rx_total_ring_discards += 11538 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 11539 stats->tx_total_resets += sw_stats->tx.tx_resets; 11540 stats->tx_total_ring_discards += 11541 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 11542 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 11543 } 11544 11545 void bnxt_get_ring_err_stats(struct bnxt *bp, 11546 struct bnxt_total_ring_err_stats *stats) 11547 { 11548 int i; 11549 11550 for (i = 0; i < bp->cp_nr_rings; i++) 11551 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 11552 } 11553 11554 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 11555 { 11556 struct net_device *dev = bp->dev; 11557 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11558 struct netdev_hw_addr *ha; 11559 u8 *haddr; 11560 int mc_count = 0; 11561 bool update = false; 11562 int off = 0; 11563 11564 netdev_for_each_mc_addr(ha, dev) { 11565 if (mc_count >= BNXT_MAX_MC_ADDRS) { 11566 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11567 vnic->mc_list_count = 0; 11568 return false; 11569 } 11570 haddr = ha->addr; 11571 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 11572 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 11573 update = true; 11574 } 11575 off += ETH_ALEN; 11576 mc_count++; 11577 } 11578 if (mc_count) 11579 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 11580 11581 if (mc_count != vnic->mc_list_count) { 11582 vnic->mc_list_count = mc_count; 11583 update = true; 11584 } 11585 return update; 11586 } 11587 11588 static bool bnxt_uc_list_updated(struct bnxt *bp) 11589 { 11590 struct net_device *dev = bp->dev; 11591 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11592 struct 
netdev_hw_addr *ha; 11593 int off = 0; 11594 11595 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 11596 return true; 11597 11598 netdev_for_each_uc_addr(ha, dev) { 11599 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 11600 return true; 11601 11602 off += ETH_ALEN; 11603 } 11604 return false; 11605 } 11606 11607 static void bnxt_set_rx_mode(struct net_device *dev) 11608 { 11609 struct bnxt *bp = netdev_priv(dev); 11610 struct bnxt_vnic_info *vnic; 11611 bool mc_update = false; 11612 bool uc_update; 11613 u32 mask; 11614 11615 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 11616 return; 11617 11618 vnic = &bp->vnic_info[0]; 11619 mask = vnic->rx_mask; 11620 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 11621 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 11622 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 11623 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 11624 11625 if (dev->flags & IFF_PROMISC) 11626 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11627 11628 uc_update = bnxt_uc_list_updated(bp); 11629 11630 if (dev->flags & IFF_BROADCAST) 11631 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 11632 if (dev->flags & IFF_ALLMULTI) { 11633 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11634 vnic->mc_list_count = 0; 11635 } else if (dev->flags & IFF_MULTICAST) { 11636 mc_update = bnxt_mc_list_updated(bp, &mask); 11637 } 11638 11639 if (mask != vnic->rx_mask || uc_update || mc_update) { 11640 vnic->rx_mask = mask; 11641 11642 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 11643 } 11644 } 11645 11646 static int bnxt_cfg_rx_mode(struct bnxt *bp) 11647 { 11648 struct net_device *dev = bp->dev; 11649 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11650 struct hwrm_cfa_l2_filter_free_input *req; 11651 struct netdev_hw_addr *ha; 11652 int i, off = 0, rc; 11653 bool uc_update; 11654 11655 netif_addr_lock_bh(dev); 11656 uc_update = bnxt_uc_list_updated(bp); 11657 netif_addr_unlock_bh(dev); 11658 11659 if (!uc_update) 11660 goto skip_uc; 11661 11662 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 11663 if (rc) 11664 return rc; 11665 hwrm_req_hold(bp, req); 11666 for (i = 1; i < vnic->uc_filter_count; i++) { 11667 req->l2_filter_id = vnic->fw_l2_filter_id[i]; 11668 11669 rc = hwrm_req_send(bp, req); 11670 } 11671 hwrm_req_drop(bp, req); 11672 11673 vnic->uc_filter_count = 1; 11674 11675 netif_addr_lock_bh(dev); 11676 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 11677 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11678 } else { 11679 netdev_for_each_uc_addr(ha, dev) { 11680 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 11681 off += ETH_ALEN; 11682 vnic->uc_filter_count++; 11683 } 11684 } 11685 netif_addr_unlock_bh(dev); 11686 11687 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 11688 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 11689 if (rc) { 11690 if (BNXT_VF(bp) && rc == -ENODEV) { 11691 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 11692 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 11693 else 11694 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); 11695 rc = 0; 11696 } else { 11697 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 11698 } 11699 vnic->uc_filter_count = i; 11700 return rc; 11701 } 11702 } 11703 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 11704 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 11705 11706 skip_uc: 11707 if ((vnic->rx_mask & 
CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 11708 !bnxt_promisc_ok(bp)) 11709 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11710 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 11711 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 11712 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 11713 rc); 11714 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 11715 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11716 vnic->mc_list_count = 0; 11717 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 11718 } 11719 if (rc) 11720 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 11721 rc); 11722 11723 return rc; 11724 } 11725 11726 static bool bnxt_can_reserve_rings(struct bnxt *bp) 11727 { 11728 #ifdef CONFIG_BNXT_SRIOV 11729 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 11730 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11731 11732 /* No minimum rings were provisioned by the PF. Don't 11733 * reserve rings by default when device is down. 11734 */ 11735 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 11736 return true; 11737 11738 if (!netif_running(bp->dev)) 11739 return false; 11740 } 11741 #endif 11742 return true; 11743 } 11744 11745 /* If the chip and firmware supports RFS */ 11746 static bool bnxt_rfs_supported(struct bnxt *bp) 11747 { 11748 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 11749 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 11750 return true; 11751 return false; 11752 } 11753 /* 212 firmware is broken for aRFS */ 11754 if (BNXT_FW_MAJ(bp) == 212) 11755 return false; 11756 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 11757 return true; 11758 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 11759 return true; 11760 return false; 11761 } 11762 11763 /* If runtime conditions support RFS */ 11764 static bool bnxt_rfs_capable(struct bnxt *bp) 11765 { 11766 #ifdef CONFIG_RFS_ACCEL 11767 int vnics, max_vnics, max_rss_ctxs; 11768 11769 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11770 return bnxt_rfs_supported(bp); 11771 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 11772 return false; 11773 11774 vnics = 1 + bp->rx_nr_rings; 11775 max_vnics = bnxt_get_max_func_vnics(bp); 11776 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 11777 11778 /* RSS contexts not a limiting factor */ 11779 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 11780 max_rss_ctxs = max_vnics; 11781 if (vnics > max_vnics || vnics > max_rss_ctxs) { 11782 if (bp->rx_nr_rings > 1) 11783 netdev_warn(bp->dev, 11784 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 11785 min(max_rss_ctxs - 1, max_vnics - 1)); 11786 return false; 11787 } 11788 11789 if (!BNXT_NEW_RM(bp)) 11790 return true; 11791 11792 if (vnics == bp->hw_resc.resv_vnics) 11793 return true; 11794 11795 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 11796 if (vnics <= bp->hw_resc.resv_vnics) 11797 return true; 11798 11799 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 11800 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 11801 return false; 11802 #else 11803 return false; 11804 #endif 11805 } 11806 11807 static netdev_features_t bnxt_fix_features(struct net_device *dev, 11808 netdev_features_t features) 11809 { 11810 struct bnxt *bp = netdev_priv(dev); 11811 netdev_features_t vlan_features; 11812 11813 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 11814 features &= ~NETIF_F_NTUPLE; 11815 11816 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 11817 features &= 
~(NETIF_F_LRO | NETIF_F_GRO_HW); 11818 11819 if (!(features & NETIF_F_GRO)) 11820 features &= ~NETIF_F_GRO_HW; 11821 11822 if (features & NETIF_F_GRO_HW) 11823 features &= ~NETIF_F_LRO; 11824 11825 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 11826 * turned on or off together. 11827 */ 11828 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 11829 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 11830 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 11831 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 11832 else if (vlan_features) 11833 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 11834 } 11835 #ifdef CONFIG_BNXT_SRIOV 11836 if (BNXT_VF(bp) && bp->vf.vlan) 11837 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 11838 #endif 11839 return features; 11840 } 11841 11842 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 11843 { 11844 struct bnxt *bp = netdev_priv(dev); 11845 u32 flags = bp->flags; 11846 u32 changes; 11847 int rc = 0; 11848 bool re_init = false; 11849 bool update_tpa = false; 11850 11851 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 11852 if (features & NETIF_F_GRO_HW) 11853 flags |= BNXT_FLAG_GRO; 11854 else if (features & NETIF_F_LRO) 11855 flags |= BNXT_FLAG_LRO; 11856 11857 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 11858 flags &= ~BNXT_FLAG_TPA; 11859 11860 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 11861 flags |= BNXT_FLAG_STRIP_VLAN; 11862 11863 if (features & NETIF_F_NTUPLE) 11864 flags |= BNXT_FLAG_RFS; 11865 11866 changes = flags ^ bp->flags; 11867 if (changes & BNXT_FLAG_TPA) { 11868 update_tpa = true; 11869 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 11870 (flags & BNXT_FLAG_TPA) == 0 || 11871 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 11872 re_init = true; 11873 } 11874 11875 if (changes & ~BNXT_FLAG_TPA) 11876 re_init = true; 11877 11878 if (flags != bp->flags) { 11879 u32 old_flags = bp->flags; 11880 11881 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11882 bp->flags = flags; 11883 if (update_tpa) 11884 bnxt_set_ring_params(bp); 11885 return rc; 11886 } 11887 11888 if (re_init) { 11889 bnxt_close_nic(bp, false, false); 11890 bp->flags = flags; 11891 if (update_tpa) 11892 bnxt_set_ring_params(bp); 11893 11894 return bnxt_open_nic(bp, false, false); 11895 } 11896 if (update_tpa) { 11897 bp->flags = flags; 11898 rc = bnxt_set_tpa(bp, 11899 (flags & BNXT_FLAG_TPA) ? 11900 true : false); 11901 if (rc) 11902 bp->flags = old_flags; 11903 } 11904 } 11905 return rc; 11906 } 11907 11908 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 11909 u8 **nextp) 11910 { 11911 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 11912 struct hop_jumbo_hdr *jhdr; 11913 int hdr_count = 0; 11914 u8 *nexthdr; 11915 int start; 11916 11917 /* Check that there are at most 2 IPv6 extension headers, no 11918 * fragment header, and each is <= 64 bytes. 11919 */ 11920 start = nw_off + sizeof(*ip6h); 11921 nexthdr = &ip6h->nexthdr; 11922 while (ipv6_ext_hdr(*nexthdr)) { 11923 struct ipv6_opt_hdr *hp; 11924 int hdrlen; 11925 11926 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 11927 *nexthdr == NEXTHDR_FRAGMENT) 11928 return false; 11929 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 11930 skb_headlen(skb), NULL); 11931 if (!hp) 11932 return false; 11933 if (*nexthdr == NEXTHDR_AUTH) 11934 hdrlen = ipv6_authlen(hp); 11935 else 11936 hdrlen = ipv6_optlen(hp); 11937 11938 if (hdrlen > 64) 11939 return false; 11940 11941 /* The ext header may be a hop-by-hop header inserted for 11942 * big TCP purposes.
This will be removed before sending 11943 * from NIC, so do not count it. 11944 */ 11945 if (*nexthdr == NEXTHDR_HOP) { 11946 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 11947 goto increment_hdr; 11948 11949 jhdr = (struct hop_jumbo_hdr *)hp; 11950 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 11951 jhdr->nexthdr != IPPROTO_TCP) 11952 goto increment_hdr; 11953 11954 goto next_hdr; 11955 } 11956 increment_hdr: 11957 hdr_count++; 11958 next_hdr: 11959 nexthdr = &hp->nexthdr; 11960 start += hdrlen; 11961 } 11962 if (nextp) { 11963 /* Caller will check inner protocol */ 11964 if (skb->encapsulation) { 11965 *nextp = nexthdr; 11966 return true; 11967 } 11968 *nextp = NULL; 11969 } 11970 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 11971 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 11972 } 11973 11974 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ 11975 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 11976 { 11977 struct udphdr *uh = udp_hdr(skb); 11978 __be16 udp_port = uh->dest; 11979 11980 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port) 11981 return false; 11982 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) { 11983 struct ethhdr *eh = inner_eth_hdr(skb); 11984 11985 switch (eh->h_proto) { 11986 case htons(ETH_P_IP): 11987 return true; 11988 case htons(ETH_P_IPV6): 11989 return bnxt_exthdr_check(bp, skb, 11990 skb_inner_network_offset(skb), 11991 NULL); 11992 } 11993 } 11994 return false; 11995 } 11996 11997 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 11998 { 11999 switch (l4_proto) { 12000 case IPPROTO_UDP: 12001 return bnxt_udp_tunl_check(bp, skb); 12002 case IPPROTO_IPIP: 12003 return true; 12004 case IPPROTO_GRE: { 12005 switch (skb->inner_protocol) { 12006 default: 12007 return false; 12008 case htons(ETH_P_IP): 12009 return true; 12010 case htons(ETH_P_IPV6): 12011 fallthrough; 12012 } 12013 } 12014 case IPPROTO_IPV6: 12015 /* Check ext headers of inner ipv6 */ 12016 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 12017 NULL); 12018 } 12019 return false; 12020 } 12021 12022 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 12023 struct net_device *dev, 12024 netdev_features_t features) 12025 { 12026 struct bnxt *bp = netdev_priv(dev); 12027 u8 *l4_proto; 12028 12029 features = vlan_features_check(skb, features); 12030 switch (vlan_get_protocol(skb)) { 12031 case htons(ETH_P_IP): 12032 if (!skb->encapsulation) 12033 return features; 12034 l4_proto = &ip_hdr(skb)->protocol; 12035 if (bnxt_tunl_check(bp, skb, *l4_proto)) 12036 return features; 12037 break; 12038 case htons(ETH_P_IPV6): 12039 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 12040 &l4_proto)) 12041 break; 12042 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 12043 return features; 12044 break; 12045 } 12046 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 12047 } 12048 12049 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 12050 u32 *reg_buf) 12051 { 12052 struct hwrm_dbg_read_direct_output *resp; 12053 struct hwrm_dbg_read_direct_input *req; 12054 __le32 *dbg_reg_buf; 12055 dma_addr_t mapping; 12056 int rc, i; 12057 12058 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 12059 if (rc) 12060 return rc; 12061 12062 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 12063 &mapping); 12064 if (!dbg_reg_buf) { 12065 rc = -ENOMEM; 12066 goto dbg_rd_reg_exit; 12067 } 12068 12069 req->host_dest_addr = 
cpu_to_le64(mapping); 12070 12071 resp = hwrm_req_hold(bp, req); 12072 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 12073 req->read_len32 = cpu_to_le32(num_words); 12074 12075 rc = hwrm_req_send(bp, req); 12076 if (rc || resp->error_code) { 12077 rc = -EIO; 12078 goto dbg_rd_reg_exit; 12079 } 12080 for (i = 0; i < num_words; i++) 12081 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 12082 12083 dbg_rd_reg_exit: 12084 hwrm_req_drop(bp, req); 12085 return rc; 12086 } 12087 12088 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 12089 u32 ring_id, u32 *prod, u32 *cons) 12090 { 12091 struct hwrm_dbg_ring_info_get_output *resp; 12092 struct hwrm_dbg_ring_info_get_input *req; 12093 int rc; 12094 12095 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 12096 if (rc) 12097 return rc; 12098 12099 req->ring_type = ring_type; 12100 req->fw_ring_id = cpu_to_le32(ring_id); 12101 resp = hwrm_req_hold(bp, req); 12102 rc = hwrm_req_send(bp, req); 12103 if (!rc) { 12104 *prod = le32_to_cpu(resp->producer_index); 12105 *cons = le32_to_cpu(resp->consumer_index); 12106 } 12107 hwrm_req_drop(bp, req); 12108 return rc; 12109 } 12110 12111 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 12112 { 12113 struct bnxt_tx_ring_info *txr; 12114 int i = bnapi->index, j; 12115 12116 bnxt_for_each_napi_tx(j, bnapi, txr) 12117 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 12118 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 12119 txr->tx_cons); 12120 } 12121 12122 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 12123 { 12124 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 12125 int i = bnapi->index; 12126 12127 if (!rxr) 12128 return; 12129 12130 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 12131 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 12132 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 12133 rxr->rx_sw_agg_prod); 12134 } 12135 12136 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 12137 { 12138 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 12139 int i = bnapi->index; 12140 12141 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 12142 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 12143 } 12144 12145 static void bnxt_dbg_dump_states(struct bnxt *bp) 12146 { 12147 int i; 12148 struct bnxt_napi *bnapi; 12149 12150 for (i = 0; i < bp->cp_nr_rings; i++) { 12151 bnapi = bp->bnapi[i]; 12152 if (netif_msg_drv(bp)) { 12153 bnxt_dump_tx_sw_state(bnapi); 12154 bnxt_dump_rx_sw_state(bnapi); 12155 bnxt_dump_cp_sw_state(bnapi); 12156 } 12157 } 12158 } 12159 12160 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 12161 { 12162 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 12163 struct hwrm_ring_reset_input *req; 12164 struct bnxt_napi *bnapi = rxr->bnapi; 12165 struct bnxt_cp_ring_info *cpr; 12166 u16 cp_ring_id; 12167 int rc; 12168 12169 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 12170 if (rc) 12171 return rc; 12172 12173 cpr = &bnapi->cp_ring; 12174 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 12175 req->cmpl_ring = cpu_to_le16(cp_ring_id); 12176 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 12177 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 12178 return hwrm_req_send_silent(bp, req); 12179 } 12180 12181 static void bnxt_reset_task(struct bnxt *bp, bool silent) 12182 { 12183 if (!silent) 12184 bnxt_dbg_dump_states(bp); 12185 if (netif_running(bp->dev)) { 12186 int rc; 
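		/* A silent reset simply bounces the NIC without re-initializing
		 * IRQs and without the ring state dump above; a full reset
		 * additionally stops and restarts the ULP and re-initializes
		 * the IRQs before the device is reopened.
		 */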
12187 12188 if (silent) { 12189 bnxt_close_nic(bp, false, false); 12190 bnxt_open_nic(bp, false, false); 12191 } else { 12192 bnxt_ulp_stop(bp); 12193 bnxt_close_nic(bp, true, false); 12194 rc = bnxt_open_nic(bp, true, false); 12195 bnxt_ulp_start(bp, rc); 12196 } 12197 } 12198 } 12199 12200 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 12201 { 12202 struct bnxt *bp = netdev_priv(dev); 12203 12204 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 12205 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 12206 } 12207 12208 static void bnxt_fw_health_check(struct bnxt *bp) 12209 { 12210 struct bnxt_fw_health *fw_health = bp->fw_health; 12211 struct pci_dev *pdev = bp->pdev; 12212 u32 val; 12213 12214 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12215 return; 12216 12217 /* Make sure it is enabled before checking the tmr_counter. */ 12218 smp_rmb(); 12219 if (fw_health->tmr_counter) { 12220 fw_health->tmr_counter--; 12221 return; 12222 } 12223 12224 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12225 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 12226 fw_health->arrests++; 12227 goto fw_reset; 12228 } 12229 12230 fw_health->last_fw_heartbeat = val; 12231 12232 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12233 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 12234 fw_health->discoveries++; 12235 goto fw_reset; 12236 } 12237 12238 fw_health->tmr_counter = fw_health->tmr_multiplier; 12239 return; 12240 12241 fw_reset: 12242 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 12243 } 12244 12245 static void bnxt_timer(struct timer_list *t) 12246 { 12247 struct bnxt *bp = from_timer(bp, t, timer); 12248 struct net_device *dev = bp->dev; 12249 12250 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 12251 return; 12252 12253 if (atomic_read(&bp->intr_sem) != 0) 12254 goto bnxt_restart_timer; 12255 12256 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 12257 bnxt_fw_health_check(bp); 12258 12259 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 12260 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 12261 12262 if (bnxt_tc_flower_enabled(bp)) 12263 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 12264 12265 #ifdef CONFIG_RFS_ACCEL 12266 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 12267 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 12268 #endif /*CONFIG_RFS_ACCEL*/ 12269 12270 if (bp->link_info.phy_retry) { 12271 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 12272 bp->link_info.phy_retry = false; 12273 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 12274 } else { 12275 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 12276 } 12277 } 12278 12279 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12280 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12281 12282 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 12283 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 12284 12285 bnxt_restart_timer: 12286 mod_timer(&bp->timer, jiffies + bp->current_interval); 12287 } 12288 12289 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 12290 { 12291 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 12292 * set. If the device is being closed, bnxt_close() may be holding 12293 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 12294 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
12295 */ 12296 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12297 rtnl_lock(); 12298 } 12299 12300 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 12301 { 12302 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12303 rtnl_unlock(); 12304 } 12305 12306 /* Only called from bnxt_sp_task() */ 12307 static void bnxt_reset(struct bnxt *bp, bool silent) 12308 { 12309 bnxt_rtnl_lock_sp(bp); 12310 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 12311 bnxt_reset_task(bp, silent); 12312 bnxt_rtnl_unlock_sp(bp); 12313 } 12314 12315 /* Only called from bnxt_sp_task() */ 12316 static void bnxt_rx_ring_reset(struct bnxt *bp) 12317 { 12318 int i; 12319 12320 bnxt_rtnl_lock_sp(bp); 12321 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12322 bnxt_rtnl_unlock_sp(bp); 12323 return; 12324 } 12325 /* Disable and flush TPA before resetting the RX ring */ 12326 if (bp->flags & BNXT_FLAG_TPA) 12327 bnxt_set_tpa(bp, false); 12328 for (i = 0; i < bp->rx_nr_rings; i++) { 12329 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 12330 struct bnxt_cp_ring_info *cpr; 12331 int rc; 12332 12333 if (!rxr->bnapi->in_reset) 12334 continue; 12335 12336 rc = bnxt_hwrm_rx_ring_reset(bp, i); 12337 if (rc) { 12338 if (rc == -EINVAL || rc == -EOPNOTSUPP) 12339 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 12340 else 12341 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 12342 rc); 12343 bnxt_reset_task(bp, true); 12344 break; 12345 } 12346 bnxt_free_one_rx_ring_skbs(bp, i); 12347 rxr->rx_prod = 0; 12348 rxr->rx_agg_prod = 0; 12349 rxr->rx_sw_agg_prod = 0; 12350 rxr->rx_next_cons = 0; 12351 rxr->bnapi->in_reset = false; 12352 bnxt_alloc_one_rx_ring(bp, i); 12353 cpr = &rxr->bnapi->cp_ring; 12354 cpr->sw_stats.rx.rx_resets++; 12355 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12356 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 12357 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 12358 } 12359 if (bp->flags & BNXT_FLAG_TPA) 12360 bnxt_set_tpa(bp, true); 12361 bnxt_rtnl_unlock_sp(bp); 12362 } 12363 12364 static void bnxt_fw_reset_close(struct bnxt *bp) 12365 { 12366 bnxt_ulp_stop(bp); 12367 /* When firmware is in fatal state, quiesce device and disable 12368 * bus master to prevent any potential bad DMAs before freeing 12369 * kernel memory. 
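 * A config space read of PCI_SUBSYSTEM_ID returning 0xffff below means
 * the device is no longer responding on the bus, so fw_reset_min_dsecs
 * is zeroed and the recovery path does not wait on a dead device.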
12370 */ 12371 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 12372 u16 val = 0; 12373 12374 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 12375 if (val == 0xffff) 12376 bp->fw_reset_min_dsecs = 0; 12377 bnxt_tx_disable(bp); 12378 bnxt_disable_napi(bp); 12379 bnxt_disable_int_sync(bp); 12380 bnxt_free_irq(bp); 12381 bnxt_clear_int_mode(bp); 12382 pci_disable_device(bp->pdev); 12383 } 12384 __bnxt_close_nic(bp, true, false); 12385 bnxt_vf_reps_free(bp); 12386 bnxt_clear_int_mode(bp); 12387 bnxt_hwrm_func_drv_unrgtr(bp); 12388 if (pci_is_enabled(bp->pdev)) 12389 pci_disable_device(bp->pdev); 12390 bnxt_free_ctx_mem(bp); 12391 } 12392 12393 static bool is_bnxt_fw_ok(struct bnxt *bp) 12394 { 12395 struct bnxt_fw_health *fw_health = bp->fw_health; 12396 bool no_heartbeat = false, has_reset = false; 12397 u32 val; 12398 12399 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12400 if (val == fw_health->last_fw_heartbeat) 12401 no_heartbeat = true; 12402 12403 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12404 if (val != fw_health->last_fw_reset_cnt) 12405 has_reset = true; 12406 12407 if (!no_heartbeat && has_reset) 12408 return true; 12409 12410 return false; 12411 } 12412 12413 /* rtnl_lock is acquired before calling this function */ 12414 static void bnxt_force_fw_reset(struct bnxt *bp) 12415 { 12416 struct bnxt_fw_health *fw_health = bp->fw_health; 12417 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12418 u32 wait_dsecs; 12419 12420 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 12421 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12422 return; 12423 12424 if (ptp) { 12425 spin_lock_bh(&ptp->ptp_lock); 12426 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12427 spin_unlock_bh(&ptp->ptp_lock); 12428 } else { 12429 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12430 } 12431 bnxt_fw_reset_close(bp); 12432 wait_dsecs = fw_health->master_func_wait_dsecs; 12433 if (fw_health->primary) { 12434 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 12435 wait_dsecs = 0; 12436 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 12437 } else { 12438 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 12439 wait_dsecs = fw_health->normal_func_wait_dsecs; 12440 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12441 } 12442 12443 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 12444 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 12445 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 12446 } 12447 12448 void bnxt_fw_exception(struct bnxt *bp) 12449 { 12450 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 12451 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 12452 bnxt_rtnl_lock_sp(bp); 12453 bnxt_force_fw_reset(bp); 12454 bnxt_rtnl_unlock_sp(bp); 12455 } 12456 12457 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 12458 * < 0 on error. 
12459 */ 12460 static int bnxt_get_registered_vfs(struct bnxt *bp) 12461 { 12462 #ifdef CONFIG_BNXT_SRIOV 12463 int rc; 12464 12465 if (!BNXT_PF(bp)) 12466 return 0; 12467 12468 rc = bnxt_hwrm_func_qcfg(bp); 12469 if (rc) { 12470 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 12471 return rc; 12472 } 12473 if (bp->pf.registered_vfs) 12474 return bp->pf.registered_vfs; 12475 if (bp->sriov_cfg) 12476 return 1; 12477 #endif 12478 return 0; 12479 } 12480 12481 void bnxt_fw_reset(struct bnxt *bp) 12482 { 12483 bnxt_rtnl_lock_sp(bp); 12484 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 12485 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12486 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12487 int n = 0, tmo; 12488 12489 if (ptp) { 12490 spin_lock_bh(&ptp->ptp_lock); 12491 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12492 spin_unlock_bh(&ptp->ptp_lock); 12493 } else { 12494 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12495 } 12496 if (bp->pf.active_vfs && 12497 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 12498 n = bnxt_get_registered_vfs(bp); 12499 if (n < 0) { 12500 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 12501 n); 12502 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12503 dev_close(bp->dev); 12504 goto fw_reset_exit; 12505 } else if (n > 0) { 12506 u16 vf_tmo_dsecs = n * 10; 12507 12508 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 12509 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 12510 bp->fw_reset_state = 12511 BNXT_FW_RESET_STATE_POLL_VF; 12512 bnxt_queue_fw_reset_work(bp, HZ / 10); 12513 goto fw_reset_exit; 12514 } 12515 bnxt_fw_reset_close(bp); 12516 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 12517 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 12518 tmo = HZ / 10; 12519 } else { 12520 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12521 tmo = bp->fw_reset_min_dsecs * HZ / 10; 12522 } 12523 bnxt_queue_fw_reset_work(bp, tmo); 12524 } 12525 fw_reset_exit: 12526 bnxt_rtnl_unlock_sp(bp); 12527 } 12528 12529 static void bnxt_chk_missed_irq(struct bnxt *bp) 12530 { 12531 int i; 12532 12533 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12534 return; 12535 12536 for (i = 0; i < bp->cp_nr_rings; i++) { 12537 struct bnxt_napi *bnapi = bp->bnapi[i]; 12538 struct bnxt_cp_ring_info *cpr; 12539 u32 fw_ring_id; 12540 int j; 12541 12542 if (!bnapi) 12543 continue; 12544 12545 cpr = &bnapi->cp_ring; 12546 for (j = 0; j < cpr->cp_ring_count; j++) { 12547 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 12548 u32 val[2]; 12549 12550 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 12551 continue; 12552 12553 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 12554 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 12555 continue; 12556 } 12557 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 12558 bnxt_dbg_hwrm_ring_info_get(bp, 12559 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 12560 fw_ring_id, &val[0], &val[1]); 12561 cpr->sw_stats.cmn.missed_irqs++; 12562 } 12563 } 12564 } 12565 12566 static void bnxt_cfg_ntp_filters(struct bnxt *); 12567 12568 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 12569 { 12570 struct bnxt_link_info *link_info = &bp->link_info; 12571 12572 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 12573 link_info->autoneg = BNXT_AUTONEG_SPEED; 12574 if (bp->hwrm_spec_code >= 0x10201) { 12575 if (link_info->auto_pause_setting & 12576 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 12577 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12578 } else { 12579 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12580 } 12581 bnxt_set_auto_speed(link_info); 
12582 } else { 12583 bnxt_set_force_speed(link_info); 12584 link_info->req_duplex = link_info->duplex_setting; 12585 } 12586 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 12587 link_info->req_flow_ctrl = 12588 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 12589 else 12590 link_info->req_flow_ctrl = link_info->force_pause_setting; 12591 } 12592 12593 static void bnxt_fw_echo_reply(struct bnxt *bp) 12594 { 12595 struct bnxt_fw_health *fw_health = bp->fw_health; 12596 struct hwrm_func_echo_response_input *req; 12597 int rc; 12598 12599 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 12600 if (rc) 12601 return; 12602 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 12603 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 12604 hwrm_req_send(bp, req); 12605 } 12606 12607 static void bnxt_sp_task(struct work_struct *work) 12608 { 12609 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 12610 12611 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12612 smp_mb__after_atomic(); 12613 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12614 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12615 return; 12616 } 12617 12618 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 12619 bnxt_cfg_rx_mode(bp); 12620 12621 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 12622 bnxt_cfg_ntp_filters(bp); 12623 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 12624 bnxt_hwrm_exec_fwd_req(bp); 12625 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 12626 bnxt_hwrm_port_qstats(bp, 0); 12627 bnxt_hwrm_port_qstats_ext(bp, 0); 12628 bnxt_accumulate_all_stats(bp); 12629 } 12630 12631 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 12632 int rc; 12633 12634 mutex_lock(&bp->link_lock); 12635 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 12636 &bp->sp_event)) 12637 bnxt_hwrm_phy_qcaps(bp); 12638 12639 rc = bnxt_update_link(bp, true); 12640 if (rc) 12641 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 12642 rc); 12643 12644 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 12645 &bp->sp_event)) 12646 bnxt_init_ethtool_link_settings(bp); 12647 mutex_unlock(&bp->link_lock); 12648 } 12649 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 12650 int rc; 12651 12652 mutex_lock(&bp->link_lock); 12653 rc = bnxt_update_phy_setting(bp); 12654 mutex_unlock(&bp->link_lock); 12655 if (rc) { 12656 netdev_warn(bp->dev, "update phy settings retry failed\n"); 12657 } else { 12658 bp->link_info.phy_retry = false; 12659 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 12660 } 12661 } 12662 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 12663 mutex_lock(&bp->link_lock); 12664 bnxt_get_port_module_status(bp); 12665 mutex_unlock(&bp->link_lock); 12666 } 12667 12668 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 12669 bnxt_tc_flow_stats_work(bp); 12670 12671 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 12672 bnxt_chk_missed_irq(bp); 12673 12674 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 12675 bnxt_fw_echo_reply(bp); 12676 12677 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 12678 bnxt_hwmon_notify_event(bp); 12679 12680 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 12681 * must be the last functions to be called before exiting. 
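 * (They take the rtnl lock through bnxt_rtnl_lock_sp(), which must drop
 * BNXT_STATE_IN_SP_TASK first so that a bnxt_close() already holding
 * rtnl is not deadlocked; once the bit has been dropped, the
 * BNXT_STATE_OPEN check at the top of this function no longer protects
 * any handler that would run afterwards.)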
12682 */ 12683 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 12684 bnxt_reset(bp, false); 12685 12686 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 12687 bnxt_reset(bp, true); 12688 12689 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 12690 bnxt_rx_ring_reset(bp); 12691 12692 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 12693 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 12694 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 12695 bnxt_devlink_health_fw_report(bp); 12696 else 12697 bnxt_fw_reset(bp); 12698 } 12699 12700 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 12701 if (!is_bnxt_fw_ok(bp)) 12702 bnxt_devlink_health_fw_report(bp); 12703 } 12704 12705 smp_mb__before_atomic(); 12706 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12707 } 12708 12709 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12710 int *max_cp); 12711 12712 /* Under rtnl_lock */ 12713 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 12714 int tx_xdp) 12715 { 12716 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 12717 int tx_rings_needed, stats; 12718 int rx_rings = rx; 12719 int cp, vnics; 12720 12721 if (tcs) 12722 tx_sets = tcs; 12723 12724 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12725 rx_rings <<= 1; 12726 12727 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 12728 12729 if (max_rx < rx_rings) 12730 return -ENOMEM; 12731 12732 tx_rings_needed = tx * tx_sets + tx_xdp; 12733 if (max_tx < tx_rings_needed) 12734 return -ENOMEM; 12735 12736 vnics = 1; 12737 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == 12738 BNXT_FLAG_RFS) 12739 vnics += rx; 12740 12741 tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); 12742 cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 12743 if (max_cp < cp) 12744 return -ENOMEM; 12745 stats = cp; 12746 if (BNXT_NEW_RM(bp)) { 12747 cp += bnxt_get_ulp_msix_num(bp); 12748 stats += bnxt_get_ulp_stat_ctxs(bp); 12749 } 12750 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 12751 stats, vnics); 12752 } 12753 12754 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 12755 { 12756 if (bp->bar2) { 12757 pci_iounmap(pdev, bp->bar2); 12758 bp->bar2 = NULL; 12759 } 12760 12761 if (bp->bar1) { 12762 pci_iounmap(pdev, bp->bar1); 12763 bp->bar1 = NULL; 12764 } 12765 12766 if (bp->bar0) { 12767 pci_iounmap(pdev, bp->bar0); 12768 bp->bar0 = NULL; 12769 } 12770 } 12771 12772 static void bnxt_cleanup_pci(struct bnxt *bp) 12773 { 12774 bnxt_unmap_bars(bp, bp->pdev); 12775 pci_release_regions(bp->pdev); 12776 if (pci_is_enabled(bp->pdev)) 12777 pci_disable_device(bp->pdev); 12778 } 12779 12780 static void bnxt_init_dflt_coal(struct bnxt *bp) 12781 { 12782 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 12783 struct bnxt_coal *coal; 12784 u16 flags = 0; 12785 12786 if (coal_cap->cmpl_params & 12787 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 12788 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 12789 12790 /* Tick values in micro seconds. 12791 * 1 coal_buf x bufs_per_record = 1 completion record. 
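 * With the defaults below this works out to roughly 10 usecs / 15
 * frames for RX and 28 usecs / 30 frames for TX in "ethtool -c"
 * terms, assuming the usual coal_bufs / bufs_per_record conversion.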
12792 */ 12793 coal = &bp->rx_coal; 12794 coal->coal_ticks = 10; 12795 coal->coal_bufs = 30; 12796 coal->coal_ticks_irq = 1; 12797 coal->coal_bufs_irq = 2; 12798 coal->idle_thresh = 50; 12799 coal->bufs_per_record = 2; 12800 coal->budget = 64; /* NAPI budget */ 12801 coal->flags = flags; 12802 12803 coal = &bp->tx_coal; 12804 coal->coal_ticks = 28; 12805 coal->coal_bufs = 30; 12806 coal->coal_ticks_irq = 2; 12807 coal->coal_bufs_irq = 2; 12808 coal->bufs_per_record = 1; 12809 coal->flags = flags; 12810 12811 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 12812 } 12813 12814 /* FW that pre-reserves 1 VNIC per function */ 12815 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 12816 { 12817 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 12818 12819 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12820 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 12821 return true; 12822 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12823 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 12824 return true; 12825 return false; 12826 } 12827 12828 static int bnxt_fw_init_one_p1(struct bnxt *bp) 12829 { 12830 int rc; 12831 12832 bp->fw_cap = 0; 12833 rc = bnxt_hwrm_ver_get(bp); 12834 bnxt_try_map_fw_health_reg(bp); 12835 if (rc) { 12836 rc = bnxt_try_recover_fw(bp); 12837 if (rc) 12838 return rc; 12839 rc = bnxt_hwrm_ver_get(bp); 12840 if (rc) 12841 return rc; 12842 } 12843 12844 bnxt_nvm_cfg_ver_get(bp); 12845 12846 rc = bnxt_hwrm_func_reset(bp); 12847 if (rc) 12848 return -ENODEV; 12849 12850 bnxt_hwrm_fw_set_time(bp); 12851 return 0; 12852 } 12853 12854 static int bnxt_fw_init_one_p2(struct bnxt *bp) 12855 { 12856 int rc; 12857 12858 /* Get the MAX capabilities for this function */ 12859 rc = bnxt_hwrm_func_qcaps(bp); 12860 if (rc) { 12861 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 12862 rc); 12863 return -ENODEV; 12864 } 12865 12866 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 12867 if (rc) 12868 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 12869 rc); 12870 12871 if (bnxt_alloc_fw_health(bp)) { 12872 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 12873 } else { 12874 rc = bnxt_hwrm_error_recovery_qcfg(bp); 12875 if (rc) 12876 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 12877 rc); 12878 } 12879 12880 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 12881 if (rc) 12882 return -ENODEV; 12883 12884 if (bnxt_fw_pre_resv_vnics(bp)) 12885 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 12886 12887 bnxt_hwrm_func_qcfg(bp); 12888 bnxt_hwrm_vnic_qcaps(bp); 12889 bnxt_hwrm_port_led_qcaps(bp); 12890 bnxt_ethtool_init(bp); 12891 if (bp->fw_cap & BNXT_FW_CAP_PTP) 12892 __bnxt_hwrm_ptp_qcfg(bp); 12893 bnxt_dcb_init(bp); 12894 bnxt_hwmon_init(bp); 12895 return 0; 12896 } 12897 12898 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 12899 { 12900 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 12901 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 12902 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 12903 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 12904 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 12905 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 12906 bp->rss_hash_delta = bp->rss_hash_cfg; 12907 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 12908 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 12909 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 12910 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 12911 } 12912 } 12913 12914 static void bnxt_set_dflt_rfs(struct bnxt *bp) 12915 { 12916 struct net_device *dev = bp->dev; 12917 12918 
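	/* bnxt_rfs_supported() reflects what the chip and firmware can do and
	 * decides whether NETIF_F_NTUPLE is advertised in hw_features at all;
	 * bnxt_rfs_capable() additionally checks that enough VNICs and RSS
	 * contexts are currently available before the feature is enabled by
	 * default.
	 */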
dev->hw_features &= ~NETIF_F_NTUPLE; 12919 dev->features &= ~NETIF_F_NTUPLE; 12920 bp->flags &= ~BNXT_FLAG_RFS; 12921 if (bnxt_rfs_supported(bp)) { 12922 dev->hw_features |= NETIF_F_NTUPLE; 12923 if (bnxt_rfs_capable(bp)) { 12924 bp->flags |= BNXT_FLAG_RFS; 12925 dev->features |= NETIF_F_NTUPLE; 12926 } 12927 } 12928 } 12929 12930 static void bnxt_fw_init_one_p3(struct bnxt *bp) 12931 { 12932 struct pci_dev *pdev = bp->pdev; 12933 12934 bnxt_set_dflt_rss_hash_type(bp); 12935 bnxt_set_dflt_rfs(bp); 12936 12937 bnxt_get_wol_settings(bp); 12938 if (bp->flags & BNXT_FLAG_WOL_CAP) 12939 device_set_wakeup_enable(&pdev->dev, bp->wol); 12940 else 12941 device_set_wakeup_capable(&pdev->dev, false); 12942 12943 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 12944 bnxt_hwrm_coal_params_qcaps(bp); 12945 } 12946 12947 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 12948 12949 int bnxt_fw_init_one(struct bnxt *bp) 12950 { 12951 int rc; 12952 12953 rc = bnxt_fw_init_one_p1(bp); 12954 if (rc) { 12955 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 12956 return rc; 12957 } 12958 rc = bnxt_fw_init_one_p2(bp); 12959 if (rc) { 12960 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 12961 return rc; 12962 } 12963 rc = bnxt_probe_phy(bp, false); 12964 if (rc) 12965 return rc; 12966 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 12967 if (rc) 12968 return rc; 12969 12970 bnxt_fw_init_one_p3(bp); 12971 return 0; 12972 } 12973 12974 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 12975 { 12976 struct bnxt_fw_health *fw_health = bp->fw_health; 12977 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 12978 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 12979 u32 reg_type, reg_off, delay_msecs; 12980 12981 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 12982 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 12983 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 12984 switch (reg_type) { 12985 case BNXT_FW_HEALTH_REG_TYPE_CFG: 12986 pci_write_config_dword(bp->pdev, reg_off, val); 12987 break; 12988 case BNXT_FW_HEALTH_REG_TYPE_GRC: 12989 writel(reg_off & BNXT_GRC_BASE_MASK, 12990 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 12991 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 12992 fallthrough; 12993 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 12994 writel(val, bp->bar0 + reg_off); 12995 break; 12996 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 12997 writel(val, bp->bar1 + reg_off); 12998 break; 12999 } 13000 if (delay_msecs) { 13001 pci_read_config_dword(bp->pdev, 0, &val); 13002 msleep(delay_msecs); 13003 } 13004 } 13005 13006 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 13007 { 13008 struct hwrm_func_qcfg_output *resp; 13009 struct hwrm_func_qcfg_input *req; 13010 bool result = true; /* firmware will enforce if unknown */ 13011 13012 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 13013 return result; 13014 13015 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 13016 return result; 13017 13018 req->fid = cpu_to_le16(0xffff); 13019 resp = hwrm_req_hold(bp, req); 13020 if (!hwrm_req_send(bp, req)) 13021 result = !!(le16_to_cpu(resp->flags) & 13022 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 13023 hwrm_req_drop(bp, req); 13024 return result; 13025 } 13026 13027 static void bnxt_reset_all(struct bnxt *bp) 13028 { 13029 struct bnxt_fw_health *fw_health = bp->fw_health; 13030 int i, rc; 13031 13032 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13033 bnxt_fw_reset_via_optee(bp); 13034 bp->fw_reset_timestamp = jiffies; 13035 return; 13036 } 13037 13038 if (fw_health->flags & 
ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 13039 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 13040 bnxt_fw_reset_writel(bp, i); 13041 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 13042 struct hwrm_fw_reset_input *req; 13043 13044 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 13045 if (!rc) { 13046 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 13047 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 13048 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 13049 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 13050 rc = hwrm_req_send(bp, req); 13051 } 13052 if (rc != -ENODEV) 13053 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 13054 } 13055 bp->fw_reset_timestamp = jiffies; 13056 } 13057 13058 static bool bnxt_fw_reset_timeout(struct bnxt *bp) 13059 { 13060 return time_after(jiffies, bp->fw_reset_timestamp + 13061 (bp->fw_reset_max_dsecs * HZ / 10)); 13062 } 13063 13064 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 13065 { 13066 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13067 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { 13068 bnxt_ulp_start(bp, rc); 13069 bnxt_dl_health_fw_status_update(bp, false); 13070 } 13071 bp->fw_reset_state = 0; 13072 dev_close(bp->dev); 13073 } 13074 13075 static void bnxt_fw_reset_task(struct work_struct *work) 13076 { 13077 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 13078 int rc = 0; 13079 13080 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 13081 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 13082 return; 13083 } 13084 13085 switch (bp->fw_reset_state) { 13086 case BNXT_FW_RESET_STATE_POLL_VF: { 13087 int n = bnxt_get_registered_vfs(bp); 13088 int tmo; 13089 13090 if (n < 0) { 13091 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 13092 n, jiffies_to_msecs(jiffies - 13093 bp->fw_reset_timestamp)); 13094 goto fw_reset_abort; 13095 } else if (n > 0) { 13096 if (bnxt_fw_reset_timeout(bp)) { 13097 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13098 bp->fw_reset_state = 0; 13099 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 13100 n); 13101 return; 13102 } 13103 bnxt_queue_fw_reset_work(bp, HZ / 10); 13104 return; 13105 } 13106 bp->fw_reset_timestamp = jiffies; 13107 rtnl_lock(); 13108 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 13109 bnxt_fw_reset_abort(bp, rc); 13110 rtnl_unlock(); 13111 return; 13112 } 13113 bnxt_fw_reset_close(bp); 13114 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 13115 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 13116 tmo = HZ / 10; 13117 } else { 13118 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13119 tmo = bp->fw_reset_min_dsecs * HZ / 10; 13120 } 13121 rtnl_unlock(); 13122 bnxt_queue_fw_reset_work(bp, tmo); 13123 return; 13124 } 13125 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 13126 u32 val; 13127 13128 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 13129 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 13130 !bnxt_fw_reset_timeout(bp)) { 13131 bnxt_queue_fw_reset_work(bp, HZ / 5); 13132 return; 13133 } 13134 13135 if (!bp->fw_health->primary) { 13136 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 13137 13138 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13139 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 13140 return; 13141 } 13142 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 13143 } 13144 fallthrough; 13145 case 
BNXT_FW_RESET_STATE_RESET_FW: 13146 bnxt_reset_all(bp); 13147 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 13148 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 13149 return; 13150 case BNXT_FW_RESET_STATE_ENABLE_DEV: 13151 bnxt_inv_fw_health_reg(bp); 13152 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 13153 !bp->fw_reset_min_dsecs) { 13154 u16 val; 13155 13156 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 13157 if (val == 0xffff) { 13158 if (bnxt_fw_reset_timeout(bp)) { 13159 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 13160 rc = -ETIMEDOUT; 13161 goto fw_reset_abort; 13162 } 13163 bnxt_queue_fw_reset_work(bp, HZ / 1000); 13164 return; 13165 } 13166 } 13167 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 13168 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 13169 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 13170 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 13171 bnxt_dl_remote_reload(bp); 13172 if (pci_enable_device(bp->pdev)) { 13173 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 13174 rc = -ENODEV; 13175 goto fw_reset_abort; 13176 } 13177 pci_set_master(bp->pdev); 13178 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 13179 fallthrough; 13180 case BNXT_FW_RESET_STATE_POLL_FW: 13181 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 13182 rc = bnxt_hwrm_poll(bp); 13183 if (rc) { 13184 if (bnxt_fw_reset_timeout(bp)) { 13185 netdev_err(bp->dev, "Firmware reset aborted\n"); 13186 goto fw_reset_abort_status; 13187 } 13188 bnxt_queue_fw_reset_work(bp, HZ / 5); 13189 return; 13190 } 13191 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 13192 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 13193 fallthrough; 13194 case BNXT_FW_RESET_STATE_OPENING: 13195 while (!rtnl_trylock()) { 13196 bnxt_queue_fw_reset_work(bp, HZ / 10); 13197 return; 13198 } 13199 rc = bnxt_open(bp->dev); 13200 if (rc) { 13201 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 13202 bnxt_fw_reset_abort(bp, rc); 13203 rtnl_unlock(); 13204 return; 13205 } 13206 13207 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 13208 bp->fw_health->enabled) { 13209 bp->fw_health->last_fw_reset_cnt = 13210 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13211 } 13212 bp->fw_reset_state = 0; 13213 /* Make sure fw_reset_state is 0 before clearing the flag */ 13214 smp_mb__before_atomic(); 13215 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13216 bnxt_ulp_start(bp, 0); 13217 bnxt_reenable_sriov(bp); 13218 bnxt_vf_reps_alloc(bp); 13219 bnxt_vf_reps_open(bp); 13220 bnxt_ptp_reapply_pps(bp); 13221 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 13222 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 13223 bnxt_dl_health_fw_recovery_done(bp); 13224 bnxt_dl_health_fw_status_update(bp, true); 13225 } 13226 rtnl_unlock(); 13227 break; 13228 } 13229 return; 13230 13231 fw_reset_abort_status: 13232 if (bp->fw_health->status_reliable || 13233 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 13234 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 13235 13236 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 13237 } 13238 fw_reset_abort: 13239 rtnl_lock(); 13240 bnxt_fw_reset_abort(bp, rc); 13241 rtnl_unlock(); 13242 } 13243 13244 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 13245 { 13246 int rc; 13247 struct bnxt *bp = netdev_priv(dev); 13248 13249 SET_NETDEV_DEV(dev, &pdev->dev); 13250 13251 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 13252 rc = pci_enable_device(pdev); 13253 if (rc) { 13254 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 13255 goto init_err; 13256 } 13257 13258 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 13259 dev_err(&pdev->dev, 13260 "Cannot find PCI device base address, aborting\n"); 13261 rc = -ENODEV; 13262 goto init_err_disable; 13263 } 13264 13265 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 13266 if (rc) { 13267 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 13268 goto init_err_disable; 13269 } 13270 13271 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 13272 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 13273 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 13274 rc = -EIO; 13275 goto init_err_release; 13276 } 13277 13278 pci_set_master(pdev); 13279 13280 bp->dev = dev; 13281 bp->pdev = pdev; 13282 13283 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 13284 * determines the BAR size. 13285 */ 13286 bp->bar0 = pci_ioremap_bar(pdev, 0); 13287 if (!bp->bar0) { 13288 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 13289 rc = -ENOMEM; 13290 goto init_err_release; 13291 } 13292 13293 bp->bar2 = pci_ioremap_bar(pdev, 4); 13294 if (!bp->bar2) { 13295 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 13296 rc = -ENOMEM; 13297 goto init_err_release; 13298 } 13299 13300 INIT_WORK(&bp->sp_task, bnxt_sp_task); 13301 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 13302 13303 spin_lock_init(&bp->ntp_fltr_lock); 13304 #if BITS_PER_LONG == 32 13305 spin_lock_init(&bp->db_lock); 13306 #endif 13307 13308 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 13309 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 13310 13311 timer_setup(&bp->timer, bnxt_timer, 0); 13312 bp->current_interval = BNXT_TIMER_INTERVAL; 13313 13314 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 13315 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 13316 13317 clear_bit(BNXT_STATE_OPEN, &bp->state); 13318 return 0; 13319 13320 init_err_release: 13321 bnxt_unmap_bars(bp, pdev); 13322 pci_release_regions(pdev); 13323 13324 init_err_disable: 13325 pci_disable_device(pdev); 13326 13327 init_err: 13328 return rc; 13329 } 13330 13331 /* rtnl_lock held */ 13332 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 13333 { 13334 struct sockaddr *addr = p; 13335 struct bnxt *bp = netdev_priv(dev); 13336 int rc = 0; 13337 13338 if (!is_valid_ether_addr(addr->sa_data)) 13339 return -EADDRNOTAVAIL; 13340 13341 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 13342 return 0; 13343 13344 rc = bnxt_approve_mac(bp, addr->sa_data, true); 13345 if (rc) 13346 return rc; 13347 13348 eth_hw_addr_set(dev, addr->sa_data); 13349 if (netif_running(dev)) { 13350 bnxt_close_nic(bp, false, false); 13351 rc = bnxt_open_nic(bp, false, false); 13352 } 13353 13354 return rc; 13355 } 13356 13357 /* rtnl_lock held */ 13358 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 13359 { 13360 struct bnxt *bp = netdev_priv(dev); 13361 13362 if (netif_running(dev)) 13363 bnxt_close_nic(bp, true, false); 13364 13365 dev->mtu = new_mtu; 13366 bnxt_set_ring_params(bp); 13367 13368 if (netif_running(dev)) 13369 return bnxt_open_nic(bp, true, false); 13370 13371 return 0; 13372 } 13373 13374 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 13375 { 13376 struct bnxt *bp = netdev_priv(dev); 13377 bool sh = false; 13378 int rc, tx_cp; 13379 13380 if (tc > bp->max_tc) { 13381 
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 13382 tc, bp->max_tc); 13383 return -EINVAL; 13384 } 13385 13386 if (netdev_get_num_tc(dev) == tc) 13387 return 0; 13388 13389 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 13390 sh = true; 13391 13392 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 13393 sh, tc, bp->tx_nr_rings_xdp); 13394 if (rc) 13395 return rc; 13396 13397 /* Needs to close the device and do hw resource re-allocations */ 13398 if (netif_running(bp->dev)) 13399 bnxt_close_nic(bp, true, false); 13400 13401 if (tc) { 13402 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 13403 netdev_set_num_tc(dev, tc); 13404 } else { 13405 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13406 netdev_reset_tc(dev); 13407 } 13408 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 13409 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 13410 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 13411 tx_cp + bp->rx_nr_rings; 13412 13413 if (netif_running(bp->dev)) 13414 return bnxt_open_nic(bp, true, false); 13415 13416 return 0; 13417 } 13418 13419 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 13420 void *cb_priv) 13421 { 13422 struct bnxt *bp = cb_priv; 13423 13424 if (!bnxt_tc_flower_enabled(bp) || 13425 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 13426 return -EOPNOTSUPP; 13427 13428 switch (type) { 13429 case TC_SETUP_CLSFLOWER: 13430 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 13431 default: 13432 return -EOPNOTSUPP; 13433 } 13434 } 13435 13436 LIST_HEAD(bnxt_block_cb_list); 13437 13438 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 13439 void *type_data) 13440 { 13441 struct bnxt *bp = netdev_priv(dev); 13442 13443 switch (type) { 13444 case TC_SETUP_BLOCK: 13445 return flow_block_cb_setup_simple(type_data, 13446 &bnxt_block_cb_list, 13447 bnxt_setup_tc_block_cb, 13448 bp, bp, true); 13449 case TC_SETUP_QDISC_MQPRIO: { 13450 struct tc_mqprio_qopt *mqprio = type_data; 13451 13452 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 13453 13454 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 13455 } 13456 default: 13457 return -EOPNOTSUPP; 13458 } 13459 } 13460 13461 #ifdef CONFIG_RFS_ACCEL 13462 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 13463 struct bnxt_ntuple_filter *f2) 13464 { 13465 struct flow_keys *keys1 = &f1->fkeys; 13466 struct flow_keys *keys2 = &f2->fkeys; 13467 13468 if (keys1->basic.n_proto != keys2->basic.n_proto || 13469 keys1->basic.ip_proto != keys2->basic.ip_proto) 13470 return false; 13471 13472 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 13473 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 13474 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 13475 return false; 13476 } else { 13477 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 13478 sizeof(keys1->addrs.v6addrs.src)) || 13479 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 13480 sizeof(keys1->addrs.v6addrs.dst))) 13481 return false; 13482 } 13483 13484 if (keys1->ports.ports == keys2->ports.ports && 13485 keys1->control.flags == keys2->control.flags && 13486 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 13487 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 13488 return true; 13489 13490 return false; 13491 } 13492 13493 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 13494 u16 rxq_index, u32 flow_id) 13495 { 13496 struct bnxt *bp = netdev_priv(dev); 13497 struct bnxt_ntuple_filter *fltr, *new_fltr; 13498 
struct flow_keys *fkeys; 13499 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 13500 int rc = 0, idx, bit_id, l2_idx = 0; 13501 struct hlist_head *head; 13502 u32 flags; 13503 13504 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 13505 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 13506 int off = 0, j; 13507 13508 netif_addr_lock_bh(dev); 13509 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 13510 if (ether_addr_equal(eth->h_dest, 13511 vnic->uc_list + off)) { 13512 l2_idx = j + 1; 13513 break; 13514 } 13515 } 13516 netif_addr_unlock_bh(dev); 13517 if (!l2_idx) 13518 return -EINVAL; 13519 } 13520 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 13521 if (!new_fltr) 13522 return -ENOMEM; 13523 13524 fkeys = &new_fltr->fkeys; 13525 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 13526 rc = -EPROTONOSUPPORT; 13527 goto err_free; 13528 } 13529 13530 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 13531 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 13532 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 13533 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 13534 rc = -EPROTONOSUPPORT; 13535 goto err_free; 13536 } 13537 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 13538 bp->hwrm_spec_code < 0x10601) { 13539 rc = -EPROTONOSUPPORT; 13540 goto err_free; 13541 } 13542 flags = fkeys->control.flags; 13543 if (((flags & FLOW_DIS_ENCAPSULATION) && 13544 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 13545 rc = -EPROTONOSUPPORT; 13546 goto err_free; 13547 } 13548 13549 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 13550 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 13551 13552 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 13553 head = &bp->ntp_fltr_hash_tbl[idx]; 13554 rcu_read_lock(); 13555 hlist_for_each_entry_rcu(fltr, head, hash) { 13556 if (bnxt_fltr_match(fltr, new_fltr)) { 13557 rc = fltr->sw_id; 13558 rcu_read_unlock(); 13559 goto err_free; 13560 } 13561 } 13562 rcu_read_unlock(); 13563 13564 spin_lock_bh(&bp->ntp_fltr_lock); 13565 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 13566 BNXT_NTP_FLTR_MAX_FLTR, 0); 13567 if (bit_id < 0) { 13568 spin_unlock_bh(&bp->ntp_fltr_lock); 13569 rc = -ENOMEM; 13570 goto err_free; 13571 } 13572 13573 new_fltr->sw_id = (u16)bit_id; 13574 new_fltr->flow_id = flow_id; 13575 new_fltr->l2_fltr_idx = l2_idx; 13576 new_fltr->rxq = rxq_index; 13577 hlist_add_head_rcu(&new_fltr->hash, head); 13578 bp->ntp_fltr_count++; 13579 spin_unlock_bh(&bp->ntp_fltr_lock); 13580 13581 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13582 13583 return new_fltr->sw_id; 13584 13585 err_free: 13586 kfree(new_fltr); 13587 return rc; 13588 } 13589 13590 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 13591 { 13592 int i; 13593 13594 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 13595 struct hlist_head *head; 13596 struct hlist_node *tmp; 13597 struct bnxt_ntuple_filter *fltr; 13598 int rc; 13599 13600 head = &bp->ntp_fltr_hash_tbl[i]; 13601 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 13602 bool del = false; 13603 13604 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 13605 if (rps_may_expire_flow(bp->dev, fltr->rxq, 13606 fltr->flow_id, 13607 fltr->sw_id)) { 13608 bnxt_hwrm_cfa_ntuple_filter_free(bp, 13609 fltr); 13610 del = true; 13611 } 13612 } else { 13613 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 13614 fltr); 13615 if (rc) 13616 del = true; 13617 else 13618 set_bit(BNXT_FLTR_VALID, &fltr->state); 13619 } 13620 13621 if (del) { 13622 spin_lock_bh(&bp->ntp_fltr_lock); 13623 
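				/* Unlink the filter under ntp_fltr_lock and wait
				 * for an RCU grace period before freeing it, so a
				 * concurrent bnxt_rx_flow_steer() lookup cannot
				 * touch freed memory.
				 */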
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!\n");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
				    unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;

	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}

static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
				      unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}

static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.set_port	= bnxt_udp_tunnel_set_port,
	.unset_port	= bnxt_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}
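/* Netdev operations table for the bnxt device; the SR-IOV and aRFS hooks
 * below are compiled in only when CONFIG_BNXT_SRIOV and CONFIG_RFS_ACCEL
 * are enabled, respectively.
 */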
13742 13743 static const struct net_device_ops bnxt_netdev_ops = { 13744 .ndo_open = bnxt_open, 13745 .ndo_start_xmit = bnxt_start_xmit, 13746 .ndo_stop = bnxt_close, 13747 .ndo_get_stats64 = bnxt_get_stats64, 13748 .ndo_set_rx_mode = bnxt_set_rx_mode, 13749 .ndo_eth_ioctl = bnxt_ioctl, 13750 .ndo_validate_addr = eth_validate_addr, 13751 .ndo_set_mac_address = bnxt_change_mac_addr, 13752 .ndo_change_mtu = bnxt_change_mtu, 13753 .ndo_fix_features = bnxt_fix_features, 13754 .ndo_set_features = bnxt_set_features, 13755 .ndo_features_check = bnxt_features_check, 13756 .ndo_tx_timeout = bnxt_tx_timeout, 13757 #ifdef CONFIG_BNXT_SRIOV 13758 .ndo_get_vf_config = bnxt_get_vf_config, 13759 .ndo_set_vf_mac = bnxt_set_vf_mac, 13760 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 13761 .ndo_set_vf_rate = bnxt_set_vf_bw, 13762 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 13763 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 13764 .ndo_set_vf_trust = bnxt_set_vf_trust, 13765 #endif 13766 .ndo_setup_tc = bnxt_setup_tc, 13767 #ifdef CONFIG_RFS_ACCEL 13768 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 13769 #endif 13770 .ndo_bpf = bnxt_xdp, 13771 .ndo_xdp_xmit = bnxt_xdp_xmit, 13772 .ndo_bridge_getlink = bnxt_bridge_getlink, 13773 .ndo_bridge_setlink = bnxt_bridge_setlink, 13774 }; 13775 13776 static void bnxt_remove_one(struct pci_dev *pdev) 13777 { 13778 struct net_device *dev = pci_get_drvdata(pdev); 13779 struct bnxt *bp = netdev_priv(dev); 13780 13781 if (BNXT_PF(bp)) 13782 bnxt_sriov_disable(bp); 13783 13784 bnxt_rdma_aux_device_uninit(bp); 13785 13786 bnxt_ptp_clear(bp); 13787 unregister_netdev(dev); 13788 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 13789 /* Flush any pending tasks */ 13790 cancel_work_sync(&bp->sp_task); 13791 cancel_delayed_work_sync(&bp->fw_reset_task); 13792 bp->sp_event = 0; 13793 13794 bnxt_dl_fw_reporters_destroy(bp); 13795 bnxt_dl_unregister(bp); 13796 bnxt_shutdown_tc(bp); 13797 13798 bnxt_clear_int_mode(bp); 13799 bnxt_hwrm_func_drv_unrgtr(bp); 13800 bnxt_free_hwrm_resources(bp); 13801 bnxt_hwmon_uninit(bp); 13802 bnxt_ethtool_free(bp); 13803 bnxt_dcb_free(bp); 13804 kfree(bp->ptp_cfg); 13805 bp->ptp_cfg = NULL; 13806 kfree(bp->fw_health); 13807 bp->fw_health = NULL; 13808 bnxt_cleanup_pci(bp); 13809 bnxt_free_ctx_mem(bp); 13810 kfree(bp->rss_indir_tbl); 13811 bp->rss_indir_tbl = NULL; 13812 bnxt_free_port_stats(bp); 13813 free_netdev(dev); 13814 } 13815 13816 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 13817 { 13818 int rc = 0; 13819 struct bnxt_link_info *link_info = &bp->link_info; 13820 13821 bp->phy_flags = 0; 13822 rc = bnxt_hwrm_phy_qcaps(bp); 13823 if (rc) { 13824 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 13825 rc); 13826 return rc; 13827 } 13828 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 13829 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 13830 else 13831 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 13832 if (!fw_dflt) 13833 return 0; 13834 13835 mutex_lock(&bp->link_lock); 13836 rc = bnxt_update_link(bp, false); 13837 if (rc) { 13838 mutex_unlock(&bp->link_lock); 13839 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 13840 rc); 13841 return rc; 13842 } 13843 13844 /* Older firmware does not have supported_auto_speeds, so assume 13845 * that all supported speeds can be autonegotiated. 
13846 */ 13847 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 13848 link_info->support_auto_speeds = link_info->support_speeds; 13849 13850 bnxt_init_ethtool_link_settings(bp); 13851 mutex_unlock(&bp->link_lock); 13852 return 0; 13853 } 13854 13855 static int bnxt_get_max_irq(struct pci_dev *pdev) 13856 { 13857 u16 ctrl; 13858 13859 if (!pdev->msix_cap) 13860 return 1; 13861 13862 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 13863 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 13864 } 13865 13866 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 13867 int *max_cp) 13868 { 13869 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 13870 int max_ring_grps = 0, max_irq; 13871 13872 *max_tx = hw_resc->max_tx_rings; 13873 *max_rx = hw_resc->max_rx_rings; 13874 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 13875 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 13876 bnxt_get_ulp_msix_num(bp), 13877 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 13878 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13879 *max_cp = min_t(int, *max_cp, max_irq); 13880 max_ring_grps = hw_resc->max_hw_ring_grps; 13881 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 13882 *max_cp -= 1; 13883 *max_rx -= 2; 13884 } 13885 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13886 *max_rx >>= 1; 13887 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 13888 if (*max_cp < (*max_rx + *max_tx)) { 13889 *max_rx = *max_cp / 2; 13890 *max_tx = *max_rx; 13891 } 13892 /* On P5 chips, max_cp output param should be available NQs */ 13893 *max_cp = max_irq; 13894 } 13895 *max_rx = min_t(int, *max_rx, max_ring_grps); 13896 } 13897 13898 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 13899 { 13900 int rx, tx, cp; 13901 13902 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 13903 *max_rx = rx; 13904 *max_tx = tx; 13905 if (!rx || !tx || !cp) 13906 return -ENOMEM; 13907 13908 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 13909 } 13910 13911 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 13912 bool shared) 13913 { 13914 int rc; 13915 13916 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 13917 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 13918 /* Not enough rings, try disabling agg rings. 
*/ 13919 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 13920 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 13921 if (rc) { 13922 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 13923 bp->flags |= BNXT_FLAG_AGG_RINGS; 13924 return rc; 13925 } 13926 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 13927 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 13928 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 13929 bnxt_set_ring_params(bp); 13930 } 13931 13932 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 13933 int max_cp, max_stat, max_irq; 13934 13935 /* Reserve minimum resources for RoCE */ 13936 max_cp = bnxt_get_max_func_cp_rings(bp); 13937 max_stat = bnxt_get_max_func_stat_ctxs(bp); 13938 max_irq = bnxt_get_max_func_irqs(bp); 13939 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 13940 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 13941 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 13942 return 0; 13943 13944 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 13945 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 13946 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 13947 max_cp = min_t(int, max_cp, max_irq); 13948 max_cp = min_t(int, max_cp, max_stat); 13949 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 13950 if (rc) 13951 rc = 0; 13952 } 13953 return rc; 13954 } 13955 13956 /* In initial default shared ring setting, each shared ring must have a 13957 * RX/TX ring pair. 13958 */ 13959 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 13960 { 13961 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 13962 bp->rx_nr_rings = bp->cp_nr_rings; 13963 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 13964 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13965 } 13966 13967 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 13968 { 13969 int dflt_rings, max_rx_rings, max_tx_rings, rc; 13970 13971 if (!bnxt_can_reserve_rings(bp)) 13972 return 0; 13973 13974 if (sh) 13975 bp->flags |= BNXT_FLAG_SHARED_RINGS; 13976 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 13977 /* Reduce default rings on multi-port cards so that total default 13978 * rings do not exceed CPU count. 13979 */ 13980 if (bp->port_count > 1) { 13981 int max_rings = 13982 max_t(int, num_online_cpus() / bp->port_count, 1); 13983 13984 dflt_rings = min_t(int, dflt_rings, max_rings); 13985 } 13986 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 13987 if (rc) 13988 return rc; 13989 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 13990 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 13991 if (sh) 13992 bnxt_trim_dflt_sh_rings(bp); 13993 else 13994 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 13995 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13996 13997 rc = __bnxt_reserve_rings(bp); 13998 if (rc && rc != -ENODEV) 13999 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 14000 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14001 if (sh) 14002 bnxt_trim_dflt_sh_rings(bp); 14003 14004 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 14005 if (bnxt_need_reserve_rings(bp)) { 14006 rc = __bnxt_reserve_rings(bp); 14007 if (rc && rc != -ENODEV) 14008 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 14009 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14010 } 14011 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 14012 bp->rx_nr_rings++; 14013 bp->cp_nr_rings++; 14014 } 14015 if (rc) { 14016 bp->tx_nr_rings = 0; 14017 bp->rx_nr_rings = 0; 14018 } 14019 return rc; 14020 } 14021 14022 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 14023 { 14024 int rc; 14025 14026 if (bp->tx_nr_rings) 14027 return 0; 14028 14029 bnxt_ulp_irq_stop(bp); 14030 bnxt_clear_int_mode(bp); 14031 rc = bnxt_set_dflt_rings(bp, true); 14032 if (rc) { 14033 if (BNXT_VF(bp) && rc == -ENODEV) 14034 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 14035 else 14036 netdev_err(bp->dev, "Not enough rings available.\n"); 14037 goto init_dflt_ring_err; 14038 } 14039 rc = bnxt_init_int_mode(bp); 14040 if (rc) 14041 goto init_dflt_ring_err; 14042 14043 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14044 14045 bnxt_set_dflt_rfs(bp); 14046 14047 init_dflt_ring_err: 14048 bnxt_ulp_irq_restart(bp, rc); 14049 return rc; 14050 } 14051 14052 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 14053 { 14054 int rc; 14055 14056 ASSERT_RTNL(); 14057 bnxt_hwrm_func_qcaps(bp); 14058 14059 if (netif_running(bp->dev)) 14060 __bnxt_close_nic(bp, true, false); 14061 14062 bnxt_ulp_irq_stop(bp); 14063 bnxt_clear_int_mode(bp); 14064 rc = bnxt_init_int_mode(bp); 14065 bnxt_ulp_irq_restart(bp, rc); 14066 14067 if (netif_running(bp->dev)) { 14068 if (rc) 14069 dev_close(bp->dev); 14070 else 14071 rc = bnxt_open_nic(bp, true, false); 14072 } 14073 14074 return rc; 14075 } 14076 14077 static int bnxt_init_mac_addr(struct bnxt *bp) 14078 { 14079 int rc = 0; 14080 14081 if (BNXT_PF(bp)) { 14082 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 14083 } else { 14084 #ifdef CONFIG_BNXT_SRIOV 14085 struct bnxt_vf_info *vf = &bp->vf; 14086 bool strict_approval = true; 14087 14088 if (is_valid_ether_addr(vf->mac_addr)) { 14089 /* overwrite netdev dev_addr with admin VF MAC */ 14090 eth_hw_addr_set(bp->dev, vf->mac_addr); 14091 /* Older PF driver or firmware may not approve this 14092 * correctly. 
14093 */ 14094 strict_approval = false; 14095 } else { 14096 eth_hw_addr_random(bp->dev); 14097 } 14098 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 14099 #endif 14100 } 14101 return rc; 14102 } 14103 14104 static void bnxt_vpd_read_info(struct bnxt *bp) 14105 { 14106 struct pci_dev *pdev = bp->pdev; 14107 unsigned int vpd_size, kw_len; 14108 int pos, size; 14109 u8 *vpd_data; 14110 14111 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 14112 if (IS_ERR(vpd_data)) { 14113 pci_warn(pdev, "Unable to read VPD\n"); 14114 return; 14115 } 14116 14117 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 14118 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 14119 if (pos < 0) 14120 goto read_sn; 14121 14122 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 14123 memcpy(bp->board_partno, &vpd_data[pos], size); 14124 14125 read_sn: 14126 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 14127 PCI_VPD_RO_KEYWORD_SERIALNO, 14128 &kw_len); 14129 if (pos < 0) 14130 goto exit; 14131 14132 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 14133 memcpy(bp->board_serialno, &vpd_data[pos], size); 14134 exit: 14135 kfree(vpd_data); 14136 } 14137 14138 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 14139 { 14140 struct pci_dev *pdev = bp->pdev; 14141 u64 qword; 14142 14143 qword = pci_get_dsn(pdev); 14144 if (!qword) { 14145 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 14146 return -EOPNOTSUPP; 14147 } 14148 14149 put_unaligned_le64(qword, dsn); 14150 14151 bp->flags |= BNXT_FLAG_DSN_VALID; 14152 return 0; 14153 } 14154 14155 static int bnxt_map_db_bar(struct bnxt *bp) 14156 { 14157 if (!bp->db_size) 14158 return -ENODEV; 14159 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 14160 if (!bp->bar1) 14161 return -ENOMEM; 14162 return 0; 14163 } 14164 14165 void bnxt_print_device_info(struct bnxt *bp) 14166 { 14167 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 14168 board_info[bp->board_idx].name, 14169 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 14170 14171 pcie_print_link_status(bp->pdev); 14172 } 14173 14174 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 14175 { 14176 struct net_device *dev; 14177 struct bnxt *bp; 14178 int rc, max_irqs; 14179 14180 if (pci_is_bridge(pdev)) 14181 return -ENODEV; 14182 14183 /* Clear any pending DMA transactions from crash kernel 14184 * while loading driver in capture kernel. 
14185 */ 14186 if (is_kdump_kernel()) { 14187 pci_clear_master(pdev); 14188 pcie_flr(pdev); 14189 } 14190 14191 max_irqs = bnxt_get_max_irq(pdev); 14192 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 14193 max_irqs); 14194 if (!dev) 14195 return -ENOMEM; 14196 14197 bp = netdev_priv(dev); 14198 bp->board_idx = ent->driver_data; 14199 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 14200 bnxt_set_max_func_irqs(bp, max_irqs); 14201 14202 if (bnxt_vf_pciid(bp->board_idx)) 14203 bp->flags |= BNXT_FLAG_VF; 14204 14205 /* No devlink port registration in case of a VF */ 14206 if (BNXT_PF(bp)) 14207 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 14208 14209 if (pdev->msix_cap) 14210 bp->flags |= BNXT_FLAG_MSIX_CAP; 14211 14212 rc = bnxt_init_board(pdev, dev); 14213 if (rc < 0) 14214 goto init_err_free; 14215 14216 dev->netdev_ops = &bnxt_netdev_ops; 14217 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 14218 dev->ethtool_ops = &bnxt_ethtool_ops; 14219 pci_set_drvdata(pdev, dev); 14220 14221 rc = bnxt_alloc_hwrm_resources(bp); 14222 if (rc) 14223 goto init_err_pci_clean; 14224 14225 mutex_init(&bp->hwrm_cmd_lock); 14226 mutex_init(&bp->link_lock); 14227 14228 rc = bnxt_fw_init_one_p1(bp); 14229 if (rc) 14230 goto init_err_pci_clean; 14231 14232 if (BNXT_PF(bp)) 14233 bnxt_vpd_read_info(bp); 14234 14235 if (BNXT_CHIP_P5_PLUS(bp)) { 14236 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 14237 if (BNXT_CHIP_P7(bp)) 14238 bp->flags |= BNXT_FLAG_CHIP_P7; 14239 } 14240 14241 rc = bnxt_alloc_rss_indir_tbl(bp); 14242 if (rc) 14243 goto init_err_pci_clean; 14244 14245 rc = bnxt_fw_init_one_p2(bp); 14246 if (rc) 14247 goto init_err_pci_clean; 14248 14249 rc = bnxt_map_db_bar(bp); 14250 if (rc) { 14251 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 14252 rc); 14253 goto init_err_pci_clean; 14254 } 14255 14256 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 14257 NETIF_F_TSO | NETIF_F_TSO6 | 14258 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 14259 NETIF_F_GSO_IPXIP4 | 14260 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 14261 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 14262 NETIF_F_RXCSUM | NETIF_F_GRO; 14263 14264 if (BNXT_SUPPORTS_TPA(bp)) 14265 dev->hw_features |= NETIF_F_LRO; 14266 14267 dev->hw_enc_features = 14268 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 14269 NETIF_F_TSO | NETIF_F_TSO6 | 14270 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 14271 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 14272 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 14273 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 14274 14275 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 14276 NETIF_F_GSO_GRE_CSUM; 14277 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 14278 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 14279 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 14280 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 14281 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 14282 if (BNXT_SUPPORTS_TPA(bp)) 14283 dev->hw_features |= NETIF_F_GRO_HW; 14284 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 14285 if (dev->features & NETIF_F_GRO_HW) 14286 dev->features &= ~NETIF_F_LRO; 14287 dev->priv_flags |= IFF_UNICAST_FLT; 14288 14289 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 14290 14291 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 14292 NETDEV_XDP_ACT_RX_SG; 14293 14294 #ifdef CONFIG_BNXT_SRIOV 14295 init_waitqueue_head(&bp->sriov_cfg_wait); 14296 #endif 14297 if (BNXT_SUPPORTS_TPA(bp)) { 14298 bp->gro_func = bnxt_gro_func_5730x; 14299 if (BNXT_CHIP_P4(bp)) 
14300 bp->gro_func = bnxt_gro_func_5731x; 14301 else if (BNXT_CHIP_P5_PLUS(bp)) 14302 bp->gro_func = bnxt_gro_func_5750x; 14303 } 14304 if (!BNXT_CHIP_P4_PLUS(bp)) 14305 bp->flags |= BNXT_FLAG_DOUBLE_DB; 14306 14307 rc = bnxt_init_mac_addr(bp); 14308 if (rc) { 14309 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 14310 rc = -EADDRNOTAVAIL; 14311 goto init_err_pci_clean; 14312 } 14313 14314 if (BNXT_PF(bp)) { 14315 /* Read the adapter's DSN to use as the eswitch switch_id */ 14316 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 14317 } 14318 14319 /* MTU range: 60 - FW defined max */ 14320 dev->min_mtu = ETH_ZLEN; 14321 dev->max_mtu = bp->max_mtu; 14322 14323 rc = bnxt_probe_phy(bp, true); 14324 if (rc) 14325 goto init_err_pci_clean; 14326 14327 bnxt_set_rx_skb_mode(bp, false); 14328 bnxt_set_tpa_flags(bp); 14329 bnxt_set_ring_params(bp); 14330 rc = bnxt_set_dflt_rings(bp, true); 14331 if (rc) { 14332 if (BNXT_VF(bp) && rc == -ENODEV) { 14333 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 14334 } else { 14335 netdev_err(bp->dev, "Not enough rings available.\n"); 14336 rc = -ENOMEM; 14337 } 14338 goto init_err_pci_clean; 14339 } 14340 14341 bnxt_fw_init_one_p3(bp); 14342 14343 bnxt_init_dflt_coal(bp); 14344 14345 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 14346 bp->flags |= BNXT_FLAG_STRIP_VLAN; 14347 14348 rc = bnxt_init_int_mode(bp); 14349 if (rc) 14350 goto init_err_pci_clean; 14351 14352 /* No TC has been set yet and rings may have been trimmed due to 14353 * limited MSIX, so we re-initialize the TX rings per TC. 14354 */ 14355 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 14356 14357 if (BNXT_PF(bp)) { 14358 if (!bnxt_pf_wq) { 14359 bnxt_pf_wq = 14360 create_singlethread_workqueue("bnxt_pf_wq"); 14361 if (!bnxt_pf_wq) { 14362 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 14363 rc = -ENOMEM; 14364 goto init_err_pci_clean; 14365 } 14366 } 14367 rc = bnxt_init_tc(bp); 14368 if (rc) 14369 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 14370 rc); 14371 } 14372 14373 bnxt_inv_fw_health_reg(bp); 14374 rc = bnxt_dl_register(bp); 14375 if (rc) 14376 goto init_err_dl; 14377 14378 rc = register_netdev(dev); 14379 if (rc) 14380 goto init_err_cleanup; 14381 14382 bnxt_dl_fw_reporters_create(bp); 14383 14384 bnxt_rdma_aux_device_init(bp); 14385 14386 bnxt_print_device_info(bp); 14387 14388 pci_save_state(pdev); 14389 14390 return 0; 14391 init_err_cleanup: 14392 bnxt_dl_unregister(bp); 14393 init_err_dl: 14394 bnxt_shutdown_tc(bp); 14395 bnxt_clear_int_mode(bp); 14396 14397 init_err_pci_clean: 14398 bnxt_hwrm_func_drv_unrgtr(bp); 14399 bnxt_free_hwrm_resources(bp); 14400 bnxt_hwmon_uninit(bp); 14401 bnxt_ethtool_free(bp); 14402 bnxt_ptp_clear(bp); 14403 kfree(bp->ptp_cfg); 14404 bp->ptp_cfg = NULL; 14405 kfree(bp->fw_health); 14406 bp->fw_health = NULL; 14407 bnxt_cleanup_pci(bp); 14408 bnxt_free_ctx_mem(bp); 14409 kfree(bp->rss_indir_tbl); 14410 bp->rss_indir_tbl = NULL; 14411 14412 init_err_free: 14413 free_netdev(dev); 14414 return rc; 14415 } 14416 14417 static void bnxt_shutdown(struct pci_dev *pdev) 14418 { 14419 struct net_device *dev = pci_get_drvdata(pdev); 14420 struct bnxt *bp; 14421 14422 if (!dev) 14423 return; 14424 14425 rtnl_lock(); 14426 bp = netdev_priv(dev); 14427 if (!bp) 14428 goto shutdown_exit; 14429 14430 if (netif_running(dev)) 14431 dev_close(dev); 14432 14433 bnxt_clear_int_mode(bp); 14434 pci_disable_device(pdev); 14435 14436 if (system_state == SYSTEM_POWER_OFF) { 14437 pci_wake_from_d3(pdev, bp->wol); 
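		/* System is powering off: keep wake-on-LAN armed according to
		 * bp->wol and put the function into D3hot.
		 */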
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is only restored by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write a BAR whose value
		 * matches the previously saved value, the driver needs to
		 * write the BARs to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);
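/* Module entry points: bnxt_init() sets up debugfs and registers the PCI
 * driver; bnxt_exit() unregisters it and tears down bnxt_pf_wq, which is
 * created on demand by the first PF probed in bnxt_init_one().
 */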