/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp,
				 struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
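	/* Note: the producer index selects both the hardware descriptor
	 * (tx_desc_ring, paged via TX_RING()/TX_IDX()) and the matching
	 * software bookkeeping entry (tx_buf_ring via RING_TX()); the skb
	 * and frag count recorded below are what __bnxt_tx_int() later
	 * uses to unmap and free the packet on completion.
	 */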
tx_buf->skb = skb; 439 tx_buf->nr_frags = last_frag; 440 441 vlan_tag_flags = 0; 442 cfa_action = bnxt_xmit_get_cfa_action(skb); 443 if (skb_vlan_tag_present(skb)) { 444 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 445 skb_vlan_tag_get(skb); 446 /* Currently supports 8021Q, 8021AD vlan offloads 447 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 448 */ 449 if (skb->vlan_proto == htons(ETH_P_8021Q)) 450 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 451 } 452 453 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 454 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 455 456 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && 457 atomic_dec_if_positive(&ptp->tx_avail) >= 0) { 458 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, 459 &ptp->tx_hdr_off)) { 460 if (vlan_tag_flags) 461 ptp->tx_hdr_off += VLAN_HLEN; 462 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 463 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 464 } else { 465 atomic_inc(&bp->ptp_cfg->tx_avail); 466 } 467 } 468 } 469 470 if (unlikely(skb->no_fcs)) 471 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 472 473 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 474 !lflags) { 475 struct tx_push_buffer *tx_push_buf = txr->tx_push; 476 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 477 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 478 void __iomem *db = txr->tx_db.doorbell; 479 void *pdata = tx_push_buf->data; 480 u64 *end; 481 int j, push_len; 482 483 /* Set COAL_NOW to be ready quickly for the next push */ 484 tx_push->tx_bd_len_flags_type = 485 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 486 TX_BD_TYPE_LONG_TX_BD | 487 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 488 TX_BD_FLAGS_COAL_NOW | 489 TX_BD_FLAGS_PACKET_END | 490 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 491 492 if (skb->ip_summed == CHECKSUM_PARTIAL) 493 tx_push1->tx_bd_hsize_lflags = 494 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 495 else 496 tx_push1->tx_bd_hsize_lflags = 0; 497 498 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 499 tx_push1->tx_bd_cfa_action = 500 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 501 502 end = pdata + length; 503 end = PTR_ALIGN(end, 8) - 1; 504 *end = 0; 505 506 skb_copy_from_linear_data(skb, pdata, len); 507 pdata += len; 508 for (j = 0; j < last_frag; j++) { 509 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 510 void *fptr; 511 512 fptr = skb_frag_address_safe(frag); 513 if (!fptr) 514 goto normal_tx; 515 516 memcpy(pdata, fptr, skb_frag_size(frag)); 517 pdata += skb_frag_size(frag); 518 } 519 520 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 521 txbd->tx_bd_haddr = txr->data_mapping; 522 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 523 prod = NEXT_TX(prod); 524 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; 525 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 526 memcpy(txbd, tx_push1, sizeof(*txbd)); 527 prod = NEXT_TX(prod); 528 tx_push->doorbell = 529 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 530 DB_RING_IDX(&txr->tx_db, prod)); 531 WRITE_ONCE(txr->tx_prod, prod); 532 533 tx_buf->is_push = 1; 534 netdev_tx_sent_queue(txq, skb->len); 535 wmb(); /* Sync is_push and byte queue before pushing data */ 536 537 push_len = (length + sizeof(*tx_push) + 7) / 8; 538 if (push_len > 16) { 539 __iowrite64_copy(db, tx_push_buf, 16); 540 __iowrite32_copy(db + 4, tx_push_buf + 1, 541 (push_len - 16) << 1); 542 } else { 543 __iowrite64_copy(db, tx_push_buf, push_len); 544 } 545 546 goto tx_done; 547 } 548 549 normal_tx: 550 if (length < BNXT_MIN_PKT_SIZE) { 551 pad = BNXT_MIN_PKT_SIZE - length; 552 if 
(skb_pad(skb, pad)) 553 /* SKB already freed. */ 554 goto tx_kick_pending; 555 length = BNXT_MIN_PKT_SIZE; 556 } 557 558 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 559 560 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 561 goto tx_free; 562 563 dma_unmap_addr_set(tx_buf, mapping, mapping); 564 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 565 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 566 567 txbd->tx_bd_haddr = cpu_to_le64(mapping); 568 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 569 570 prod = NEXT_TX(prod); 571 txbd1 = (struct tx_bd_ext *) 572 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 573 574 txbd1->tx_bd_hsize_lflags = lflags; 575 if (skb_is_gso(skb)) { 576 u32 hdr_len; 577 578 if (skb->encapsulation) 579 hdr_len = skb_inner_tcp_all_headers(skb); 580 else 581 hdr_len = skb_tcp_all_headers(skb); 582 583 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 584 TX_BD_FLAGS_T_IPID | 585 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 586 length = skb_shinfo(skb)->gso_size; 587 txbd1->tx_bd_mss = cpu_to_le32(length); 588 length += hdr_len; 589 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 590 txbd1->tx_bd_hsize_lflags |= 591 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 592 txbd1->tx_bd_mss = 0; 593 } 594 595 length >>= 9; 596 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 597 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 598 skb->len); 599 i = 0; 600 goto tx_dma_error; 601 } 602 flags |= bnxt_lhint_arr[length]; 603 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 604 605 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 606 txbd1->tx_bd_cfa_action = 607 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 608 txbd0 = txbd; 609 for (i = 0; i < last_frag; i++) { 610 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 611 612 prod = NEXT_TX(prod); 613 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 614 615 len = skb_frag_size(frag); 616 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 617 DMA_TO_DEVICE); 618 619 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 620 goto tx_dma_error; 621 622 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 623 dma_unmap_addr_set(tx_buf, mapping, mapping); 624 625 txbd->tx_bd_haddr = cpu_to_le64(mapping); 626 627 flags = len << TX_BD_LEN_SHIFT; 628 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 629 } 630 631 flags &= ~TX_BD_LEN; 632 txbd->tx_bd_len_flags_type = 633 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 634 TX_BD_FLAGS_PACKET_END); 635 636 netdev_tx_sent_queue(txq, skb->len); 637 638 skb_tx_timestamp(skb); 639 640 prod = NEXT_TX(prod); 641 WRITE_ONCE(txr->tx_prod, prod); 642 643 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 644 bnxt_txr_db_kick(bp, txr, prod); 645 } else { 646 if (free_size >= bp->tx_wake_thresh) 647 txbd0->tx_bd_len_flags_type |= 648 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 649 txr->kick_pending = 1; 650 } 651 652 tx_done: 653 654 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 655 if (netdev_xmit_more() && !tx_buf->is_push) 656 bnxt_txr_db_kick(bp, txr, prod); 657 658 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 659 bp->tx_wake_thresh); 660 } 661 return NETDEV_TX_OK; 662 663 tx_dma_error: 664 if (BNXT_TX_PTP_IS_SET(lflags)) 665 atomic_inc(&bp->ptp_cfg->tx_avail); 666 667 last_frag = i; 668 669 /* start back at beginning and unmap skb */ 670 prod = txr->tx_prod; 671 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 672 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 673 
skb_headlen(skb), DMA_TO_DEVICE); 674 prod = NEXT_TX(prod); 675 676 /* unmap remaining mapped pages */ 677 for (i = 0; i < last_frag; i++) { 678 prod = NEXT_TX(prod); 679 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 680 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 681 skb_frag_size(&skb_shinfo(skb)->frags[i]), 682 DMA_TO_DEVICE); 683 } 684 685 tx_free: 686 dev_kfree_skb_any(skb); 687 tx_kick_pending: 688 if (txr->kick_pending) 689 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 690 txr->tx_buf_ring[txr->tx_prod].skb = NULL; 691 dev_core_stats_tx_dropped_inc(dev); 692 return NETDEV_TX_OK; 693 } 694 695 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 696 int budget) 697 { 698 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 699 struct pci_dev *pdev = bp->pdev; 700 u16 hw_cons = txr->tx_hw_cons; 701 unsigned int tx_bytes = 0; 702 u16 cons = txr->tx_cons; 703 int tx_pkts = 0; 704 705 while (RING_TX(bp, cons) != hw_cons) { 706 struct bnxt_sw_tx_bd *tx_buf; 707 struct sk_buff *skb; 708 int j, last; 709 710 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 711 cons = NEXT_TX(cons); 712 skb = tx_buf->skb; 713 tx_buf->skb = NULL; 714 715 if (unlikely(!skb)) { 716 bnxt_sched_reset_txr(bp, txr, cons); 717 return; 718 } 719 720 tx_pkts++; 721 tx_bytes += skb->len; 722 723 if (tx_buf->is_push) { 724 tx_buf->is_push = 0; 725 goto next_tx_int; 726 } 727 728 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 729 skb_headlen(skb), DMA_TO_DEVICE); 730 last = tx_buf->nr_frags; 731 732 for (j = 0; j < last; j++) { 733 cons = NEXT_TX(cons); 734 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 735 dma_unmap_page( 736 &pdev->dev, 737 dma_unmap_addr(tx_buf, mapping), 738 skb_frag_size(&skb_shinfo(skb)->frags[j]), 739 DMA_TO_DEVICE); 740 } 741 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 742 if (BNXT_CHIP_P5(bp)) { 743 /* PTP worker takes ownership of the skb */ 744 if (!bnxt_get_tx_ts_p5(bp, skb)) 745 skb = NULL; 746 else 747 atomic_inc(&bp->ptp_cfg->tx_avail); 748 } 749 } 750 751 next_tx_int: 752 cons = NEXT_TX(cons); 753 754 dev_consume_skb_any(skb); 755 } 756 757 WRITE_ONCE(txr->tx_cons, cons); 758 759 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 760 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 761 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 762 } 763 764 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 765 { 766 struct bnxt_tx_ring_info *txr; 767 int i; 768 769 bnxt_for_each_napi_tx(i, bnapi, txr) { 770 if (txr->tx_hw_cons != txr->tx_cons) 771 __bnxt_tx_int(bp, txr, budget); 772 } 773 bnapi->events &= ~BNXT_TX_CMP_EVENT; 774 } 775 776 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 777 struct bnxt_rx_ring_info *rxr, 778 unsigned int *offset, 779 gfp_t gfp) 780 { 781 struct page *page; 782 783 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 784 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 785 BNXT_RX_PAGE_SIZE); 786 } else { 787 page = page_pool_dev_alloc_pages(rxr->page_pool); 788 *offset = 0; 789 } 790 if (!page) 791 return NULL; 792 793 *mapping = page_pool_get_dma_addr(page) + *offset; 794 return page; 795 } 796 797 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 798 gfp_t gfp) 799 { 800 u8 *data; 801 struct pci_dev *pdev = bp->pdev; 802 803 if (gfp == GFP_ATOMIC) 804 data = napi_alloc_frag(bp->rx_buf_size); 805 else 806 data = netdev_alloc_frag(bp->rx_buf_size); 807 if (!data) 808 return NULL; 809 810 *mapping = 
dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 811 bp->rx_buf_use_size, bp->rx_dir, 812 DMA_ATTR_WEAK_ORDERING); 813 814 if (dma_mapping_error(&pdev->dev, *mapping)) { 815 skb_free_frag(data); 816 data = NULL; 817 } 818 return data; 819 } 820 821 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 822 u16 prod, gfp_t gfp) 823 { 824 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 825 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 826 dma_addr_t mapping; 827 828 if (BNXT_RX_PAGE_MODE(bp)) { 829 unsigned int offset; 830 struct page *page = 831 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 832 833 if (!page) 834 return -ENOMEM; 835 836 mapping += bp->rx_dma_offset; 837 rx_buf->data = page; 838 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 839 } else { 840 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); 841 842 if (!data) 843 return -ENOMEM; 844 845 rx_buf->data = data; 846 rx_buf->data_ptr = data + bp->rx_offset; 847 } 848 rx_buf->mapping = mapping; 849 850 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 851 return 0; 852 } 853 854 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 855 { 856 u16 prod = rxr->rx_prod; 857 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 858 struct bnxt *bp = rxr->bnapi->bp; 859 struct rx_bd *cons_bd, *prod_bd; 860 861 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 862 cons_rx_buf = &rxr->rx_buf_ring[cons]; 863 864 prod_rx_buf->data = data; 865 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 866 867 prod_rx_buf->mapping = cons_rx_buf->mapping; 868 869 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 870 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 871 872 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 873 } 874 875 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 876 { 877 u16 next, max = rxr->rx_agg_bmap_size; 878 879 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 880 if (next >= max) 881 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 882 return next; 883 } 884 885 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 886 struct bnxt_rx_ring_info *rxr, 887 u16 prod, gfp_t gfp) 888 { 889 struct rx_bd *rxbd = 890 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 891 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 892 struct page *page; 893 dma_addr_t mapping; 894 u16 sw_prod = rxr->rx_sw_agg_prod; 895 unsigned int offset = 0; 896 897 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 898 899 if (!page) 900 return -ENOMEM; 901 902 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 903 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 904 905 __set_bit(sw_prod, rxr->rx_agg_bmap); 906 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 907 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 908 909 rx_agg_buf->page = page; 910 rx_agg_buf->offset = offset; 911 rx_agg_buf->mapping = mapping; 912 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 913 rxbd->rx_bd_opaque = sw_prod; 914 return 0; 915 } 916 917 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 918 struct bnxt_cp_ring_info *cpr, 919 u16 cp_cons, u16 curr) 920 { 921 struct rx_agg_cmp *agg; 922 923 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 924 agg = (struct rx_agg_cmp *) 925 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 926 return agg; 927 } 928 929 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 930 struct bnxt_rx_ring_info *rxr, 931 u16 agg_id, u16 curr) 932 { 933 
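	/* On P5+ chips, TPA aggregation completions are buffered in
	 * tpa_info->agg_arr by bnxt_tpa_agg() and fetched from there by
	 * index, rather than read directly off the completion ring as
	 * bnxt_get_agg() does.
	 */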
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 934 935 return &tpa_info->agg_arr[curr]; 936 } 937 938 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 939 u16 start, u32 agg_bufs, bool tpa) 940 { 941 struct bnxt_napi *bnapi = cpr->bnapi; 942 struct bnxt *bp = bnapi->bp; 943 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 944 u16 prod = rxr->rx_agg_prod; 945 u16 sw_prod = rxr->rx_sw_agg_prod; 946 bool p5_tpa = false; 947 u32 i; 948 949 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 950 p5_tpa = true; 951 952 for (i = 0; i < agg_bufs; i++) { 953 u16 cons; 954 struct rx_agg_cmp *agg; 955 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 956 struct rx_bd *prod_bd; 957 struct page *page; 958 959 if (p5_tpa) 960 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 961 else 962 agg = bnxt_get_agg(bp, cpr, idx, start + i); 963 cons = agg->rx_agg_cmp_opaque; 964 __clear_bit(cons, rxr->rx_agg_bmap); 965 966 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 967 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 968 969 __set_bit(sw_prod, rxr->rx_agg_bmap); 970 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 971 cons_rx_buf = &rxr->rx_agg_ring[cons]; 972 973 /* It is possible for sw_prod to be equal to cons, so 974 * set cons_rx_buf->page to NULL first. 975 */ 976 page = cons_rx_buf->page; 977 cons_rx_buf->page = NULL; 978 prod_rx_buf->page = page; 979 prod_rx_buf->offset = cons_rx_buf->offset; 980 981 prod_rx_buf->mapping = cons_rx_buf->mapping; 982 983 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 984 985 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 986 prod_bd->rx_bd_opaque = sw_prod; 987 988 prod = NEXT_RX_AGG(prod); 989 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 990 } 991 rxr->rx_agg_prod = prod; 992 rxr->rx_sw_agg_prod = sw_prod; 993 } 994 995 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 996 struct bnxt_rx_ring_info *rxr, 997 u16 cons, void *data, u8 *data_ptr, 998 dma_addr_t dma_addr, 999 unsigned int offset_and_len) 1000 { 1001 unsigned int len = offset_and_len & 0xffff; 1002 struct page *page = data; 1003 u16 prod = rxr->rx_prod; 1004 struct sk_buff *skb; 1005 int err; 1006 1007 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1008 if (unlikely(err)) { 1009 bnxt_reuse_rx_data(rxr, cons, data); 1010 return NULL; 1011 } 1012 dma_addr -= bp->rx_dma_offset; 1013 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1014 bp->rx_dir); 1015 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1016 if (!skb) { 1017 page_pool_recycle_direct(rxr->page_pool, page); 1018 return NULL; 1019 } 1020 skb_mark_for_recycle(skb); 1021 skb_reserve(skb, bp->rx_offset); 1022 __skb_put(skb, len); 1023 1024 return skb; 1025 } 1026 1027 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1028 struct bnxt_rx_ring_info *rxr, 1029 u16 cons, void *data, u8 *data_ptr, 1030 dma_addr_t dma_addr, 1031 unsigned int offset_and_len) 1032 { 1033 unsigned int payload = offset_and_len >> 16; 1034 unsigned int len = offset_and_len & 0xffff; 1035 skb_frag_t *frag; 1036 struct page *page = data; 1037 u16 prod = rxr->rx_prod; 1038 struct sk_buff *skb; 1039 int off, err; 1040 1041 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1042 if (unlikely(err)) { 1043 bnxt_reuse_rx_data(rxr, cons, data); 1044 return NULL; 1045 } 1046 dma_addr -= bp->rx_dma_offset; 1047 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1048 bp->rx_dir); 1049 1050 if (unlikely(!payload)) 1051 payload = 
eth_get_headlen(bp->dev, data_ptr, len); 1052 1053 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1054 if (!skb) { 1055 page_pool_recycle_direct(rxr->page_pool, page); 1056 return NULL; 1057 } 1058 1059 skb_mark_for_recycle(skb); 1060 off = (void *)data_ptr - page_address(page); 1061 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1062 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1063 payload + NET_IP_ALIGN); 1064 1065 frag = &skb_shinfo(skb)->frags[0]; 1066 skb_frag_size_sub(frag, payload); 1067 skb_frag_off_add(frag, payload); 1068 skb->data_len -= payload; 1069 skb->tail += payload; 1070 1071 return skb; 1072 } 1073 1074 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1075 struct bnxt_rx_ring_info *rxr, u16 cons, 1076 void *data, u8 *data_ptr, 1077 dma_addr_t dma_addr, 1078 unsigned int offset_and_len) 1079 { 1080 u16 prod = rxr->rx_prod; 1081 struct sk_buff *skb; 1082 int err; 1083 1084 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1085 if (unlikely(err)) { 1086 bnxt_reuse_rx_data(rxr, cons, data); 1087 return NULL; 1088 } 1089 1090 skb = napi_build_skb(data, bp->rx_buf_size); 1091 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1092 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 1093 if (!skb) { 1094 skb_free_frag(data); 1095 return NULL; 1096 } 1097 1098 skb_reserve(skb, bp->rx_offset); 1099 skb_put(skb, offset_and_len & 0xffff); 1100 return skb; 1101 } 1102 1103 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1104 struct bnxt_cp_ring_info *cpr, 1105 struct skb_shared_info *shinfo, 1106 u16 idx, u32 agg_bufs, bool tpa, 1107 struct xdp_buff *xdp) 1108 { 1109 struct bnxt_napi *bnapi = cpr->bnapi; 1110 struct pci_dev *pdev = bp->pdev; 1111 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1112 u16 prod = rxr->rx_agg_prod; 1113 u32 i, total_frag_len = 0; 1114 bool p5_tpa = false; 1115 1116 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1117 p5_tpa = true; 1118 1119 for (i = 0; i < agg_bufs; i++) { 1120 skb_frag_t *frag = &shinfo->frags[i]; 1121 u16 cons, frag_len; 1122 struct rx_agg_cmp *agg; 1123 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1124 struct page *page; 1125 dma_addr_t mapping; 1126 1127 if (p5_tpa) 1128 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1129 else 1130 agg = bnxt_get_agg(bp, cpr, idx, i); 1131 cons = agg->rx_agg_cmp_opaque; 1132 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1133 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1134 1135 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1136 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1137 cons_rx_buf->offset, frag_len); 1138 shinfo->nr_frags = i + 1; 1139 __clear_bit(cons, rxr->rx_agg_bmap); 1140 1141 /* It is possible for bnxt_alloc_rx_page() to allocate 1142 * a sw_prod index that equals the cons index, so we 1143 * need to clear the cons entry now. 1144 */ 1145 mapping = cons_rx_buf->mapping; 1146 page = cons_rx_buf->page; 1147 cons_rx_buf->page = NULL; 1148 1149 if (xdp && page_is_pfmemalloc(page)) 1150 xdp_buff_set_frag_pfmemalloc(xdp); 1151 1152 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1153 --shinfo->nr_frags; 1154 cons_rx_buf->page = page; 1155 1156 /* Update prod since possibly some pages have been 1157 * allocated already. 
1158 */ 1159 rxr->rx_agg_prod = prod; 1160 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1161 return 0; 1162 } 1163 1164 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1165 bp->rx_dir); 1166 1167 total_frag_len += frag_len; 1168 prod = NEXT_RX_AGG(prod); 1169 } 1170 rxr->rx_agg_prod = prod; 1171 return total_frag_len; 1172 } 1173 1174 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1175 struct bnxt_cp_ring_info *cpr, 1176 struct sk_buff *skb, u16 idx, 1177 u32 agg_bufs, bool tpa) 1178 { 1179 struct skb_shared_info *shinfo = skb_shinfo(skb); 1180 u32 total_frag_len = 0; 1181 1182 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1183 agg_bufs, tpa, NULL); 1184 if (!total_frag_len) { 1185 skb_mark_for_recycle(skb); 1186 dev_kfree_skb(skb); 1187 return NULL; 1188 } 1189 1190 skb->data_len += total_frag_len; 1191 skb->len += total_frag_len; 1192 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1193 return skb; 1194 } 1195 1196 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1197 struct bnxt_cp_ring_info *cpr, 1198 struct xdp_buff *xdp, u16 idx, 1199 u32 agg_bufs, bool tpa) 1200 { 1201 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1202 u32 total_frag_len = 0; 1203 1204 if (!xdp_buff_has_frags(xdp)) 1205 shinfo->nr_frags = 0; 1206 1207 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1208 idx, agg_bufs, tpa, xdp); 1209 if (total_frag_len) { 1210 xdp_buff_set_frags_flag(xdp); 1211 shinfo->nr_frags = agg_bufs; 1212 shinfo->xdp_frags_size = total_frag_len; 1213 } 1214 return total_frag_len; 1215 } 1216 1217 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1218 u8 agg_bufs, u32 *raw_cons) 1219 { 1220 u16 last; 1221 struct rx_agg_cmp *agg; 1222 1223 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1224 last = RING_CMP(*raw_cons); 1225 agg = (struct rx_agg_cmp *) 1226 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1227 return RX_AGG_CMP_VALID(agg, *raw_cons); 1228 } 1229 1230 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1231 unsigned int len, 1232 dma_addr_t mapping) 1233 { 1234 struct bnxt *bp = bnapi->bp; 1235 struct pci_dev *pdev = bp->pdev; 1236 struct sk_buff *skb; 1237 1238 skb = napi_alloc_skb(&bnapi->napi, len); 1239 if (!skb) 1240 return NULL; 1241 1242 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1243 bp->rx_dir); 1244 1245 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1246 len + NET_IP_ALIGN); 1247 1248 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1249 bp->rx_dir); 1250 1251 skb_put(skb, len); 1252 return skb; 1253 } 1254 1255 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1256 u32 *raw_cons, void *cmp) 1257 { 1258 struct rx_cmp *rxcmp = cmp; 1259 u32 tmp_raw_cons = *raw_cons; 1260 u8 cmp_type, agg_bufs = 0; 1261 1262 cmp_type = RX_CMP_TYPE(rxcmp); 1263 1264 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1265 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1266 RX_CMP_AGG_BUFS) >> 1267 RX_CMP_AGG_BUFS_SHIFT; 1268 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1269 struct rx_tpa_end_cmp *tpa_end = cmp; 1270 1271 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1272 return 0; 1273 1274 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1275 } 1276 1277 if (agg_bufs) { 1278 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1279 return -EBUSY; 1280 } 1281 *raw_cons = tmp_raw_cons; 1282 return 0; 1283 } 1284 1285 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1286 { 
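	/* Map the hardware-assigned aggregation ID to a free software
	 * slot: the low bits of agg_id are tried first, and if that slot
	 * is busy the first free bit in agg_idx_bmap is used instead.
	 * The chosen index is recorded in agg_id_tbl so that
	 * bnxt_lookup_agg_idx() can find it at TPA end time.
	 */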
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset_rxr(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
1344 */ 1345 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1346 prod_rx_buf->data = tpa_info->data; 1347 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1348 1349 mapping = tpa_info->mapping; 1350 prod_rx_buf->mapping = mapping; 1351 1352 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1353 1354 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1355 1356 tpa_info->data = cons_rx_buf->data; 1357 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1358 cons_rx_buf->data = NULL; 1359 tpa_info->mapping = cons_rx_buf->mapping; 1360 1361 tpa_info->len = 1362 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1363 RX_TPA_START_CMP_LEN_SHIFT; 1364 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1365 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1366 1367 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1368 tpa_info->gso_type = SKB_GSO_TCPV4; 1369 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1370 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1)) 1371 tpa_info->gso_type = SKB_GSO_TCPV6; 1372 tpa_info->rss_hash = 1373 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1374 } else { 1375 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1376 tpa_info->gso_type = 0; 1377 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1378 } 1379 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1380 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1381 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1382 tpa_info->agg_count = 0; 1383 1384 rxr->rx_prod = NEXT_RX(prod); 1385 cons = RING_RX(bp, NEXT_RX(cons)); 1386 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1387 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1388 1389 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1390 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1391 cons_rx_buf->data = NULL; 1392 } 1393 1394 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1395 { 1396 if (agg_bufs) 1397 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1398 } 1399 1400 #ifdef CONFIG_INET 1401 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1402 { 1403 struct udphdr *uh = NULL; 1404 1405 if (ip_proto == htons(ETH_P_IP)) { 1406 struct iphdr *iph = (struct iphdr *)skb->data; 1407 1408 if (iph->protocol == IPPROTO_UDP) 1409 uh = (struct udphdr *)(iph + 1); 1410 } else { 1411 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1412 1413 if (iph->nexthdr == IPPROTO_UDP) 1414 uh = (struct udphdr *)(iph + 1); 1415 } 1416 if (uh) { 1417 if (uh->check) 1418 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1419 else 1420 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1421 } 1422 } 1423 #endif 1424 1425 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1426 int payload_off, int tcp_ts, 1427 struct sk_buff *skb) 1428 { 1429 #ifdef CONFIG_INET 1430 struct tcphdr *th; 1431 int len, nw_off; 1432 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1433 u32 hdr_info = tpa_info->hdr_info; 1434 bool loopback = false; 1435 1436 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1437 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1438 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1439 1440 /* If the packet is an internal loopback packet, the offsets will 1441 * have an extra 4 bytes. 1442 */ 1443 if (inner_mac_off == 4) { 1444 loopback = true; 1445 } else if (inner_mac_off > 4) { 1446 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1447 ETH_HLEN - 2)); 1448 1449 /* We only support inner iPv4/ipv6. 
If we don't see the 1450 * correct protocol ID, it must be a loopback packet where 1451 * the offsets are off by 4. 1452 */ 1453 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1454 loopback = true; 1455 } 1456 if (loopback) { 1457 /* internal loopback packet, subtract all offsets by 4 */ 1458 inner_ip_off -= 4; 1459 inner_mac_off -= 4; 1460 outer_ip_off -= 4; 1461 } 1462 1463 nw_off = inner_ip_off - ETH_HLEN; 1464 skb_set_network_header(skb, nw_off); 1465 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1466 struct ipv6hdr *iph = ipv6_hdr(skb); 1467 1468 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1469 len = skb->len - skb_transport_offset(skb); 1470 th = tcp_hdr(skb); 1471 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1472 } else { 1473 struct iphdr *iph = ip_hdr(skb); 1474 1475 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1476 len = skb->len - skb_transport_offset(skb); 1477 th = tcp_hdr(skb); 1478 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1479 } 1480 1481 if (inner_mac_off) { /* tunnel */ 1482 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1483 ETH_HLEN - 2)); 1484 1485 bnxt_gro_tunnel(skb, proto); 1486 } 1487 #endif 1488 return skb; 1489 } 1490 1491 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1492 int payload_off, int tcp_ts, 1493 struct sk_buff *skb) 1494 { 1495 #ifdef CONFIG_INET 1496 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1497 u32 hdr_info = tpa_info->hdr_info; 1498 int iphdr_len, nw_off; 1499 1500 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1501 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1502 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1503 1504 nw_off = inner_ip_off - ETH_HLEN; 1505 skb_set_network_header(skb, nw_off); 1506 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if the vf-rep dev is NULL, the packet must belong to the PF */
	return dev ?
dev : bp->dev; 1605 } 1606 1607 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1608 struct bnxt_cp_ring_info *cpr, 1609 u32 *raw_cons, 1610 struct rx_tpa_end_cmp *tpa_end, 1611 struct rx_tpa_end_cmp_ext *tpa_end1, 1612 u8 *event) 1613 { 1614 struct bnxt_napi *bnapi = cpr->bnapi; 1615 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1616 u8 *data_ptr, agg_bufs; 1617 unsigned int len; 1618 struct bnxt_tpa_info *tpa_info; 1619 dma_addr_t mapping; 1620 struct sk_buff *skb; 1621 u16 idx = 0, agg_id; 1622 void *data; 1623 bool gro; 1624 1625 if (unlikely(bnapi->in_reset)) { 1626 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1627 1628 if (rc < 0) 1629 return ERR_PTR(-EBUSY); 1630 return NULL; 1631 } 1632 1633 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1634 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1635 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1636 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1637 tpa_info = &rxr->rx_tpa[agg_id]; 1638 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1639 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1640 agg_bufs, tpa_info->agg_count); 1641 agg_bufs = tpa_info->agg_count; 1642 } 1643 tpa_info->agg_count = 0; 1644 *event |= BNXT_AGG_EVENT; 1645 bnxt_free_agg_idx(rxr, agg_id); 1646 idx = agg_id; 1647 gro = !!(bp->flags & BNXT_FLAG_GRO); 1648 } else { 1649 agg_id = TPA_END_AGG_ID(tpa_end); 1650 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1651 tpa_info = &rxr->rx_tpa[agg_id]; 1652 idx = RING_CMP(*raw_cons); 1653 if (agg_bufs) { 1654 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1655 return ERR_PTR(-EBUSY); 1656 1657 *event |= BNXT_AGG_EVENT; 1658 idx = NEXT_CMP(idx); 1659 } 1660 gro = !!TPA_END_GRO(tpa_end); 1661 } 1662 data = tpa_info->data; 1663 data_ptr = tpa_info->data_ptr; 1664 prefetch(data_ptr); 1665 len = tpa_info->len; 1666 mapping = tpa_info->mapping; 1667 1668 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1669 bnxt_abort_tpa(cpr, idx, agg_bufs); 1670 if (agg_bufs > MAX_SKB_FRAGS) 1671 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1672 agg_bufs, (int)MAX_SKB_FRAGS); 1673 return NULL; 1674 } 1675 1676 if (len <= bp->rx_copy_thresh) { 1677 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1678 if (!skb) { 1679 bnxt_abort_tpa(cpr, idx, agg_bufs); 1680 cpr->sw_stats.rx.rx_oom_discards += 1; 1681 return NULL; 1682 } 1683 } else { 1684 u8 *new_data; 1685 dma_addr_t new_mapping; 1686 1687 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); 1688 if (!new_data) { 1689 bnxt_abort_tpa(cpr, idx, agg_bufs); 1690 cpr->sw_stats.rx.rx_oom_discards += 1; 1691 return NULL; 1692 } 1693 1694 tpa_info->data = new_data; 1695 tpa_info->data_ptr = new_data + bp->rx_offset; 1696 tpa_info->mapping = new_mapping; 1697 1698 skb = napi_build_skb(data, bp->rx_buf_size); 1699 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1700 bp->rx_buf_use_size, bp->rx_dir, 1701 DMA_ATTR_WEAK_ORDERING); 1702 1703 if (!skb) { 1704 skb_free_frag(data); 1705 bnxt_abort_tpa(cpr, idx, agg_bufs); 1706 cpr->sw_stats.rx.rx_oom_discards += 1; 1707 return NULL; 1708 } 1709 skb_reserve(skb, bp->rx_offset); 1710 skb_put(skb, len); 1711 } 1712 1713 if (agg_bufs) { 1714 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1715 if (!skb) { 1716 /* Page reuse already handled by bnxt_rx_pages(). 
*/ 1717 cpr->sw_stats.rx.rx_oom_discards += 1; 1718 return NULL; 1719 } 1720 } 1721 1722 skb->protocol = 1723 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1724 1725 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1726 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1727 1728 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1729 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1730 __be16 vlan_proto = htons(tpa_info->metadata >> 1731 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1732 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1733 1734 if (eth_type_vlan(vlan_proto)) { 1735 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1736 } else { 1737 dev_kfree_skb(skb); 1738 return NULL; 1739 } 1740 } 1741 1742 skb_checksum_none_assert(skb); 1743 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1744 skb->ip_summed = CHECKSUM_UNNECESSARY; 1745 skb->csum_level = 1746 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1747 } 1748 1749 if (gro) 1750 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1751 1752 return skb; 1753 } 1754 1755 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1756 struct rx_agg_cmp *rx_agg) 1757 { 1758 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1759 struct bnxt_tpa_info *tpa_info; 1760 1761 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1762 tpa_info = &rxr->rx_tpa[agg_id]; 1763 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1764 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1765 } 1766 1767 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1768 struct sk_buff *skb) 1769 { 1770 if (skb->dev != bp->dev) { 1771 /* this packet belongs to a vf-rep */ 1772 bnxt_vf_rep_rx(bp, skb); 1773 return; 1774 } 1775 skb_record_rx_queue(skb, bnapi->index); 1776 skb_mark_for_recycle(skb); 1777 napi_gro_receive(&bnapi->napi, skb); 1778 } 1779 1780 /* returns the following: 1781 * 1 - 1 packet successfully received 1782 * 0 - successful TPA_START, packet not completed yet 1783 * -EBUSY - completion ring does not have all the agg buffers yet 1784 * -ENOMEM - packet aborted due to out of memory 1785 * -EIO - packet aborted due to hw error indicated in BD 1786 */ 1787 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1788 u32 *raw_cons, u8 *event) 1789 { 1790 struct bnxt_napi *bnapi = cpr->bnapi; 1791 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1792 struct net_device *dev = bp->dev; 1793 struct rx_cmp *rxcmp; 1794 struct rx_cmp_ext *rxcmp1; 1795 u32 tmp_raw_cons = *raw_cons; 1796 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1797 struct bnxt_sw_rx_bd *rx_buf; 1798 unsigned int len; 1799 u8 *data_ptr, agg_bufs, cmp_type; 1800 bool xdp_active = false; 1801 dma_addr_t dma_addr; 1802 struct sk_buff *skb; 1803 struct xdp_buff xdp; 1804 u32 flags, misc; 1805 void *data; 1806 int rc = 0; 1807 1808 rxcmp = (struct rx_cmp *) 1809 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1810 1811 cmp_type = RX_CMP_TYPE(rxcmp); 1812 1813 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1814 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1815 goto next_rx_no_prod_no_len; 1816 } 1817 1818 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1819 cp_cons = RING_CMP(tmp_raw_cons); 1820 rxcmp1 = (struct rx_cmp_ext *) 1821 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1822 1823 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1824 return -EBUSY; 1825 1826 /* The valid test of the entry must be done first before 1827 * reading any further. 
1828 */ 1829 dma_rmb(); 1830 prod = rxr->rx_prod; 1831 1832 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1833 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1834 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1835 1836 *event |= BNXT_RX_EVENT; 1837 goto next_rx_no_prod_no_len; 1838 1839 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1840 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 1841 (struct rx_tpa_end_cmp *)rxcmp, 1842 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1843 1844 if (IS_ERR(skb)) 1845 return -EBUSY; 1846 1847 rc = -ENOMEM; 1848 if (likely(skb)) { 1849 bnxt_deliver_skb(bp, bnapi, skb); 1850 rc = 1; 1851 } 1852 *event |= BNXT_RX_EVENT; 1853 goto next_rx_no_prod_no_len; 1854 } 1855 1856 cons = rxcmp->rx_cmp_opaque; 1857 if (unlikely(cons != rxr->rx_next_cons)) { 1858 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 1859 1860 /* 0xffff is forced error, don't print it */ 1861 if (rxr->rx_next_cons != 0xffff) 1862 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1863 cons, rxr->rx_next_cons); 1864 bnxt_sched_reset_rxr(bp, rxr); 1865 if (rc1) 1866 return rc1; 1867 goto next_rx_no_prod_no_len; 1868 } 1869 rx_buf = &rxr->rx_buf_ring[cons]; 1870 data = rx_buf->data; 1871 data_ptr = rx_buf->data_ptr; 1872 prefetch(data_ptr); 1873 1874 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1875 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1876 1877 if (agg_bufs) { 1878 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1879 return -EBUSY; 1880 1881 cp_cons = NEXT_CMP(cp_cons); 1882 *event |= BNXT_AGG_EVENT; 1883 } 1884 *event |= BNXT_RX_EVENT; 1885 1886 rx_buf->data = NULL; 1887 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1888 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1889 1890 bnxt_reuse_rx_data(rxr, cons, data); 1891 if (agg_bufs) 1892 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1893 false); 1894 1895 rc = -EIO; 1896 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1897 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 1898 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 1899 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 1900 netdev_warn_once(bp->dev, "RX buffer error %x\n", 1901 rx_err); 1902 bnxt_sched_reset_rxr(bp, rxr); 1903 } 1904 } 1905 goto next_rx_no_len; 1906 } 1907 1908 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 1909 len = flags >> RX_CMP_LEN_SHIFT; 1910 dma_addr = rx_buf->mapping; 1911 1912 if (bnxt_xdp_attached(bp, rxr)) { 1913 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 1914 if (agg_bufs) { 1915 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 1916 cp_cons, agg_bufs, 1917 false); 1918 if (!frag_len) { 1919 cpr->sw_stats.rx.rx_oom_discards += 1; 1920 rc = -ENOMEM; 1921 goto next_rx; 1922 } 1923 } 1924 xdp_active = true; 1925 } 1926 1927 if (xdp_active) { 1928 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { 1929 rc = 1; 1930 goto next_rx; 1931 } 1932 } 1933 1934 if (len <= bp->rx_copy_thresh) { 1935 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1936 bnxt_reuse_rx_data(rxr, cons, data); 1937 if (!skb) { 1938 if (agg_bufs) { 1939 if (!xdp_active) 1940 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1941 agg_bufs, false); 1942 else 1943 bnxt_xdp_buff_frags_free(rxr, &xdp); 1944 } 1945 cpr->sw_stats.rx.rx_oom_discards += 1; 1946 rc = -ENOMEM; 1947 goto next_rx; 1948 } 1949 } else { 1950 u32 payload; 1951 1952 if (rx_buf->data_ptr == data_ptr) 1953 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1954 else 1955 payload = 0; 1956 skb = bp->rx_skb_func(bp, rxr, cons, data, 
data_ptr, dma_addr, 1957 payload | len); 1958 if (!skb) { 1959 cpr->sw_stats.rx.rx_oom_discards += 1; 1960 rc = -ENOMEM; 1961 goto next_rx; 1962 } 1963 } 1964 1965 if (agg_bufs) { 1966 if (!xdp_active) { 1967 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 1968 if (!skb) { 1969 cpr->sw_stats.rx.rx_oom_discards += 1; 1970 rc = -ENOMEM; 1971 goto next_rx; 1972 } 1973 } else { 1974 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); 1975 if (!skb) { 1976 /* we should be able to free the old skb here */ 1977 bnxt_xdp_buff_frags_free(rxr, &xdp); 1978 cpr->sw_stats.rx.rx_oom_discards += 1; 1979 rc = -ENOMEM; 1980 goto next_rx; 1981 } 1982 } 1983 } 1984 1985 if (RX_CMP_HASH_VALID(rxcmp)) { 1986 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1987 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1988 1989 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1990 if (hash_type != 1 && hash_type != 3) 1991 type = PKT_HASH_TYPE_L3; 1992 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1993 } 1994 1995 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1996 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1997 1998 if ((rxcmp1->rx_cmp_flags2 & 1999 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 2000 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 2001 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 2002 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 2003 __be16 vlan_proto = htons(meta_data >> 2004 RX_CMP_FLAGS2_METADATA_TPID_SFT); 2005 2006 if (eth_type_vlan(vlan_proto)) { 2007 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2008 } else { 2009 dev_kfree_skb(skb); 2010 goto next_rx; 2011 } 2012 } 2013 2014 skb_checksum_none_assert(skb); 2015 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2016 if (dev->features & NETIF_F_RXCSUM) { 2017 skb->ip_summed = CHECKSUM_UNNECESSARY; 2018 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2019 } 2020 } else { 2021 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2022 if (dev->features & NETIF_F_RXCSUM) 2023 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 2024 } 2025 } 2026 2027 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == 2028 RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { 2029 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2030 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 2031 u64 ns, ts; 2032 2033 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2034 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2035 2036 spin_lock_bh(&ptp->ptp_lock); 2037 ns = timecounter_cyc2time(&ptp->tc, ts); 2038 spin_unlock_bh(&ptp->ptp_lock); 2039 memset(skb_hwtstamps(skb), 0, 2040 sizeof(*skb_hwtstamps(skb))); 2041 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2042 } 2043 } 2044 } 2045 bnxt_deliver_skb(bp, bnapi, skb); 2046 rc = 1; 2047 2048 next_rx: 2049 cpr->rx_packets += 1; 2050 cpr->rx_bytes += len; 2051 2052 next_rx_no_len: 2053 rxr->rx_prod = NEXT_RX(prod); 2054 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2055 2056 next_rx_no_prod_no_len: 2057 *raw_cons = tmp_raw_cons; 2058 2059 return rc; 2060 } 2061 2062 /* In netpoll mode, if we are using a combined completion ring, we need to 2063 * discard the rx packets and recycle the buffers. 
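 * This is done by forcing an error bit into the completion (a CRC
 * error for L2 completions, TPA error bits for TPA end completions)
 * so that bnxt_rx_pkt() takes its error path and recycles the
 * buffers instead of building an skb.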
2064 */ 2065 static int bnxt_force_rx_discard(struct bnxt *bp, 2066 struct bnxt_cp_ring_info *cpr, 2067 u32 *raw_cons, u8 *event) 2068 { 2069 u32 tmp_raw_cons = *raw_cons; 2070 struct rx_cmp_ext *rxcmp1; 2071 struct rx_cmp *rxcmp; 2072 u16 cp_cons; 2073 u8 cmp_type; 2074 int rc; 2075 2076 cp_cons = RING_CMP(tmp_raw_cons); 2077 rxcmp = (struct rx_cmp *) 2078 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2079 2080 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2081 cp_cons = RING_CMP(tmp_raw_cons); 2082 rxcmp1 = (struct rx_cmp_ext *) 2083 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2084 2085 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2086 return -EBUSY; 2087 2088 /* The valid test of the entry must be done first before 2089 * reading any further. 2090 */ 2091 dma_rmb(); 2092 cmp_type = RX_CMP_TYPE(rxcmp); 2093 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 2094 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2095 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2096 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2097 struct rx_tpa_end_cmp_ext *tpa_end1; 2098 2099 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2100 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2101 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2102 } 2103 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2104 if (rc && rc != -EBUSY) 2105 cpr->sw_stats.rx.rx_netpoll_discards += 1; 2106 return rc; 2107 } 2108 2109 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2110 { 2111 struct bnxt_fw_health *fw_health = bp->fw_health; 2112 u32 reg = fw_health->regs[reg_idx]; 2113 u32 reg_type, reg_off, val = 0; 2114 2115 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2116 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2117 switch (reg_type) { 2118 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2119 pci_read_config_dword(bp->pdev, reg_off, &val); 2120 break; 2121 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2122 reg_off = fw_health->mapped_regs[reg_idx]; 2123 fallthrough; 2124 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2125 val = readl(bp->bar0 + reg_off); 2126 break; 2127 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2128 val = readl(bp->bar1 + reg_off); 2129 break; 2130 } 2131 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2132 val &= fw_health->fw_reset_inprog_reg_mask; 2133 return val; 2134 } 2135 2136 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2137 { 2138 int i; 2139 2140 for (i = 0; i < bp->rx_nr_rings; i++) { 2141 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2142 struct bnxt_ring_grp_info *grp_info; 2143 2144 grp_info = &bp->grp_info[grp_idx]; 2145 if (grp_info->agg_fw_ring_id == ring_id) 2146 return grp_idx; 2147 } 2148 return INVALID_HW_RING_ID; 2149 } 2150 2151 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2152 { 2153 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2154 return link_info->force_pam4_link_speed; 2155 return link_info->force_link_speed; 2156 } 2157 2158 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2159 { 2160 link_info->req_link_speed = link_info->force_link_speed; 2161 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2162 if (link_info->force_pam4_link_speed) { 2163 link_info->req_link_speed = link_info->force_pam4_link_speed; 2164 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2165 } 2166 } 2167 2168 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2169 { 2170 link_info->advertising = link_info->auto_link_speeds; 2171 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2172 } 2173 2174 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2175 { 2176 if (link_info->req_signal_mode == 
BNXT_SIG_MODE_NRZ && 2177 link_info->req_link_speed != link_info->force_link_speed) 2178 return true; 2179 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2180 link_info->req_link_speed != link_info->force_pam4_link_speed) 2181 return true; 2182 return false; 2183 } 2184 2185 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2186 { 2187 if (link_info->advertising != link_info->auto_link_speeds || 2188 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2189 return true; 2190 return false; 2191 } 2192 2193 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2194 ((data2) & \ 2195 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2196 2197 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2198 (((data2) & \ 2199 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2200 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2201 2202 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2203 ((data1) & \ 2204 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2205 2206 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2207 (((data1) & \ 2208 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2209 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2210 2211 /* Return true if the workqueue has to be scheduled */ 2212 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2213 { 2214 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2215 2216 switch (err_type) { 2217 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2218 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n", 2219 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2220 break; 2221 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2222 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2223 break; 2224 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2225 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2226 break; 2227 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2228 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2229 char *threshold_type; 2230 bool notify = false; 2231 char *dir_str; 2232 2233 switch (type) { 2234 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2235 threshold_type = "warning"; 2236 break; 2237 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2238 threshold_type = "critical"; 2239 break; 2240 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2241 threshold_type = "fatal"; 2242 break; 2243 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2244 threshold_type = "shutdown"; 2245 break; 2246 default: 2247 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2248 return false; 2249 } 2250 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2251 dir_str = "above"; 2252 notify = true; 2253 } else { 2254 dir_str = "below"; 2255 } 2256 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2257 dir_str, threshold_type); 2258 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2259 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2260 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2261 if (notify) { 2262 
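/* Only an increasing temperature crossing is acted upon: record
 * the threshold type and let the slow-path task handle it.
 */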
bp->thermal_threshold_type = type; 2263 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2264 return true; 2265 } 2266 return false; 2267 } 2268 default: 2269 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2270 err_type); 2271 break; 2272 } 2273 return false; 2274 } 2275 2276 #define BNXT_GET_EVENT_PORT(data) \ 2277 ((data) & \ 2278 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2279 2280 #define BNXT_EVENT_RING_TYPE(data2) \ 2281 ((data2) & \ 2282 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2283 2284 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2285 (BNXT_EVENT_RING_TYPE(data2) == \ 2286 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2287 2288 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2289 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2290 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2291 2292 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2293 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2294 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2295 2296 #define BNXT_PHC_BITS 48 2297 2298 static int bnxt_async_event_process(struct bnxt *bp, 2299 struct hwrm_async_event_cmpl *cmpl) 2300 { 2301 u16 event_id = le16_to_cpu(cmpl->event_id); 2302 u32 data1 = le32_to_cpu(cmpl->event_data1); 2303 u32 data2 = le32_to_cpu(cmpl->event_data2); 2304 2305 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2306 event_id, data1, data2); 2307 2308 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 2309 switch (event_id) { 2310 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2311 struct bnxt_link_info *link_info = &bp->link_info; 2312 2313 if (BNXT_VF(bp)) 2314 goto async_event_process_exit; 2315 2316 /* print unsupported speed warning in forced speed mode only */ 2317 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2318 (data1 & 0x20000)) { 2319 u16 fw_speed = bnxt_get_force_speed(link_info); 2320 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2321 2322 if (speed != SPEED_UNKNOWN) 2323 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2324 speed); 2325 } 2326 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2327 } 2328 fallthrough; 2329 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2330 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2331 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2332 fallthrough; 2333 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2334 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2335 break; 2336 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2337 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2338 break; 2339 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2340 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2341 2342 if (BNXT_VF(bp)) 2343 break; 2344 2345 if (bp->pf.port_id != port_id) 2346 break; 2347 2348 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2349 break; 2350 } 2351 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2352 if (BNXT_PF(bp)) 2353 goto async_event_process_exit; 2354 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2355 break; 2356 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2357 char *type_str = "Solicited"; 2358 2359 if (!bp->fw_health) 2360 goto async_event_process_exit; 2361 2362 bp->fw_reset_timestamp = jiffies; 2363 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2364 if (!bp->fw_reset_min_dsecs) 2365 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2366 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2367 if 
(!bp->fw_reset_max_dsecs) 2368 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2369 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2370 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2371 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2372 type_str = "Fatal"; 2373 bp->fw_health->fatalities++; 2374 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2375 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2376 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2377 type_str = "Non-fatal"; 2378 bp->fw_health->survivals++; 2379 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2380 } 2381 netif_warn(bp, hw, bp->dev, 2382 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2383 type_str, data1, data2, 2384 bp->fw_reset_min_dsecs * 100, 2385 bp->fw_reset_max_dsecs * 100); 2386 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2387 break; 2388 } 2389 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2390 struct bnxt_fw_health *fw_health = bp->fw_health; 2391 char *status_desc = "healthy"; 2392 u32 status; 2393 2394 if (!fw_health) 2395 goto async_event_process_exit; 2396 2397 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2398 fw_health->enabled = false; 2399 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2400 break; 2401 } 2402 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2403 fw_health->tmr_multiplier = 2404 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2405 bp->current_interval * 10); 2406 fw_health->tmr_counter = fw_health->tmr_multiplier; 2407 if (!fw_health->enabled) 2408 fw_health->last_fw_heartbeat = 2409 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2410 fw_health->last_fw_reset_cnt = 2411 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2412 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2413 if (status != BNXT_FW_STATUS_HEALTHY) 2414 status_desc = "unhealthy"; 2415 netif_info(bp, drv, bp->dev, 2416 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2417 fw_health->primary ? "primary" : "backup", status, 2418 status_desc, fw_health->last_fw_reset_cnt); 2419 if (!fw_health->enabled) { 2420 /* Make sure tmr_counter is set and visible to 2421 * bnxt_health_check() before setting enabled to true. 
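 * The smp_wmb() below provides the required write ordering.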
2422 */ 2423 smp_wmb(); 2424 fw_health->enabled = true; 2425 } 2426 goto async_event_process_exit; 2427 } 2428 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2429 netif_notice(bp, hw, bp->dev, 2430 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2431 data1, data2); 2432 goto async_event_process_exit; 2433 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2434 struct bnxt_rx_ring_info *rxr; 2435 u16 grp_idx; 2436 2437 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2438 goto async_event_process_exit; 2439 2440 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2441 BNXT_EVENT_RING_TYPE(data2), data1); 2442 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2443 goto async_event_process_exit; 2444 2445 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2446 if (grp_idx == INVALID_HW_RING_ID) { 2447 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2448 data1); 2449 goto async_event_process_exit; 2450 } 2451 rxr = bp->bnapi[grp_idx]->rx_ring; 2452 bnxt_sched_reset_rxr(bp, rxr); 2453 goto async_event_process_exit; 2454 } 2455 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2456 struct bnxt_fw_health *fw_health = bp->fw_health; 2457 2458 netif_notice(bp, hw, bp->dev, 2459 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2460 data1, data2); 2461 if (fw_health) { 2462 fw_health->echo_req_data1 = data1; 2463 fw_health->echo_req_data2 = data2; 2464 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2465 break; 2466 } 2467 goto async_event_process_exit; 2468 } 2469 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2470 bnxt_ptp_pps_event(bp, data1, data2); 2471 goto async_event_process_exit; 2472 } 2473 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2474 if (bnxt_event_error_report(bp, data1, data2)) 2475 break; 2476 goto async_event_process_exit; 2477 } 2478 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2479 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2480 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2481 if (BNXT_PTP_USE_RTC(bp)) { 2482 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2483 u64 ns; 2484 2485 if (!ptp) 2486 goto async_event_process_exit; 2487 2488 spin_lock_bh(&ptp->ptp_lock); 2489 bnxt_ptp_update_current_time(bp); 2490 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2491 BNXT_PHC_BITS) | ptp->current_time); 2492 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2493 spin_unlock_bh(&ptp->ptp_lock); 2494 } 2495 break; 2496 } 2497 goto async_event_process_exit; 2498 } 2499 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2500 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2501 2502 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2503 goto async_event_process_exit; 2504 } 2505 default: 2506 goto async_event_process_exit; 2507 } 2508 __bnxt_queue_sp_work(bp); 2509 async_event_process_exit: 2510 return 0; 2511 } 2512 2513 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2514 { 2515 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2516 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2517 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2518 (struct hwrm_fwd_req_cmpl *)txcmp; 2519 2520 switch (cmpl_type) { 2521 case CMPL_BASE_TYPE_HWRM_DONE: 2522 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2523 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2524 break; 2525 2526 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2527 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2528 2529 if ((vf_id < bp->pf.first_vf_id) || 2530 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2531 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2532 
vf_id); 2533 return -EINVAL; 2534 } 2535 2536 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2537 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2538 break; 2539 2540 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2541 bnxt_async_event_process(bp, 2542 (struct hwrm_async_event_cmpl *)txcmp); 2543 break; 2544 2545 default: 2546 break; 2547 } 2548 2549 return 0; 2550 } 2551 2552 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2553 { 2554 struct bnxt_napi *bnapi = dev_instance; 2555 struct bnxt *bp = bnapi->bp; 2556 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2557 u32 cons = RING_CMP(cpr->cp_raw_cons); 2558 2559 cpr->event_ctr++; 2560 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2561 napi_schedule(&bnapi->napi); 2562 return IRQ_HANDLED; 2563 } 2564 2565 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2566 { 2567 u32 raw_cons = cpr->cp_raw_cons; 2568 u16 cons = RING_CMP(raw_cons); 2569 struct tx_cmp *txcmp; 2570 2571 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2572 2573 return TX_CMP_VALID(txcmp, raw_cons); 2574 } 2575 2576 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2577 { 2578 struct bnxt_napi *bnapi = dev_instance; 2579 struct bnxt *bp = bnapi->bp; 2580 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2581 u32 cons = RING_CMP(cpr->cp_raw_cons); 2582 u32 int_status; 2583 2584 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2585 2586 if (!bnxt_has_work(bp, cpr)) { 2587 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2588 /* return if erroneous interrupt */ 2589 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2590 return IRQ_NONE; 2591 } 2592 2593 /* disable ring IRQ */ 2594 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2595 2596 /* Return here if interrupt is shared and is disabled. */ 2597 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2598 return IRQ_HANDLED; 2599 2600 napi_schedule(&bnapi->napi); 2601 return IRQ_HANDLED; 2602 } 2603 2604 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2605 int budget) 2606 { 2607 struct bnxt_napi *bnapi = cpr->bnapi; 2608 u32 raw_cons = cpr->cp_raw_cons; 2609 u32 cons; 2610 int rx_pkts = 0; 2611 u8 event = 0; 2612 struct tx_cmp *txcmp; 2613 2614 cpr->has_more_work = 0; 2615 cpr->had_work_done = 1; 2616 while (1) { 2617 int rc; 2618 2619 cons = RING_CMP(raw_cons); 2620 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2621 2622 if (!TX_CMP_VALID(txcmp, raw_cons)) 2623 break; 2624 2625 /* The valid test of the entry must be done first before 2626 * reading any further. 2627 */ 2628 dma_rmb(); 2629 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2630 u32 opaque = txcmp->tx_cmp_opaque; 2631 struct bnxt_tx_ring_info *txr; 2632 u16 tx_freed; 2633 2634 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2635 event |= BNXT_TX_CMP_EVENT; 2636 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2637 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2638 bp->tx_ring_mask; 2639 /* return full budget so NAPI will complete. 
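 * Once enough TX descriptors have been freed, stop polling here so
 * that bnapi->tx_int() can clean the TX ring and wake the queue.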
*/ 2640 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2641 rx_pkts = budget; 2642 raw_cons = NEXT_RAW_CMP(raw_cons); 2643 if (budget) 2644 cpr->has_more_work = 1; 2645 break; 2646 } 2647 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2648 if (likely(budget)) 2649 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2650 else 2651 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2652 &event); 2653 if (likely(rc >= 0)) 2654 rx_pkts += rc; 2655 /* Increment rx_pkts when rc is -ENOMEM to count towards 2656 * the NAPI budget. Otherwise, we may potentially loop 2657 * here forever if we consistently cannot allocate 2658 * buffers. 2659 */ 2660 else if (rc == -ENOMEM && budget) 2661 rx_pkts++; 2662 else if (rc == -EBUSY) /* partial completion */ 2663 break; 2664 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2665 CMPL_BASE_TYPE_HWRM_DONE) || 2666 (TX_CMP_TYPE(txcmp) == 2667 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2668 (TX_CMP_TYPE(txcmp) == 2669 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2670 bnxt_hwrm_handler(bp, txcmp); 2671 } 2672 raw_cons = NEXT_RAW_CMP(raw_cons); 2673 2674 if (rx_pkts && rx_pkts == budget) { 2675 cpr->has_more_work = 1; 2676 break; 2677 } 2678 } 2679 2680 if (event & BNXT_REDIRECT_EVENT) 2681 xdp_do_flush(); 2682 2683 if (event & BNXT_TX_EVENT) { 2684 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 2685 u16 prod = txr->tx_prod; 2686 2687 /* Sync BD data before updating doorbell */ 2688 wmb(); 2689 2690 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2691 } 2692 2693 cpr->cp_raw_cons = raw_cons; 2694 bnapi->events |= event; 2695 return rx_pkts; 2696 } 2697 2698 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2699 int budget) 2700 { 2701 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 2702 bnapi->tx_int(bp, bnapi, budget); 2703 2704 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2705 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2706 2707 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2708 } 2709 if (bnapi->events & BNXT_AGG_EVENT) { 2710 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2711 2712 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2713 } 2714 bnapi->events &= BNXT_TX_CMP_EVENT; 2715 } 2716 2717 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2718 int budget) 2719 { 2720 struct bnxt_napi *bnapi = cpr->bnapi; 2721 int rx_pkts; 2722 2723 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2724 2725 /* ACK completion ring before freeing tx ring and producing new 2726 * buffers in rx/agg rings to prevent overflowing the completion 2727 * ring. 2728 */ 2729 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2730 2731 __bnxt_poll_work_done(bp, bnapi, budget); 2732 return rx_pkts; 2733 } 2734 2735 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2736 { 2737 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2738 struct bnxt *bp = bnapi->bp; 2739 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2740 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2741 struct tx_cmp *txcmp; 2742 struct rx_cmp_ext *rxcmp1; 2743 u32 cp_cons, tmp_raw_cons; 2744 u32 raw_cons = cpr->cp_raw_cons; 2745 bool flush_xdp = false; 2746 u32 rx_pkts = 0; 2747 u8 event = 0; 2748 2749 while (1) { 2750 int rc; 2751 2752 cp_cons = RING_CMP(raw_cons); 2753 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2754 2755 if (!TX_CMP_VALID(txcmp, raw_cons)) 2756 break; 2757 2758 /* The valid test of the entry must be done first before 2759 * reading any further. 
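 * The dma_rmb() that follows enforces this before the rx completion
 * words are read.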
2760 */ 2761 dma_rmb(); 2762 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2763 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2764 cp_cons = RING_CMP(tmp_raw_cons); 2765 rxcmp1 = (struct rx_cmp_ext *) 2766 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2767 2768 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2769 break; 2770 2771 /* force an error to recycle the buffer */ 2772 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2773 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2774 2775 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2776 if (likely(rc == -EIO) && budget) 2777 rx_pkts++; 2778 else if (rc == -EBUSY) /* partial completion */ 2779 break; 2780 if (event & BNXT_REDIRECT_EVENT) 2781 flush_xdp = true; 2782 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2783 CMPL_BASE_TYPE_HWRM_DONE)) { 2784 bnxt_hwrm_handler(bp, txcmp); 2785 } else { 2786 netdev_err(bp->dev, 2787 "Invalid completion received on special ring\n"); 2788 } 2789 raw_cons = NEXT_RAW_CMP(raw_cons); 2790 2791 if (rx_pkts == budget) 2792 break; 2793 } 2794 2795 cpr->cp_raw_cons = raw_cons; 2796 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2797 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2798 2799 if (event & BNXT_AGG_EVENT) 2800 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2801 if (flush_xdp) 2802 xdp_do_flush(); 2803 2804 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2805 napi_complete_done(napi, rx_pkts); 2806 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2807 } 2808 return rx_pkts; 2809 } 2810 2811 static int bnxt_poll(struct napi_struct *napi, int budget) 2812 { 2813 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2814 struct bnxt *bp = bnapi->bp; 2815 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2816 int work_done = 0; 2817 2818 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 2819 napi_complete(napi); 2820 return 0; 2821 } 2822 while (1) { 2823 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2824 2825 if (work_done >= budget) { 2826 if (!budget) 2827 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2828 break; 2829 } 2830 2831 if (!bnxt_has_work(bp, cpr)) { 2832 if (napi_complete_done(napi, work_done)) 2833 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2834 break; 2835 } 2836 } 2837 if (bp->flags & BNXT_FLAG_DIM) { 2838 struct dim_sample dim_sample = {}; 2839 2840 dim_update_sample(cpr->event_ctr, 2841 cpr->rx_packets, 2842 cpr->rx_bytes, 2843 &dim_sample); 2844 net_dim(&cpr->dim, dim_sample); 2845 } 2846 return work_done; 2847 } 2848 2849 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2850 { 2851 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2852 int i, work_done = 0; 2853 2854 for (i = 0; i < cpr->cp_ring_count; i++) { 2855 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 2856 2857 if (cpr2->had_nqe_notify) { 2858 work_done += __bnxt_poll_work(bp, cpr2, 2859 budget - work_done); 2860 cpr->has_more_work |= cpr2->has_more_work; 2861 } 2862 } 2863 return work_done; 2864 } 2865 2866 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2867 u64 dbr_type, int budget) 2868 { 2869 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2870 int i; 2871 2872 for (i = 0; i < cpr->cp_ring_count; i++) { 2873 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 2874 struct bnxt_db_info *db; 2875 2876 if (cpr2->had_work_done) { 2877 db = &cpr2->cp_db; 2878 bnxt_writeq(bp, db->db_key64 | dbr_type | 2879 DB_RING_IDX(db, cpr2->cp_raw_cons), 2880 db->doorbell); 2881 cpr2->had_work_done = 0; 2882 if (dbr_type == DBR_TYPE_CQ_ARMALL) 2883 
cpr2->had_nqe_notify = 0; 2884 } 2885 } 2886 __bnxt_poll_work_done(bp, bnapi, budget); 2887 } 2888 2889 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2890 { 2891 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2892 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2893 struct bnxt_cp_ring_info *cpr_rx; 2894 u32 raw_cons = cpr->cp_raw_cons; 2895 struct bnxt *bp = bnapi->bp; 2896 struct nqe_cn *nqcmp; 2897 int work_done = 0; 2898 u32 cons; 2899 2900 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 2901 napi_complete(napi); 2902 return 0; 2903 } 2904 if (cpr->has_more_work) { 2905 cpr->has_more_work = 0; 2906 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2907 } 2908 while (1) { 2909 cons = RING_CMP(raw_cons); 2910 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2911 2912 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2913 if (cpr->has_more_work) 2914 break; 2915 2916 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 2917 budget); 2918 cpr->cp_raw_cons = raw_cons; 2919 if (napi_complete_done(napi, work_done)) 2920 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2921 cpr->cp_raw_cons); 2922 goto poll_done; 2923 } 2924 2925 /* The valid test of the entry must be done first before 2926 * reading any further. 2927 */ 2928 dma_rmb(); 2929 2930 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2931 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2932 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 2933 struct bnxt_cp_ring_info *cpr2; 2934 2935 /* No more budget for RX work */ 2936 if (budget && work_done >= budget && 2937 cq_type == BNXT_NQ_HDL_TYPE_RX) 2938 break; 2939 2940 idx = BNXT_NQ_HDL_IDX(idx); 2941 cpr2 = &cpr->cp_ring_arr[idx]; 2942 cpr2->had_nqe_notify = 1; 2943 work_done += __bnxt_poll_work(bp, cpr2, 2944 budget - work_done); 2945 cpr->has_more_work |= cpr2->has_more_work; 2946 } else { 2947 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2948 } 2949 raw_cons = NEXT_RAW_CMP(raw_cons); 2950 } 2951 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 2952 if (raw_cons != cpr->cp_raw_cons) { 2953 cpr->cp_raw_cons = raw_cons; 2954 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 2955 } 2956 poll_done: 2957 cpr_rx = &cpr->cp_ring_arr[0]; 2958 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 2959 (bp->flags & BNXT_FLAG_DIM)) { 2960 struct dim_sample dim_sample = {}; 2961 2962 dim_update_sample(cpr->event_ctr, 2963 cpr_rx->rx_packets, 2964 cpr_rx->rx_bytes, 2965 &dim_sample); 2966 net_dim(&cpr->dim, dim_sample); 2967 } 2968 return work_done; 2969 } 2970 2971 static void bnxt_free_tx_skbs(struct bnxt *bp) 2972 { 2973 int i, max_idx; 2974 struct pci_dev *pdev = bp->pdev; 2975 2976 if (!bp->tx_ring) 2977 return; 2978 2979 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2980 for (i = 0; i < bp->tx_nr_rings; i++) { 2981 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2982 int j; 2983 2984 if (!txr->tx_buf_ring) 2985 continue; 2986 2987 for (j = 0; j < max_idx;) { 2988 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2989 struct sk_buff *skb; 2990 int k, last; 2991 2992 if (i < bp->tx_nr_rings_xdp && 2993 tx_buf->action == XDP_REDIRECT) { 2994 dma_unmap_single(&pdev->dev, 2995 dma_unmap_addr(tx_buf, mapping), 2996 dma_unmap_len(tx_buf, len), 2997 DMA_TO_DEVICE); 2998 xdp_return_frame(tx_buf->xdpf); 2999 tx_buf->action = 0; 3000 tx_buf->xdpf = NULL; 3001 j++; 3002 continue; 3003 } 3004 3005 skb = tx_buf->skb; 3006 if (!skb) { 3007 j++; 3008 continue; 3009 } 3010 3011 tx_buf->skb = NULL; 3012 3013 if (tx_buf->is_push) { 3014 dev_kfree_skb(skb); 3015 j += 2; 3016 continue; 3017 
} 3018 3019 dma_unmap_single(&pdev->dev, 3020 dma_unmap_addr(tx_buf, mapping), 3021 skb_headlen(skb), 3022 DMA_TO_DEVICE); 3023 3024 last = tx_buf->nr_frags; 3025 j += 2; 3026 for (k = 0; k < last; k++, j++) { 3027 int ring_idx = j & bp->tx_ring_mask; 3028 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 3029 3030 tx_buf = &txr->tx_buf_ring[ring_idx]; 3031 dma_unmap_page( 3032 &pdev->dev, 3033 dma_unmap_addr(tx_buf, mapping), 3034 skb_frag_size(frag), DMA_TO_DEVICE); 3035 } 3036 dev_kfree_skb(skb); 3037 } 3038 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 3039 } 3040 } 3041 3042 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 3043 { 3044 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3045 struct pci_dev *pdev = bp->pdev; 3046 struct bnxt_tpa_idx_map *map; 3047 int i, max_idx, max_agg_idx; 3048 3049 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3050 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3051 if (!rxr->rx_tpa) 3052 goto skip_rx_tpa_free; 3053 3054 for (i = 0; i < bp->max_tpa; i++) { 3055 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3056 u8 *data = tpa_info->data; 3057 3058 if (!data) 3059 continue; 3060 3061 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 3062 bp->rx_buf_use_size, bp->rx_dir, 3063 DMA_ATTR_WEAK_ORDERING); 3064 3065 tpa_info->data = NULL; 3066 3067 skb_free_frag(data); 3068 } 3069 3070 skip_rx_tpa_free: 3071 if (!rxr->rx_buf_ring) 3072 goto skip_rx_buf_free; 3073 3074 for (i = 0; i < max_idx; i++) { 3075 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3076 dma_addr_t mapping = rx_buf->mapping; 3077 void *data = rx_buf->data; 3078 3079 if (!data) 3080 continue; 3081 3082 rx_buf->data = NULL; 3083 if (BNXT_RX_PAGE_MODE(bp)) { 3084 page_pool_recycle_direct(rxr->page_pool, data); 3085 } else { 3086 dma_unmap_single_attrs(&pdev->dev, mapping, 3087 bp->rx_buf_use_size, bp->rx_dir, 3088 DMA_ATTR_WEAK_ORDERING); 3089 skb_free_frag(data); 3090 } 3091 } 3092 3093 skip_rx_buf_free: 3094 if (!rxr->rx_agg_ring) 3095 goto skip_rx_agg_free; 3096 3097 for (i = 0; i < max_agg_idx; i++) { 3098 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3099 struct page *page = rx_agg_buf->page; 3100 3101 if (!page) 3102 continue; 3103 3104 rx_agg_buf->page = NULL; 3105 __clear_bit(i, rxr->rx_agg_bmap); 3106 3107 page_pool_recycle_direct(rxr->page_pool, page); 3108 } 3109 3110 skip_rx_agg_free: 3111 map = rxr->rx_tpa_idx_map; 3112 if (map) 3113 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3114 } 3115 3116 static void bnxt_free_rx_skbs(struct bnxt *bp) 3117 { 3118 int i; 3119 3120 if (!bp->rx_ring) 3121 return; 3122 3123 for (i = 0; i < bp->rx_nr_rings; i++) 3124 bnxt_free_one_rx_ring_skbs(bp, i); 3125 } 3126 3127 static void bnxt_free_skbs(struct bnxt *bp) 3128 { 3129 bnxt_free_tx_skbs(bp); 3130 bnxt_free_rx_skbs(bp); 3131 } 3132 3133 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3134 { 3135 u8 init_val = ctxm->init_value; 3136 u16 offset = ctxm->init_offset; 3137 u8 *p2 = p; 3138 int i; 3139 3140 if (!init_val) 3141 return; 3142 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3143 memset(p, init_val, len); 3144 return; 3145 } 3146 for (i = 0; i < len; i += ctxm->entry_size) 3147 *(p2 + i + offset) = init_val; 3148 } 3149 3150 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3151 { 3152 struct pci_dev *pdev = bp->pdev; 3153 int i; 3154 3155 if (!rmem->pg_arr) 3156 goto skip_pages; 3157 3158 for (i = 0; i < rmem->nr_pages; i++) { 3159 if (!rmem->pg_arr[i]) 3160 
continue; 3161 3162 dma_free_coherent(&pdev->dev, rmem->page_size, 3163 rmem->pg_arr[i], rmem->dma_arr[i]); 3164 3165 rmem->pg_arr[i] = NULL; 3166 } 3167 skip_pages: 3168 if (rmem->pg_tbl) { 3169 size_t pg_tbl_size = rmem->nr_pages * 8; 3170 3171 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3172 pg_tbl_size = rmem->page_size; 3173 dma_free_coherent(&pdev->dev, pg_tbl_size, 3174 rmem->pg_tbl, rmem->pg_tbl_map); 3175 rmem->pg_tbl = NULL; 3176 } 3177 if (rmem->vmem_size && *rmem->vmem) { 3178 vfree(*rmem->vmem); 3179 *rmem->vmem = NULL; 3180 } 3181 } 3182 3183 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3184 { 3185 struct pci_dev *pdev = bp->pdev; 3186 u64 valid_bit = 0; 3187 int i; 3188 3189 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3190 valid_bit = PTU_PTE_VALID; 3191 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3192 size_t pg_tbl_size = rmem->nr_pages * 8; 3193 3194 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3195 pg_tbl_size = rmem->page_size; 3196 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3197 &rmem->pg_tbl_map, 3198 GFP_KERNEL); 3199 if (!rmem->pg_tbl) 3200 return -ENOMEM; 3201 } 3202 3203 for (i = 0; i < rmem->nr_pages; i++) { 3204 u64 extra_bits = valid_bit; 3205 3206 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3207 rmem->page_size, 3208 &rmem->dma_arr[i], 3209 GFP_KERNEL); 3210 if (!rmem->pg_arr[i]) 3211 return -ENOMEM; 3212 3213 if (rmem->ctx_mem) 3214 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3215 rmem->page_size); 3216 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3217 if (i == rmem->nr_pages - 2 && 3218 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3219 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3220 else if (i == rmem->nr_pages - 1 && 3221 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3222 extra_bits |= PTU_PTE_LAST; 3223 rmem->pg_tbl[i] = 3224 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3225 } 3226 } 3227 3228 if (rmem->vmem_size) { 3229 *rmem->vmem = vzalloc(rmem->vmem_size); 3230 if (!(*rmem->vmem)) 3231 return -ENOMEM; 3232 } 3233 return 0; 3234 } 3235 3236 static void bnxt_free_tpa_info(struct bnxt *bp) 3237 { 3238 int i, j; 3239 3240 for (i = 0; i < bp->rx_nr_rings; i++) { 3241 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3242 3243 kfree(rxr->rx_tpa_idx_map); 3244 rxr->rx_tpa_idx_map = NULL; 3245 if (rxr->rx_tpa) { 3246 for (j = 0; j < bp->max_tpa; j++) { 3247 kfree(rxr->rx_tpa[j].agg_arr); 3248 rxr->rx_tpa[j].agg_arr = NULL; 3249 } 3250 } 3251 kfree(rxr->rx_tpa); 3252 rxr->rx_tpa = NULL; 3253 } 3254 } 3255 3256 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3257 { 3258 int i, j; 3259 3260 bp->max_tpa = MAX_TPA; 3261 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3262 if (!bp->max_tpa_v2) 3263 return 0; 3264 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3265 } 3266 3267 for (i = 0; i < bp->rx_nr_rings; i++) { 3268 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3269 struct rx_agg_cmp *agg; 3270 3271 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3272 GFP_KERNEL); 3273 if (!rxr->rx_tpa) 3274 return -ENOMEM; 3275 3276 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3277 continue; 3278 for (j = 0; j < bp->max_tpa; j++) { 3279 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3280 if (!agg) 3281 return -ENOMEM; 3282 rxr->rx_tpa[j].agg_arr = agg; 3283 } 3284 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3285 GFP_KERNEL); 3286 if (!rxr->rx_tpa_idx_map) 3287 return -ENOMEM; 3288 } 3289 return 0; 3290 } 3291 3292 static void 
bnxt_free_rx_rings(struct bnxt *bp) 3293 { 3294 int i; 3295 3296 if (!bp->rx_ring) 3297 return; 3298 3299 bnxt_free_tpa_info(bp); 3300 for (i = 0; i < bp->rx_nr_rings; i++) { 3301 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3302 struct bnxt_ring_struct *ring; 3303 3304 if (rxr->xdp_prog) 3305 bpf_prog_put(rxr->xdp_prog); 3306 3307 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3308 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3309 3310 page_pool_destroy(rxr->page_pool); 3311 rxr->page_pool = NULL; 3312 3313 kfree(rxr->rx_agg_bmap); 3314 rxr->rx_agg_bmap = NULL; 3315 3316 ring = &rxr->rx_ring_struct; 3317 bnxt_free_ring(bp, &ring->ring_mem); 3318 3319 ring = &rxr->rx_agg_ring_struct; 3320 bnxt_free_ring(bp, &ring->ring_mem); 3321 } 3322 } 3323 3324 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3325 struct bnxt_rx_ring_info *rxr) 3326 { 3327 struct page_pool_params pp = { 0 }; 3328 3329 pp.pool_size = bp->rx_agg_ring_size; 3330 if (BNXT_RX_PAGE_MODE(bp)) 3331 pp.pool_size += bp->rx_ring_size; 3332 pp.nid = dev_to_node(&bp->pdev->dev); 3333 pp.napi = &rxr->bnapi->napi; 3334 pp.netdev = bp->dev; 3335 pp.dev = &bp->pdev->dev; 3336 pp.dma_dir = bp->rx_dir; 3337 pp.max_len = PAGE_SIZE; 3338 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3339 3340 rxr->page_pool = page_pool_create(&pp); 3341 if (IS_ERR(rxr->page_pool)) { 3342 int err = PTR_ERR(rxr->page_pool); 3343 3344 rxr->page_pool = NULL; 3345 return err; 3346 } 3347 return 0; 3348 } 3349 3350 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3351 { 3352 int i, rc = 0, agg_rings = 0; 3353 3354 if (!bp->rx_ring) 3355 return -ENOMEM; 3356 3357 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3358 agg_rings = 1; 3359 3360 for (i = 0; i < bp->rx_nr_rings; i++) { 3361 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3362 struct bnxt_ring_struct *ring; 3363 3364 ring = &rxr->rx_ring_struct; 3365 3366 rc = bnxt_alloc_rx_page_pool(bp, rxr); 3367 if (rc) 3368 return rc; 3369 3370 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3371 if (rc < 0) 3372 return rc; 3373 3374 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3375 MEM_TYPE_PAGE_POOL, 3376 rxr->page_pool); 3377 if (rc) { 3378 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3379 return rc; 3380 } 3381 3382 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3383 if (rc) 3384 return rc; 3385 3386 ring->grp_idx = i; 3387 if (agg_rings) { 3388 u16 mem_size; 3389 3390 ring = &rxr->rx_agg_ring_struct; 3391 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3392 if (rc) 3393 return rc; 3394 3395 ring->grp_idx = i; 3396 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3397 mem_size = rxr->rx_agg_bmap_size / 8; 3398 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3399 if (!rxr->rx_agg_bmap) 3400 return -ENOMEM; 3401 } 3402 } 3403 if (bp->flags & BNXT_FLAG_TPA) 3404 rc = bnxt_alloc_tpa_info(bp); 3405 return rc; 3406 } 3407 3408 static void bnxt_free_tx_rings(struct bnxt *bp) 3409 { 3410 int i; 3411 struct pci_dev *pdev = bp->pdev; 3412 3413 if (!bp->tx_ring) 3414 return; 3415 3416 for (i = 0; i < bp->tx_nr_rings; i++) { 3417 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3418 struct bnxt_ring_struct *ring; 3419 3420 if (txr->tx_push) { 3421 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3422 txr->tx_push, txr->tx_push_mapping); 3423 txr->tx_push = NULL; 3424 } 3425 3426 ring = &txr->tx_ring_struct; 3427 3428 bnxt_free_ring(bp, &ring->ring_mem); 3429 } 3430 } 3431 3432 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3433 ((tc) * (bp)->tx_nr_rings_per_tc) 3434 3435 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3436 ((tx) % (bp)->tx_nr_rings_per_tc) 3437 3438 
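/* Worked example for the TC <-> TX ring helpers above and below,
 * assuming tx_nr_rings_per_tc == 4: TX ring 9 belongs to TC 2
 * (BNXT_RING_TO_TC) at offset 1 within that TC (BNXT_RING_TO_TC_OFF),
 * and TC 2's rings start at ring index 8 (BNXT_TC_TO_RING_BASE).
 */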
#define BNXT_RING_TO_TC(bp, tx) \ 3439 ((tx) / (bp)->tx_nr_rings_per_tc) 3440 3441 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3442 { 3443 int i, j, rc; 3444 struct pci_dev *pdev = bp->pdev; 3445 3446 bp->tx_push_size = 0; 3447 if (bp->tx_push_thresh) { 3448 int push_size; 3449 3450 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3451 bp->tx_push_thresh); 3452 3453 if (push_size > 256) { 3454 push_size = 0; 3455 bp->tx_push_thresh = 0; 3456 } 3457 3458 bp->tx_push_size = push_size; 3459 } 3460 3461 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3462 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3463 struct bnxt_ring_struct *ring; 3464 u8 qidx; 3465 3466 ring = &txr->tx_ring_struct; 3467 3468 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3469 if (rc) 3470 return rc; 3471 3472 ring->grp_idx = txr->bnapi->index; 3473 if (bp->tx_push_size) { 3474 dma_addr_t mapping; 3475 3476 /* One pre-allocated DMA buffer to backup 3477 * TX push operation 3478 */ 3479 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3480 bp->tx_push_size, 3481 &txr->tx_push_mapping, 3482 GFP_KERNEL); 3483 3484 if (!txr->tx_push) 3485 return -ENOMEM; 3486 3487 mapping = txr->tx_push_mapping + 3488 sizeof(struct tx_push_bd); 3489 txr->data_mapping = cpu_to_le64(mapping); 3490 } 3491 qidx = bp->tc_to_qidx[j]; 3492 ring->queue_id = bp->q_info[qidx].queue_id; 3493 spin_lock_init(&txr->xdp_tx_lock); 3494 if (i < bp->tx_nr_rings_xdp) 3495 continue; 3496 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3497 j++; 3498 } 3499 return 0; 3500 } 3501 3502 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3503 { 3504 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3505 3506 kfree(cpr->cp_desc_ring); 3507 cpr->cp_desc_ring = NULL; 3508 ring->ring_mem.pg_arr = NULL; 3509 kfree(cpr->cp_desc_mapping); 3510 cpr->cp_desc_mapping = NULL; 3511 ring->ring_mem.dma_arr = NULL; 3512 } 3513 3514 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3515 { 3516 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 3517 if (!cpr->cp_desc_ring) 3518 return -ENOMEM; 3519 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3520 GFP_KERNEL); 3521 if (!cpr->cp_desc_mapping) 3522 return -ENOMEM; 3523 return 0; 3524 } 3525 3526 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3527 { 3528 int i; 3529 3530 if (!bp->bnapi) 3531 return; 3532 for (i = 0; i < bp->cp_nr_rings; i++) { 3533 struct bnxt_napi *bnapi = bp->bnapi[i]; 3534 3535 if (!bnapi) 3536 continue; 3537 bnxt_free_cp_arrays(&bnapi->cp_ring); 3538 } 3539 } 3540 3541 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 3542 { 3543 int i, n = bp->cp_nr_pages; 3544 3545 for (i = 0; i < bp->cp_nr_rings; i++) { 3546 struct bnxt_napi *bnapi = bp->bnapi[i]; 3547 int rc; 3548 3549 if (!bnapi) 3550 continue; 3551 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 3552 if (rc) 3553 return rc; 3554 } 3555 return 0; 3556 } 3557 3558 static void bnxt_free_cp_rings(struct bnxt *bp) 3559 { 3560 int i; 3561 3562 if (!bp->bnapi) 3563 return; 3564 3565 for (i = 0; i < bp->cp_nr_rings; i++) { 3566 struct bnxt_napi *bnapi = bp->bnapi[i]; 3567 struct bnxt_cp_ring_info *cpr; 3568 struct bnxt_ring_struct *ring; 3569 int j; 3570 3571 if (!bnapi) 3572 continue; 3573 3574 cpr = &bnapi->cp_ring; 3575 ring = &cpr->cp_ring_struct; 3576 3577 bnxt_free_ring(bp, &ring->ring_mem); 3578 3579 if (!cpr->cp_ring_arr) 3580 continue; 3581 3582 for (j = 0; j < cpr->cp_ring_count; j++) { 3583 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3584 
3585 ring = &cpr2->cp_ring_struct; 3586 bnxt_free_ring(bp, &ring->ring_mem); 3587 bnxt_free_cp_arrays(cpr2); 3588 } 3589 kfree(cpr->cp_ring_arr); 3590 cpr->cp_ring_arr = NULL; 3591 cpr->cp_ring_count = 0; 3592 } 3593 } 3594 3595 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 3596 struct bnxt_cp_ring_info *cpr) 3597 { 3598 struct bnxt_ring_mem_info *rmem; 3599 struct bnxt_ring_struct *ring; 3600 int rc; 3601 3602 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 3603 if (rc) { 3604 bnxt_free_cp_arrays(cpr); 3605 return -ENOMEM; 3606 } 3607 ring = &cpr->cp_ring_struct; 3608 rmem = &ring->ring_mem; 3609 rmem->nr_pages = bp->cp_nr_pages; 3610 rmem->page_size = HW_CMPD_RING_SIZE; 3611 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3612 rmem->dma_arr = cpr->cp_desc_mapping; 3613 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3614 rc = bnxt_alloc_ring(bp, rmem); 3615 if (rc) { 3616 bnxt_free_ring(bp, rmem); 3617 bnxt_free_cp_arrays(cpr); 3618 } 3619 return rc; 3620 } 3621 3622 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3623 { 3624 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3625 int i, j, rc, ulp_base_vec, ulp_msix; 3626 int tcs = netdev_get_num_tc(bp->dev); 3627 3628 if (!tcs) 3629 tcs = 1; 3630 ulp_msix = bnxt_get_ulp_msix_num(bp); 3631 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3632 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 3633 struct bnxt_napi *bnapi = bp->bnapi[i]; 3634 struct bnxt_cp_ring_info *cpr, *cpr2; 3635 struct bnxt_ring_struct *ring; 3636 int cp_count = 0, k; 3637 int rx = 0, tx = 0; 3638 3639 if (!bnapi) 3640 continue; 3641 3642 cpr = &bnapi->cp_ring; 3643 cpr->bnapi = bnapi; 3644 ring = &cpr->cp_ring_struct; 3645 3646 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3647 if (rc) 3648 return rc; 3649 3650 if (ulp_msix && i >= ulp_base_vec) 3651 ring->map_idx = i + ulp_msix; 3652 else 3653 ring->map_idx = i; 3654 3655 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3656 continue; 3657 3658 if (i < bp->rx_nr_rings) { 3659 cp_count++; 3660 rx = 1; 3661 } 3662 if (i < bp->tx_nr_rings_xdp) { 3663 cp_count++; 3664 tx = 1; 3665 } else if ((sh && i < bp->tx_nr_rings) || 3666 (!sh && i >= bp->rx_nr_rings)) { 3667 cp_count += tcs; 3668 tx = 1; 3669 } 3670 3671 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 3672 GFP_KERNEL); 3673 if (!cpr->cp_ring_arr) 3674 return -ENOMEM; 3675 cpr->cp_ring_count = cp_count; 3676 3677 for (k = 0; k < cp_count; k++) { 3678 cpr2 = &cpr->cp_ring_arr[k]; 3679 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 3680 if (rc) 3681 return rc; 3682 cpr2->bnapi = bnapi; 3683 cpr2->cp_idx = k; 3684 if (!k && rx) { 3685 bp->rx_ring[i].rx_cpr = cpr2; 3686 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 3687 } else { 3688 int n, tc = k - rx; 3689 3690 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 3691 bp->tx_ring[n].tx_cpr = cpr2; 3692 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 3693 } 3694 } 3695 if (tx) 3696 j++; 3697 } 3698 return 0; 3699 } 3700 3701 static void bnxt_init_ring_struct(struct bnxt *bp) 3702 { 3703 int i, j; 3704 3705 for (i = 0; i < bp->cp_nr_rings; i++) { 3706 struct bnxt_napi *bnapi = bp->bnapi[i]; 3707 struct bnxt_ring_mem_info *rmem; 3708 struct bnxt_cp_ring_info *cpr; 3709 struct bnxt_rx_ring_info *rxr; 3710 struct bnxt_tx_ring_info *txr; 3711 struct bnxt_ring_struct *ring; 3712 3713 if (!bnapi) 3714 continue; 3715 3716 cpr = &bnapi->cp_ring; 3717 ring = &cpr->cp_ring_struct; 3718 rmem = &ring->ring_mem; 3719 rmem->nr_pages = bp->cp_nr_pages; 3720 rmem->page_size = HW_CMPD_RING_SIZE; 3721 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3722 rmem->dma_arr = cpr->cp_desc_mapping; 3723 
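/* The completion/NQ ring has no software buffer ring, so it needs
 * no vmem.
 */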
rmem->vmem_size = 0; 3724 3725 rxr = bnapi->rx_ring; 3726 if (!rxr) 3727 goto skip_rx; 3728 3729 ring = &rxr->rx_ring_struct; 3730 rmem = &ring->ring_mem; 3731 rmem->nr_pages = bp->rx_nr_pages; 3732 rmem->page_size = HW_RXBD_RING_SIZE; 3733 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3734 rmem->dma_arr = rxr->rx_desc_mapping; 3735 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3736 rmem->vmem = (void **)&rxr->rx_buf_ring; 3737 3738 ring = &rxr->rx_agg_ring_struct; 3739 rmem = &ring->ring_mem; 3740 rmem->nr_pages = bp->rx_agg_nr_pages; 3741 rmem->page_size = HW_RXBD_RING_SIZE; 3742 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3743 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3744 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3745 rmem->vmem = (void **)&rxr->rx_agg_ring; 3746 3747 skip_rx: 3748 bnxt_for_each_napi_tx(j, bnapi, txr) { 3749 ring = &txr->tx_ring_struct; 3750 rmem = &ring->ring_mem; 3751 rmem->nr_pages = bp->tx_nr_pages; 3752 rmem->page_size = HW_TXBD_RING_SIZE; 3753 rmem->pg_arr = (void **)txr->tx_desc_ring; 3754 rmem->dma_arr = txr->tx_desc_mapping; 3755 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3756 rmem->vmem = (void **)&txr->tx_buf_ring; 3757 } 3758 } 3759 } 3760 3761 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3762 { 3763 int i; 3764 u32 prod; 3765 struct rx_bd **rx_buf_ring; 3766 3767 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3768 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3769 int j; 3770 struct rx_bd *rxbd; 3771 3772 rxbd = rx_buf_ring[i]; 3773 if (!rxbd) 3774 continue; 3775 3776 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3777 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3778 rxbd->rx_bd_opaque = prod; 3779 } 3780 } 3781 } 3782 3783 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 3784 { 3785 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3786 struct net_device *dev = bp->dev; 3787 u32 prod; 3788 int i; 3789 3790 prod = rxr->rx_prod; 3791 for (i = 0; i < bp->rx_ring_size; i++) { 3792 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 3793 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3794 ring_nr, i, bp->rx_ring_size); 3795 break; 3796 } 3797 prod = NEXT_RX(prod); 3798 } 3799 rxr->rx_prod = prod; 3800 3801 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3802 return 0; 3803 3804 prod = rxr->rx_agg_prod; 3805 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3806 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 3807 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3808 ring_nr, i, bp->rx_ring_size); 3809 break; 3810 } 3811 prod = NEXT_RX_AGG(prod); 3812 } 3813 rxr->rx_agg_prod = prod; 3814 3815 if (rxr->rx_tpa) { 3816 dma_addr_t mapping; 3817 u8 *data; 3818 3819 for (i = 0; i < bp->max_tpa; i++) { 3820 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); 3821 if (!data) 3822 return -ENOMEM; 3823 3824 rxr->rx_tpa[i].data = data; 3825 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3826 rxr->rx_tpa[i].mapping = mapping; 3827 } 3828 } 3829 return 0; 3830 } 3831 3832 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3833 { 3834 struct bnxt_rx_ring_info *rxr; 3835 struct bnxt_ring_struct *ring; 3836 u32 type; 3837 3838 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3839 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3840 3841 if (NET_IP_ALIGN == 2) 3842 type |= RX_BD_FLAGS_SOP; 3843 3844 rxr = &bp->rx_ring[ring_nr]; 3845 ring = &rxr->rx_ring_struct; 3846 bnxt_init_rxbd_pages(ring, type); 3847 3848 if 
(BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3849 bpf_prog_add(bp->xdp_prog, 1); 3850 rxr->xdp_prog = bp->xdp_prog; 3851 } 3852 ring->fw_ring_id = INVALID_HW_RING_ID; 3853 3854 ring = &rxr->rx_agg_ring_struct; 3855 ring->fw_ring_id = INVALID_HW_RING_ID; 3856 3857 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 3858 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3859 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3860 3861 bnxt_init_rxbd_pages(ring, type); 3862 } 3863 3864 return bnxt_alloc_one_rx_ring(bp, ring_nr); 3865 } 3866 3867 static void bnxt_init_cp_rings(struct bnxt *bp) 3868 { 3869 int i, j; 3870 3871 for (i = 0; i < bp->cp_nr_rings; i++) { 3872 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3873 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3874 3875 ring->fw_ring_id = INVALID_HW_RING_ID; 3876 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3877 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3878 if (!cpr->cp_ring_arr) 3879 continue; 3880 for (j = 0; j < cpr->cp_ring_count; j++) { 3881 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 3882 3883 ring = &cpr2->cp_ring_struct; 3884 ring->fw_ring_id = INVALID_HW_RING_ID; 3885 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3886 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3887 } 3888 } 3889 } 3890 3891 static int bnxt_init_rx_rings(struct bnxt *bp) 3892 { 3893 int i, rc = 0; 3894 3895 if (BNXT_RX_PAGE_MODE(bp)) { 3896 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3897 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3898 } else { 3899 bp->rx_offset = BNXT_RX_OFFSET; 3900 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3901 } 3902 3903 for (i = 0; i < bp->rx_nr_rings; i++) { 3904 rc = bnxt_init_one_rx_ring(bp, i); 3905 if (rc) 3906 break; 3907 } 3908 3909 return rc; 3910 } 3911 3912 static int bnxt_init_tx_rings(struct bnxt *bp) 3913 { 3914 u16 i; 3915 3916 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3917 BNXT_MIN_TX_DESC_CNT); 3918 3919 for (i = 0; i < bp->tx_nr_rings; i++) { 3920 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3921 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3922 3923 ring->fw_ring_id = INVALID_HW_RING_ID; 3924 } 3925 3926 return 0; 3927 } 3928 3929 static void bnxt_free_ring_grps(struct bnxt *bp) 3930 { 3931 kfree(bp->grp_info); 3932 bp->grp_info = NULL; 3933 } 3934 3935 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3936 { 3937 int i; 3938 3939 if (irq_re_init) { 3940 bp->grp_info = kcalloc(bp->cp_nr_rings, 3941 sizeof(struct bnxt_ring_grp_info), 3942 GFP_KERNEL); 3943 if (!bp->grp_info) 3944 return -ENOMEM; 3945 } 3946 for (i = 0; i < bp->cp_nr_rings; i++) { 3947 if (irq_re_init) 3948 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3949 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3950 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3951 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3952 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3953 } 3954 return 0; 3955 } 3956 3957 static void bnxt_free_vnics(struct bnxt *bp) 3958 { 3959 kfree(bp->vnic_info); 3960 bp->vnic_info = NULL; 3961 bp->nr_vnics = 0; 3962 } 3963 3964 static int bnxt_alloc_vnics(struct bnxt *bp) 3965 { 3966 int num_vnics = 1; 3967 3968 #ifdef CONFIG_RFS_ACCEL 3969 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) 3970 num_vnics += bp->rx_nr_rings; 3971 #endif 3972 3973 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3974 num_vnics++; 3975 3976 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3977 GFP_KERNEL); 3978 if 
(!bp->vnic_info) 3979 return -ENOMEM; 3980 3981 bp->nr_vnics = num_vnics; 3982 return 0; 3983 } 3984 3985 static void bnxt_init_vnics(struct bnxt *bp) 3986 { 3987 int i; 3988 3989 for (i = 0; i < bp->nr_vnics; i++) { 3990 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3991 int j; 3992 3993 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3994 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3995 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3996 3997 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3998 3999 if (bp->vnic_info[i].rss_hash_key) { 4000 if (i == 0) 4001 get_random_bytes(vnic->rss_hash_key, 4002 HW_HASH_KEY_SIZE); 4003 else 4004 memcpy(vnic->rss_hash_key, 4005 bp->vnic_info[0].rss_hash_key, 4006 HW_HASH_KEY_SIZE); 4007 } 4008 } 4009 } 4010 4011 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4012 { 4013 int pages; 4014 4015 pages = ring_size / desc_per_pg; 4016 4017 if (!pages) 4018 return 1; 4019 4020 pages++; 4021 4022 while (pages & (pages - 1)) 4023 pages++; 4024 4025 return pages; 4026 } 4027 4028 void bnxt_set_tpa_flags(struct bnxt *bp) 4029 { 4030 bp->flags &= ~BNXT_FLAG_TPA; 4031 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4032 return; 4033 if (bp->dev->features & NETIF_F_LRO) 4034 bp->flags |= BNXT_FLAG_LRO; 4035 else if (bp->dev->features & NETIF_F_GRO_HW) 4036 bp->flags |= BNXT_FLAG_GRO; 4037 } 4038 4039 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4040 * be set on entry. 4041 */ 4042 void bnxt_set_ring_params(struct bnxt *bp) 4043 { 4044 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4045 u32 agg_factor = 0, agg_ring_size = 0; 4046 4047 /* 8 for CRC and VLAN */ 4048 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4049 4050 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4051 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4052 4053 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 4054 ring_size = bp->rx_ring_size; 4055 bp->rx_agg_ring_size = 0; 4056 bp->rx_agg_nr_pages = 0; 4057 4058 if (bp->flags & BNXT_FLAG_TPA) 4059 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4060 4061 bp->flags &= ~BNXT_FLAG_JUMBO; 4062 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4063 u32 jumbo_factor; 4064 4065 bp->flags |= BNXT_FLAG_JUMBO; 4066 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4067 if (jumbo_factor > agg_factor) 4068 agg_factor = jumbo_factor; 4069 } 4070 if (agg_factor) { 4071 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4072 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4073 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4074 bp->rx_ring_size, ring_size); 4075 bp->rx_ring_size = ring_size; 4076 } 4077 agg_ring_size = ring_size * agg_factor; 4078 4079 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4080 RX_DESC_CNT); 4081 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4082 u32 tmp = agg_ring_size; 4083 4084 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4085 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4086 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4087 tmp, agg_ring_size); 4088 } 4089 bp->rx_agg_ring_size = agg_ring_size; 4090 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4091 4092 if (BNXT_RX_PAGE_MODE(bp)) { 4093 rx_space = PAGE_SIZE; 4094 rx_size = PAGE_SIZE - 4095 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4096 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4097 } else { 4098 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 4099 rx_space = 
rx_size + NET_SKB_PAD + 4100 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4101 } 4102 } 4103 4104 bp->rx_buf_use_size = rx_size; 4105 bp->rx_buf_size = rx_space; 4106 4107 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4108 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4109 4110 ring_size = bp->tx_ring_size; 4111 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4112 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4113 4114 max_rx_cmpl = bp->rx_ring_size; 4115 /* MAX TPA needs to be added because TPA_START completions are 4116 * immediately recycled, so the TPA completions are not bound by 4117 * the RX ring size. 4118 */ 4119 if (bp->flags & BNXT_FLAG_TPA) 4120 max_rx_cmpl += bp->max_tpa; 4121 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4122 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4123 bp->cp_ring_size = ring_size; 4124 4125 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4126 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4127 bp->cp_nr_pages = MAX_CP_PAGES; 4128 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4129 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4130 ring_size, bp->cp_ring_size); 4131 } 4132 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4133 bp->cp_ring_mask = bp->cp_bit - 1; 4134 } 4135 4136 /* Changing allocation mode of RX rings. 4137 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4138 */ 4139 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4140 { 4141 struct net_device *dev = bp->dev; 4142 4143 if (page_mode) { 4144 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 4145 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4146 4147 if (bp->xdp_prog->aux->xdp_has_frags) 4148 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4149 else 4150 dev->max_mtu = 4151 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4152 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4153 bp->flags |= BNXT_FLAG_JUMBO; 4154 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4155 } else { 4156 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4157 bp->rx_skb_func = bnxt_rx_page_skb; 4158 } 4159 bp->rx_dir = DMA_BIDIRECTIONAL; 4160 /* Disable LRO or GRO_HW */ 4161 netdev_update_features(dev); 4162 } else { 4163 dev->max_mtu = bp->max_mtu; 4164 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4165 bp->rx_dir = DMA_FROM_DEVICE; 4166 bp->rx_skb_func = bnxt_rx_skb; 4167 } 4168 return 0; 4169 } 4170 4171 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4172 { 4173 int i; 4174 struct bnxt_vnic_info *vnic; 4175 struct pci_dev *pdev = bp->pdev; 4176 4177 if (!bp->vnic_info) 4178 return; 4179 4180 for (i = 0; i < bp->nr_vnics; i++) { 4181 vnic = &bp->vnic_info[i]; 4182 4183 kfree(vnic->fw_grp_ids); 4184 vnic->fw_grp_ids = NULL; 4185 4186 kfree(vnic->uc_list); 4187 vnic->uc_list = NULL; 4188 4189 if (vnic->mc_list) { 4190 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4191 vnic->mc_list, vnic->mc_list_mapping); 4192 vnic->mc_list = NULL; 4193 } 4194 4195 if (vnic->rss_table) { 4196 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4197 vnic->rss_table, 4198 vnic->rss_table_dma_addr); 4199 vnic->rss_table = NULL; 4200 } 4201 4202 vnic->rss_hash_key = NULL; 4203 vnic->flags = 0; 4204 } 4205 } 4206 4207 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4208 { 4209 int i, rc = 0, size; 4210 struct bnxt_vnic_info *vnic; 4211 struct pci_dev *pdev = bp->pdev; 4212 int max_rings; 4213 4214 for (i = 0; i < bp->nr_vnics; i++) { 4215 vnic = &bp->vnic_info[i]; 4216 4217 if (vnic->flags & 
BNXT_VNIC_UCAST_FLAG) { 4218 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4219 4220 if (mem_size > 0) { 4221 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4222 if (!vnic->uc_list) { 4223 rc = -ENOMEM; 4224 goto out; 4225 } 4226 } 4227 } 4228 4229 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4230 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4231 vnic->mc_list = 4232 dma_alloc_coherent(&pdev->dev, 4233 vnic->mc_list_size, 4234 &vnic->mc_list_mapping, 4235 GFP_KERNEL); 4236 if (!vnic->mc_list) { 4237 rc = -ENOMEM; 4238 goto out; 4239 } 4240 } 4241 4242 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4243 goto vnic_skip_grps; 4244 4245 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4246 max_rings = bp->rx_nr_rings; 4247 else 4248 max_rings = 1; 4249 4250 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4251 if (!vnic->fw_grp_ids) { 4252 rc = -ENOMEM; 4253 goto out; 4254 } 4255 vnic_skip_grps: 4256 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 4257 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4258 continue; 4259 4260 /* Allocate rss table and hash key */ 4261 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4262 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4263 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4264 4265 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4266 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4267 vnic->rss_table_size, 4268 &vnic->rss_table_dma_addr, 4269 GFP_KERNEL); 4270 if (!vnic->rss_table) { 4271 rc = -ENOMEM; 4272 goto out; 4273 } 4274 4275 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4276 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4277 } 4278 return 0; 4279 4280 out: 4281 return rc; 4282 } 4283 4284 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4285 { 4286 struct bnxt_hwrm_wait_token *token; 4287 4288 dma_pool_destroy(bp->hwrm_dma_pool); 4289 bp->hwrm_dma_pool = NULL; 4290 4291 rcu_read_lock(); 4292 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4293 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4294 rcu_read_unlock(); 4295 } 4296 4297 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4298 { 4299 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4300 BNXT_HWRM_DMA_SIZE, 4301 BNXT_HWRM_DMA_ALIGN, 0); 4302 if (!bp->hwrm_dma_pool) 4303 return -ENOMEM; 4304 4305 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4306 4307 return 0; 4308 } 4309 4310 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4311 { 4312 kfree(stats->hw_masks); 4313 stats->hw_masks = NULL; 4314 kfree(stats->sw_stats); 4315 stats->sw_stats = NULL; 4316 if (stats->hw_stats) { 4317 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4318 stats->hw_stats_map); 4319 stats->hw_stats = NULL; 4320 } 4321 } 4322 4323 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4324 bool alloc_masks) 4325 { 4326 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4327 &stats->hw_stats_map, GFP_KERNEL); 4328 if (!stats->hw_stats) 4329 return -ENOMEM; 4330 4331 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4332 if (!stats->sw_stats) 4333 goto stats_mem_err; 4334 4335 if (alloc_masks) { 4336 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4337 if (!stats->hw_masks) 4338 goto stats_mem_err; 4339 } 4340 return 0; 4341 4342 stats_mem_err: 4343 bnxt_free_stats_mem(bp, stats); 4344 return -ENOMEM; 4345 } 4346 4347 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4348 { 4349 int i; 4350 4351 for (i = 0; i < count; i++) 4352 mask_arr[i] = mask; 4353 } 4354 
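/* Convert an array of little-endian 64-bit counter masks reported by the
 * firmware into host byte order for the driver's software mask array.
 */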
4355 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4356 { 4357 int i; 4358 4359 for (i = 0; i < count; i++) 4360 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4361 } 4362 4363 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4364 struct bnxt_stats_mem *stats) 4365 { 4366 struct hwrm_func_qstats_ext_output *resp; 4367 struct hwrm_func_qstats_ext_input *req; 4368 __le64 *hw_masks; 4369 int rc; 4370 4371 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 4372 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4373 return -EOPNOTSUPP; 4374 4375 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 4376 if (rc) 4377 return rc; 4378 4379 req->fid = cpu_to_le16(0xffff); 4380 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4381 4382 resp = hwrm_req_hold(bp, req); 4383 rc = hwrm_req_send(bp, req); 4384 if (!rc) { 4385 hw_masks = &resp->rx_ucast_pkts; 4386 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 4387 } 4388 hwrm_req_drop(bp, req); 4389 return rc; 4390 } 4391 4392 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 4393 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 4394 4395 static void bnxt_init_stats(struct bnxt *bp) 4396 { 4397 struct bnxt_napi *bnapi = bp->bnapi[0]; 4398 struct bnxt_cp_ring_info *cpr; 4399 struct bnxt_stats_mem *stats; 4400 __le64 *rx_stats, *tx_stats; 4401 int rc, rx_count, tx_count; 4402 u64 *rx_masks, *tx_masks; 4403 u64 mask; 4404 u8 flags; 4405 4406 cpr = &bnapi->cp_ring; 4407 stats = &cpr->stats; 4408 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 4409 if (rc) { 4410 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4411 mask = (1ULL << 48) - 1; 4412 else 4413 mask = -1ULL; 4414 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 4415 } 4416 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4417 stats = &bp->port_stats; 4418 rx_stats = stats->hw_stats; 4419 rx_masks = stats->hw_masks; 4420 rx_count = sizeof(struct rx_port_stats) / 8; 4421 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4422 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4423 tx_count = sizeof(struct tx_port_stats) / 8; 4424 4425 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 4426 rc = bnxt_hwrm_port_qstats(bp, flags); 4427 if (rc) { 4428 mask = (1ULL << 40) - 1; 4429 4430 bnxt_fill_masks(rx_masks, mask, rx_count); 4431 bnxt_fill_masks(tx_masks, mask, tx_count); 4432 } else { 4433 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4434 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 4435 bnxt_hwrm_port_qstats(bp, 0); 4436 } 4437 } 4438 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 4439 stats = &bp->rx_port_stats_ext; 4440 rx_stats = stats->hw_stats; 4441 rx_masks = stats->hw_masks; 4442 rx_count = sizeof(struct rx_port_stats_ext) / 8; 4443 stats = &bp->tx_port_stats_ext; 4444 tx_stats = stats->hw_stats; 4445 tx_masks = stats->hw_masks; 4446 tx_count = sizeof(struct tx_port_stats_ext) / 8; 4447 4448 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 4449 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 4450 if (rc) { 4451 mask = (1ULL << 40) - 1; 4452 4453 bnxt_fill_masks(rx_masks, mask, rx_count); 4454 if (tx_stats) 4455 bnxt_fill_masks(tx_masks, mask, tx_count); 4456 } else { 4457 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 4458 if (tx_stats) 4459 bnxt_copy_hw_masks(tx_masks, tx_stats, 4460 tx_count); 4461 bnxt_hwrm_port_qstats_ext(bp, 0); 4462 } 4463 } 4464 } 4465 4466 static void bnxt_free_port_stats(struct bnxt *bp) 4467 { 4468 bp->flags &= ~BNXT_FLAG_PORT_STATS; 4469 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 4470 4471 
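	/* Release the DMA and shadow buffers for the base port stats and the
	 * extended rx/tx port stats.
	 */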
bnxt_free_stats_mem(bp, &bp->port_stats); 4472 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 4473 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 4474 } 4475 4476 static void bnxt_free_ring_stats(struct bnxt *bp) 4477 { 4478 int i; 4479 4480 if (!bp->bnapi) 4481 return; 4482 4483 for (i = 0; i < bp->cp_nr_rings; i++) { 4484 struct bnxt_napi *bnapi = bp->bnapi[i]; 4485 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4486 4487 bnxt_free_stats_mem(bp, &cpr->stats); 4488 } 4489 } 4490 4491 static int bnxt_alloc_stats(struct bnxt *bp) 4492 { 4493 u32 size, i; 4494 int rc; 4495 4496 size = bp->hw_ring_stats_size; 4497 4498 for (i = 0; i < bp->cp_nr_rings; i++) { 4499 struct bnxt_napi *bnapi = bp->bnapi[i]; 4500 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4501 4502 cpr->stats.len = size; 4503 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4504 if (rc) 4505 return rc; 4506 4507 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4508 } 4509 4510 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4511 return 0; 4512 4513 if (bp->port_stats.hw_stats) 4514 goto alloc_ext_stats; 4515 4516 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4517 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4518 if (rc) 4519 return rc; 4520 4521 bp->flags |= BNXT_FLAG_PORT_STATS; 4522 4523 alloc_ext_stats: 4524 /* Display extended statistics only if FW supports it */ 4525 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4526 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4527 return 0; 4528 4529 if (bp->rx_port_stats_ext.hw_stats) 4530 goto alloc_tx_ext_stats; 4531 4532 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4533 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4534 /* Extended stats are optional */ 4535 if (rc) 4536 return 0; 4537 4538 alloc_tx_ext_stats: 4539 if (bp->tx_port_stats_ext.hw_stats) 4540 return 0; 4541 4542 if (bp->hwrm_spec_code >= 0x10902 || 4543 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4544 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4545 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4546 /* Extended stats are optional */ 4547 if (rc) 4548 return 0; 4549 } 4550 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4551 return 0; 4552 } 4553 4554 static void bnxt_clear_ring_indices(struct bnxt *bp) 4555 { 4556 int i, j; 4557 4558 if (!bp->bnapi) 4559 return; 4560 4561 for (i = 0; i < bp->cp_nr_rings; i++) { 4562 struct bnxt_napi *bnapi = bp->bnapi[i]; 4563 struct bnxt_cp_ring_info *cpr; 4564 struct bnxt_rx_ring_info *rxr; 4565 struct bnxt_tx_ring_info *txr; 4566 4567 if (!bnapi) 4568 continue; 4569 4570 cpr = &bnapi->cp_ring; 4571 cpr->cp_raw_cons = 0; 4572 4573 bnxt_for_each_napi_tx(j, bnapi, txr) { 4574 txr->tx_prod = 0; 4575 txr->tx_cons = 0; 4576 txr->tx_hw_cons = 0; 4577 } 4578 4579 rxr = bnapi->rx_ring; 4580 if (rxr) { 4581 rxr->rx_prod = 0; 4582 rxr->rx_agg_prod = 0; 4583 rxr->rx_sw_agg_prod = 0; 4584 rxr->rx_next_cons = 0; 4585 } 4586 bnapi->events = 0; 4587 } 4588 } 4589 4590 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 4591 { 4592 #ifdef CONFIG_RFS_ACCEL 4593 int i; 4594 4595 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4596 * safe to delete the hash table. 
4597 */ 4598 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4599 struct hlist_head *head; 4600 struct hlist_node *tmp; 4601 struct bnxt_ntuple_filter *fltr; 4602 4603 head = &bp->ntp_fltr_hash_tbl[i]; 4604 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 4605 hlist_del(&fltr->hash); 4606 kfree(fltr); 4607 } 4608 } 4609 if (irq_reinit) { 4610 bitmap_free(bp->ntp_fltr_bmap); 4611 bp->ntp_fltr_bmap = NULL; 4612 } 4613 bp->ntp_fltr_count = 0; 4614 #endif 4615 } 4616 4617 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4618 { 4619 #ifdef CONFIG_RFS_ACCEL 4620 int i, rc = 0; 4621 4622 if (!(bp->flags & BNXT_FLAG_RFS)) 4623 return 0; 4624 4625 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4626 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4627 4628 bp->ntp_fltr_count = 0; 4629 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL); 4630 4631 if (!bp->ntp_fltr_bmap) 4632 rc = -ENOMEM; 4633 4634 return rc; 4635 #else 4636 return 0; 4637 #endif 4638 } 4639 4640 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 4641 { 4642 bnxt_free_vnic_attributes(bp); 4643 bnxt_free_tx_rings(bp); 4644 bnxt_free_rx_rings(bp); 4645 bnxt_free_cp_rings(bp); 4646 bnxt_free_all_cp_arrays(bp); 4647 bnxt_free_ntp_fltrs(bp, irq_re_init); 4648 if (irq_re_init) { 4649 bnxt_free_ring_stats(bp); 4650 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 4651 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 4652 bnxt_free_port_stats(bp); 4653 bnxt_free_ring_grps(bp); 4654 bnxt_free_vnics(bp); 4655 kfree(bp->tx_ring_map); 4656 bp->tx_ring_map = NULL; 4657 kfree(bp->tx_ring); 4658 bp->tx_ring = NULL; 4659 kfree(bp->rx_ring); 4660 bp->rx_ring = NULL; 4661 kfree(bp->bnapi); 4662 bp->bnapi = NULL; 4663 } else { 4664 bnxt_clear_ring_indices(bp); 4665 } 4666 } 4667 4668 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 4669 { 4670 int i, j, rc, size, arr_size; 4671 void *bnapi; 4672 4673 if (irq_re_init) { 4674 /* Allocate bnapi mem pointer array and mem block for 4675 * all queues 4676 */ 4677 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 4678 bp->cp_nr_rings); 4679 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 4680 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 4681 if (!bnapi) 4682 return -ENOMEM; 4683 4684 bp->bnapi = bnapi; 4685 bnapi += arr_size; 4686 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 4687 bp->bnapi[i] = bnapi; 4688 bp->bnapi[i]->index = i; 4689 bp->bnapi[i]->bp = bp; 4690 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4691 struct bnxt_cp_ring_info *cpr = 4692 &bp->bnapi[i]->cp_ring; 4693 4694 cpr->cp_ring_struct.ring_mem.flags = 4695 BNXT_RMEM_RING_PTE_FLAG; 4696 } 4697 } 4698 4699 bp->rx_ring = kcalloc(bp->rx_nr_rings, 4700 sizeof(struct bnxt_rx_ring_info), 4701 GFP_KERNEL); 4702 if (!bp->rx_ring) 4703 return -ENOMEM; 4704 4705 for (i = 0; i < bp->rx_nr_rings; i++) { 4706 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4707 4708 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 4709 rxr->rx_ring_struct.ring_mem.flags = 4710 BNXT_RMEM_RING_PTE_FLAG; 4711 rxr->rx_agg_ring_struct.ring_mem.flags = 4712 BNXT_RMEM_RING_PTE_FLAG; 4713 } else { 4714 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 4715 } 4716 rxr->bnapi = bp->bnapi[i]; 4717 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4718 } 4719 4720 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4721 sizeof(struct bnxt_tx_ring_info), 4722 GFP_KERNEL); 4723 if (!bp->tx_ring) 4724 return -ENOMEM; 4725 4726 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4727 GFP_KERNEL); 4728 4729 if (!bp->tx_ring_map) 4730 return -ENOMEM; 
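		/* Pair each TX ring with a NAPI instance.  j is the first NAPI
		 * index available to TX: 0 when TX shares NAPIs with RX
		 * (BNXT_FLAG_SHARED_RINGS), or just past the RX NAPIs
		 * otherwise.  XDP TX rings consume NAPIs from j upward and are
		 * flagged BNXT_NAPI_FLAG_XDP; the remaining TX rings are then
		 * placed per traffic class relative to j.
		 */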
4731 4732 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4733 j = 0; 4734 else 4735 j = bp->rx_nr_rings; 4736 4737 for (i = 0; i < bp->tx_nr_rings; i++) { 4738 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4739 struct bnxt_napi *bnapi2; 4740 4741 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4742 txr->tx_ring_struct.ring_mem.flags = 4743 BNXT_RMEM_RING_PTE_FLAG; 4744 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4745 if (i >= bp->tx_nr_rings_xdp) { 4746 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 4747 4748 bnapi2 = bp->bnapi[k]; 4749 txr->txq_index = i - bp->tx_nr_rings_xdp; 4750 txr->tx_napi_idx = 4751 BNXT_RING_TO_TC(bp, txr->txq_index); 4752 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 4753 bnapi2->tx_int = bnxt_tx_int; 4754 } else { 4755 bnapi2 = bp->bnapi[j]; 4756 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 4757 bnapi2->tx_ring[0] = txr; 4758 bnapi2->tx_int = bnxt_tx_int_xdp; 4759 j++; 4760 } 4761 txr->bnapi = bnapi2; 4762 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4763 txr->tx_cpr = &bnapi2->cp_ring; 4764 } 4765 4766 rc = bnxt_alloc_stats(bp); 4767 if (rc) 4768 goto alloc_mem_err; 4769 bnxt_init_stats(bp); 4770 4771 rc = bnxt_alloc_ntp_fltrs(bp); 4772 if (rc) 4773 goto alloc_mem_err; 4774 4775 rc = bnxt_alloc_vnics(bp); 4776 if (rc) 4777 goto alloc_mem_err; 4778 } 4779 4780 rc = bnxt_alloc_all_cp_arrays(bp); 4781 if (rc) 4782 goto alloc_mem_err; 4783 4784 bnxt_init_ring_struct(bp); 4785 4786 rc = bnxt_alloc_rx_rings(bp); 4787 if (rc) 4788 goto alloc_mem_err; 4789 4790 rc = bnxt_alloc_tx_rings(bp); 4791 if (rc) 4792 goto alloc_mem_err; 4793 4794 rc = bnxt_alloc_cp_rings(bp); 4795 if (rc) 4796 goto alloc_mem_err; 4797 4798 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4799 BNXT_VNIC_UCAST_FLAG; 4800 rc = bnxt_alloc_vnic_attributes(bp); 4801 if (rc) 4802 goto alloc_mem_err; 4803 return 0; 4804 4805 alloc_mem_err: 4806 bnxt_free_mem(bp, true); 4807 return rc; 4808 } 4809 4810 static void bnxt_disable_int(struct bnxt *bp) 4811 { 4812 int i; 4813 4814 if (!bp->bnapi) 4815 return; 4816 4817 for (i = 0; i < bp->cp_nr_rings; i++) { 4818 struct bnxt_napi *bnapi = bp->bnapi[i]; 4819 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4820 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4821 4822 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4823 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4824 } 4825 } 4826 4827 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4828 { 4829 struct bnxt_napi *bnapi = bp->bnapi[n]; 4830 struct bnxt_cp_ring_info *cpr; 4831 4832 cpr = &bnapi->cp_ring; 4833 return cpr->cp_ring_struct.map_idx; 4834 } 4835 4836 static void bnxt_disable_int_sync(struct bnxt *bp) 4837 { 4838 int i; 4839 4840 if (!bp->irq_tbl) 4841 return; 4842 4843 atomic_inc(&bp->intr_sem); 4844 4845 bnxt_disable_int(bp); 4846 for (i = 0; i < bp->cp_nr_rings; i++) { 4847 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4848 4849 synchronize_irq(bp->irq_tbl[map_idx].vector); 4850 } 4851 } 4852 4853 static void bnxt_enable_int(struct bnxt *bp) 4854 { 4855 int i; 4856 4857 atomic_set(&bp->intr_sem, 0); 4858 for (i = 0; i < bp->cp_nr_rings; i++) { 4859 struct bnxt_napi *bnapi = bp->bnapi[i]; 4860 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4861 4862 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4863 } 4864 } 4865 4866 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4867 bool async_only) 4868 { 4869 DECLARE_BITMAP(async_events_bmap, 256); 4870 u32 *events = (u32 *)async_events_bmap; 4871 struct hwrm_func_drv_rgtr_output *resp; 4872 struct 
hwrm_func_drv_rgtr_input *req; 4873 u32 flags; 4874 int rc, i; 4875 4876 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 4877 if (rc) 4878 return rc; 4879 4880 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4881 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4882 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4883 4884 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4885 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4886 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4887 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4888 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4889 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4890 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4891 req->flags = cpu_to_le32(flags); 4892 req->ver_maj_8b = DRV_VER_MAJ; 4893 req->ver_min_8b = DRV_VER_MIN; 4894 req->ver_upd_8b = DRV_VER_UPD; 4895 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 4896 req->ver_min = cpu_to_le16(DRV_VER_MIN); 4897 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 4898 4899 if (BNXT_PF(bp)) { 4900 u32 data[8]; 4901 int i; 4902 4903 memset(data, 0, sizeof(data)); 4904 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4905 u16 cmd = bnxt_vf_req_snif[i]; 4906 unsigned int bit, idx; 4907 4908 idx = cmd / 32; 4909 bit = cmd % 32; 4910 data[idx] |= 1 << bit; 4911 } 4912 4913 for (i = 0; i < 8; i++) 4914 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 4915 4916 req->enables |= 4917 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4918 } 4919 4920 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4921 req->flags |= cpu_to_le32( 4922 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4923 4924 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4925 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4926 u16 event_id = bnxt_async_events_arr[i]; 4927 4928 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4929 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4930 continue; 4931 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 4932 !bp->ptp_cfg) 4933 continue; 4934 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4935 } 4936 if (bmap && bmap_size) { 4937 for (i = 0; i < bmap_size; i++) { 4938 if (test_bit(i, bmap)) 4939 __set_bit(i, async_events_bmap); 4940 } 4941 } 4942 for (i = 0; i < 8; i++) 4943 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 4944 4945 if (async_only) 4946 req->enables = 4947 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4948 4949 resp = hwrm_req_hold(bp, req); 4950 rc = hwrm_req_send(bp, req); 4951 if (!rc) { 4952 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4953 if (resp->flags & 4954 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4955 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4956 } 4957 hwrm_req_drop(bp, req); 4958 return rc; 4959 } 4960 4961 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4962 { 4963 struct hwrm_func_drv_unrgtr_input *req; 4964 int rc; 4965 4966 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4967 return 0; 4968 4969 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 4970 if (rc) 4971 return rc; 4972 return hwrm_req_send(bp, req); 4973 } 4974 4975 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4976 { 4977 struct hwrm_tunnel_dst_port_free_input *req; 4978 int rc; 4979 4980 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 4981 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 4982 return 0; 4983 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 4984 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 4985 return 0; 4986 4987 rc = hwrm_req_init(bp, 
req, HWRM_TUNNEL_DST_PORT_FREE); 4988 if (rc) 4989 return rc; 4990 4991 req->tunnel_type = tunnel_type; 4992 4993 switch (tunnel_type) { 4994 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4995 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 4996 bp->vxlan_port = 0; 4997 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 4998 break; 4999 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5000 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5001 bp->nge_port = 0; 5002 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5003 break; 5004 default: 5005 break; 5006 } 5007 5008 rc = hwrm_req_send(bp, req); 5009 if (rc) 5010 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 5011 rc); 5012 return rc; 5013 } 5014 5015 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5016 u8 tunnel_type) 5017 { 5018 struct hwrm_tunnel_dst_port_alloc_output *resp; 5019 struct hwrm_tunnel_dst_port_alloc_input *req; 5020 int rc; 5021 5022 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5023 if (rc) 5024 return rc; 5025 5026 req->tunnel_type = tunnel_type; 5027 req->tunnel_dst_port_val = port; 5028 5029 resp = hwrm_req_hold(bp, req); 5030 rc = hwrm_req_send(bp, req); 5031 if (rc) { 5032 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5033 rc); 5034 goto err_out; 5035 } 5036 5037 switch (tunnel_type) { 5038 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5039 bp->vxlan_port = port; 5040 bp->vxlan_fw_dst_port_id = 5041 le16_to_cpu(resp->tunnel_dst_port_id); 5042 break; 5043 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5044 bp->nge_port = port; 5045 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5046 break; 5047 default: 5048 break; 5049 } 5050 5051 err_out: 5052 hwrm_req_drop(bp, req); 5053 return rc; 5054 } 5055 5056 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5057 { 5058 struct hwrm_cfa_l2_set_rx_mask_input *req; 5059 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5060 int rc; 5061 5062 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5063 if (rc) 5064 return rc; 5065 5066 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5067 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5068 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5069 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5070 } 5071 req->mask = cpu_to_le32(vnic->rx_mask); 5072 return hwrm_req_send_silent(bp, req); 5073 } 5074 5075 #ifdef CONFIG_RFS_ACCEL 5076 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 5077 struct bnxt_ntuple_filter *fltr) 5078 { 5079 struct hwrm_cfa_ntuple_filter_free_input *req; 5080 int rc; 5081 5082 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 5083 if (rc) 5084 return rc; 5085 5086 req->ntuple_filter_id = fltr->filter_id; 5087 return hwrm_req_send(bp, req); 5088 } 5089 5090 #define BNXT_NTP_FLTR_FLAGS \ 5091 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 5092 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 5093 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 5094 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 5095 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 5096 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 5097 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 5098 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 5099 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 5100 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 5101 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 5102 
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 5103 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 5104 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 5105 5106 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 5107 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 5108 5109 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5110 struct bnxt_ntuple_filter *fltr) 5111 { 5112 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 5113 struct hwrm_cfa_ntuple_filter_alloc_input *req; 5114 struct flow_keys *keys = &fltr->fkeys; 5115 struct bnxt_vnic_info *vnic; 5116 u32 flags = 0; 5117 int rc; 5118 5119 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 5120 if (rc) 5121 return rc; 5122 5123 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 5124 5125 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 5126 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5127 req->dst_id = cpu_to_le16(fltr->rxq); 5128 } else { 5129 vnic = &bp->vnic_info[fltr->rxq + 1]; 5130 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5131 } 5132 req->flags = cpu_to_le32(flags); 5133 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5134 5135 req->ethertype = htons(ETH_P_IP); 5136 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); 5137 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 5138 req->ip_protocol = keys->basic.ip_proto; 5139 5140 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 5141 int i; 5142 5143 req->ethertype = htons(ETH_P_IPV6); 5144 req->ip_addr_type = 5145 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 5146 *(struct in6_addr *)&req->src_ipaddr[0] = 5147 keys->addrs.v6addrs.src; 5148 *(struct in6_addr *)&req->dst_ipaddr[0] = 5149 keys->addrs.v6addrs.dst; 5150 for (i = 0; i < 4; i++) { 5151 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 5152 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 5153 } 5154 } else { 5155 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 5156 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5157 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 5158 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 5159 } 5160 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 5161 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 5162 req->tunnel_type = 5163 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 5164 } 5165 5166 req->src_port = keys->ports.src; 5167 req->src_port_mask = cpu_to_be16(0xffff); 5168 req->dst_port = keys->ports.dst; 5169 req->dst_port_mask = cpu_to_be16(0xffff); 5170 5171 resp = hwrm_req_hold(bp, req); 5172 rc = hwrm_req_send(bp, req); 5173 if (!rc) 5174 fltr->filter_id = resp->ntuple_filter_id; 5175 hwrm_req_drop(bp, req); 5176 return rc; 5177 } 5178 #endif 5179 5180 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 5181 const u8 *mac_addr) 5182 { 5183 struct hwrm_cfa_l2_filter_alloc_output *resp; 5184 struct hwrm_cfa_l2_filter_alloc_input *req; 5185 int rc; 5186 5187 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 5188 if (rc) 5189 return rc; 5190 5191 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 5192 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 5193 req->flags |= 5194 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 5195 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 5196 req->enables = 5197 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 5198 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 5199 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 5200 memcpy(req->l2_addr, mac_addr, ETH_ALEN); 5201 req->l2_addr_mask[0] = 0xff; 5202 
req->l2_addr_mask[1] = 0xff; 5203 req->l2_addr_mask[2] = 0xff; 5204 req->l2_addr_mask[3] = 0xff; 5205 req->l2_addr_mask[4] = 0xff; 5206 req->l2_addr_mask[5] = 0xff; 5207 5208 resp = hwrm_req_hold(bp, req); 5209 rc = hwrm_req_send(bp, req); 5210 if (!rc) 5211 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 5212 resp->l2_filter_id; 5213 hwrm_req_drop(bp, req); 5214 return rc; 5215 } 5216 5217 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 5218 { 5219 struct hwrm_cfa_l2_filter_free_input *req; 5220 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 5221 int rc; 5222 5223 /* Any associated ntuple filters will also be cleared by firmware. */ 5224 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 5225 if (rc) 5226 return rc; 5227 hwrm_req_hold(bp, req); 5228 for (i = 0; i < num_of_vnics; i++) { 5229 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5230 5231 for (j = 0; j < vnic->uc_filter_count; j++) { 5232 req->l2_filter_id = vnic->fw_l2_filter_id[j]; 5233 5234 rc = hwrm_req_send(bp, req); 5235 } 5236 vnic->uc_filter_count = 0; 5237 } 5238 hwrm_req_drop(bp, req); 5239 return rc; 5240 } 5241 5242 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 5243 { 5244 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5245 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 5246 struct hwrm_vnic_tpa_cfg_input *req; 5247 int rc; 5248 5249 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 5250 return 0; 5251 5252 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 5253 if (rc) 5254 return rc; 5255 5256 if (tpa_flags) { 5257 u16 mss = bp->dev->mtu - 40; 5258 u32 nsegs, n, segs = 0, flags; 5259 5260 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 5261 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 5262 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 5263 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 5264 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 5265 if (tpa_flags & BNXT_FLAG_GRO) 5266 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 5267 5268 req->flags = cpu_to_le32(flags); 5269 5270 req->enables = 5271 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 5272 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 5273 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 5274 5275 /* Number of segs are log2 units, and first packet is not 5276 * included as part of this units. 
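		 * For example, with BNXT_RX_PAGE_SIZE = 4K, an MSS of 1460 and
		 * the usual MAX_SKB_FRAGS of 17: n = 4096 / 1460 = 2 and
		 * nsegs = (17 - 1) * 2 = 32, so the !BNXT_FLAG_CHIP_P5_PLUS
		 * branch below programs ilog2(32) = 5.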
5277 */ 5278 if (mss <= BNXT_RX_PAGE_SIZE) { 5279 n = BNXT_RX_PAGE_SIZE / mss; 5280 nsegs = (MAX_SKB_FRAGS - 1) * n; 5281 } else { 5282 n = mss / BNXT_RX_PAGE_SIZE; 5283 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 5284 n++; 5285 nsegs = (MAX_SKB_FRAGS - n) / n; 5286 } 5287 5288 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5289 segs = MAX_TPA_SEGS_P5; 5290 max_aggs = bp->max_tpa; 5291 } else { 5292 segs = ilog2(nsegs); 5293 } 5294 req->max_agg_segs = cpu_to_le16(segs); 5295 req->max_aggs = cpu_to_le16(max_aggs); 5296 5297 req->min_agg_len = cpu_to_le32(512); 5298 } 5299 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5300 5301 return hwrm_req_send(bp, req); 5302 } 5303 5304 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 5305 { 5306 struct bnxt_ring_grp_info *grp_info; 5307 5308 grp_info = &bp->grp_info[ring->grp_idx]; 5309 return grp_info->cp_fw_ring_id; 5310 } 5311 5312 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 5313 { 5314 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5315 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 5316 else 5317 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 5318 } 5319 5320 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 5321 { 5322 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5323 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 5324 else 5325 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 5326 } 5327 5328 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 5329 { 5330 int entries; 5331 5332 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5333 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 5334 else 5335 entries = HW_HASH_INDEX_SIZE; 5336 5337 bp->rss_indir_tbl_entries = entries; 5338 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), 5339 GFP_KERNEL); 5340 if (!bp->rss_indir_tbl) 5341 return -ENOMEM; 5342 return 0; 5343 } 5344 5345 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) 5346 { 5347 u16 max_rings, max_entries, pad, i; 5348 5349 if (!bp->rx_nr_rings) 5350 return; 5351 5352 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5353 max_rings = bp->rx_nr_rings - 1; 5354 else 5355 max_rings = bp->rx_nr_rings; 5356 5357 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 5358 5359 for (i = 0; i < max_entries; i++) 5360 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 5361 5362 pad = bp->rss_indir_tbl_entries - max_entries; 5363 if (pad) 5364 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 5365 } 5366 5367 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 5368 { 5369 u16 i, tbl_size, max_ring = 0; 5370 5371 if (!bp->rss_indir_tbl) 5372 return 0; 5373 5374 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5375 for (i = 0; i < tbl_size; i++) 5376 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 5377 return max_ring; 5378 } 5379 5380 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5381 { 5382 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5383 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5384 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5385 return 2; 5386 return 1; 5387 } 5388 5389 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5390 { 5391 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 5392 u16 i, j; 5393 5394 /* Fill the RSS indirection table with ring group ids */ 5395 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 5396 if (!no_rss) 5397 j = bp->rss_indir_tbl[i]; 5398 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 5399 } 5400 } 5401 5402 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 5403 struct bnxt_vnic_info 
*vnic) 5404 { 5405 __le16 *ring_tbl = vnic->rss_table; 5406 struct bnxt_rx_ring_info *rxr; 5407 u16 tbl_size, i; 5408 5409 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5410 5411 for (i = 0; i < tbl_size; i++) { 5412 u16 ring_id, j; 5413 5414 j = bp->rss_indir_tbl[i]; 5415 rxr = &bp->rx_ring[j]; 5416 5417 ring_id = rxr->rx_ring_struct.fw_ring_id; 5418 *ring_tbl++ = cpu_to_le16(ring_id); 5419 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5420 *ring_tbl++ = cpu_to_le16(ring_id); 5421 } 5422 } 5423 5424 static void 5425 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 5426 struct bnxt_vnic_info *vnic) 5427 { 5428 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5429 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 5430 else 5431 bnxt_fill_hw_rss_tbl(bp, vnic); 5432 5433 if (bp->rss_hash_delta) { 5434 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 5435 if (bp->rss_hash_cfg & bp->rss_hash_delta) 5436 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 5437 else 5438 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 5439 } else { 5440 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 5441 } 5442 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5443 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 5444 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 5445 } 5446 5447 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 5448 { 5449 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5450 struct hwrm_vnic_rss_cfg_input *req; 5451 int rc; 5452 5453 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 5454 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 5455 return 0; 5456 5457 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 5458 if (rc) 5459 return rc; 5460 5461 if (set_rss) 5462 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 5463 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5464 return hwrm_req_send(bp, req); 5465 } 5466 5467 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 5468 { 5469 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5470 struct hwrm_vnic_rss_cfg_input *req; 5471 dma_addr_t ring_tbl_map; 5472 u32 i, nr_ctxs; 5473 int rc; 5474 5475 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 5476 if (rc) 5477 return rc; 5478 5479 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5480 if (!set_rss) 5481 return hwrm_req_send(bp, req); 5482 5483 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 5484 ring_tbl_map = vnic->rss_table_dma_addr; 5485 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 5486 5487 hwrm_req_hold(bp, req); 5488 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 5489 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 5490 req->ring_table_pair_index = i; 5491 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 5492 rc = hwrm_req_send(bp, req); 5493 if (rc) 5494 goto exit; 5495 } 5496 5497 exit: 5498 hwrm_req_drop(bp, req); 5499 return rc; 5500 } 5501 5502 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 5503 { 5504 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5505 struct hwrm_vnic_rss_qcfg_output *resp; 5506 struct hwrm_vnic_rss_qcfg_input *req; 5507 5508 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 5509 return; 5510 5511 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5512 /* all contexts configured to same hash_type, zero always exists */ 5513 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5514 resp = hwrm_req_hold(bp, req); 5515 if (!hwrm_req_send(bp, req)) { 5516 bp->rss_hash_cfg = 
le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 5517 bp->rss_hash_delta = 0; 5518 } 5519 hwrm_req_drop(bp, req); 5520 } 5521 5522 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 5523 { 5524 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5525 struct hwrm_vnic_plcmodes_cfg_input *req; 5526 int rc; 5527 5528 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 5529 if (rc) 5530 return rc; 5531 5532 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 5533 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 5534 5535 if (BNXT_RX_PAGE_MODE(bp)) { 5536 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 5537 } else { 5538 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 5539 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 5540 req->enables |= 5541 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 5542 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 5543 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 5544 } 5545 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5546 return hwrm_req_send(bp, req); 5547 } 5548 5549 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 5550 u16 ctx_idx) 5551 { 5552 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 5553 5554 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 5555 return; 5556 5557 req->rss_cos_lb_ctx_id = 5558 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 5559 5560 hwrm_req_send(bp, req); 5561 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 5562 } 5563 5564 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 5565 { 5566 int i, j; 5567 5568 for (i = 0; i < bp->nr_vnics; i++) { 5569 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5570 5571 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 5572 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 5573 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 5574 } 5575 } 5576 bp->rsscos_nr_ctxs = 0; 5577 } 5578 5579 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 5580 { 5581 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 5582 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 5583 int rc; 5584 5585 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 5586 if (rc) 5587 return rc; 5588 5589 resp = hwrm_req_hold(bp, req); 5590 rc = hwrm_req_send(bp, req); 5591 if (!rc) 5592 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 5593 le16_to_cpu(resp->rss_cos_lb_ctx_id); 5594 hwrm_req_drop(bp, req); 5595 5596 return rc; 5597 } 5598 5599 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 5600 { 5601 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 5602 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 5603 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 5604 } 5605 5606 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 5607 { 5608 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5609 struct hwrm_vnic_cfg_input *req; 5610 unsigned int ring = 0, grp_idx; 5611 u16 def_vlan = 0; 5612 int rc; 5613 5614 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 5615 if (rc) 5616 return rc; 5617 5618 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5619 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5620 5621 req->default_rx_ring_id = 5622 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5623 req->default_cmpl_ring_id = 5624 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5625 req->enables = 5626 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5627 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5628 goto vnic_mru; 5629 } 5630 req->enables = 
cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5631 /* Only RSS support for now TBD: COS & LB */ 5632 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5633 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5634 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5635 VNIC_CFG_REQ_ENABLES_MRU); 5636 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5637 req->rss_rule = 5638 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5639 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5640 VNIC_CFG_REQ_ENABLES_MRU); 5641 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5642 } else { 5643 req->rss_rule = cpu_to_le16(0xffff); 5644 } 5645 5646 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5647 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5648 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5649 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5650 } else { 5651 req->cos_rule = cpu_to_le16(0xffff); 5652 } 5653 5654 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5655 ring = 0; 5656 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5657 ring = vnic_id - 1; 5658 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5659 ring = bp->rx_nr_rings - 1; 5660 5661 grp_idx = bp->rx_ring[ring].bnapi->index; 5662 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5663 req->lb_rule = cpu_to_le16(0xffff); 5664 vnic_mru: 5665 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 5666 5667 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5668 #ifdef CONFIG_BNXT_SRIOV 5669 if (BNXT_VF(bp)) 5670 def_vlan = bp->vf.vlan; 5671 #endif 5672 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5673 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5674 if (!vnic_id && bnxt_ulp_registered(bp->edev)) 5675 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5676 5677 return hwrm_req_send(bp, req); 5678 } 5679 5680 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5681 { 5682 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5683 struct hwrm_vnic_free_input *req; 5684 5685 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 5686 return; 5687 5688 req->vnic_id = 5689 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5690 5691 hwrm_req_send(bp, req); 5692 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5693 } 5694 } 5695 5696 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5697 { 5698 u16 i; 5699 5700 for (i = 0; i < bp->nr_vnics; i++) 5701 bnxt_hwrm_vnic_free_one(bp, i); 5702 } 5703 5704 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5705 unsigned int start_rx_ring_idx, 5706 unsigned int nr_rings) 5707 { 5708 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5709 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5710 struct hwrm_vnic_alloc_output *resp; 5711 struct hwrm_vnic_alloc_input *req; 5712 int rc; 5713 5714 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 5715 if (rc) 5716 return rc; 5717 5718 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5719 goto vnic_no_ring_grps; 5720 5721 /* map ring groups to this vnic */ 5722 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5723 grp_idx = bp->rx_ring[i].bnapi->index; 5724 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5725 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5726 j, nr_rings); 5727 break; 5728 } 5729 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5730 } 5731 5732 vnic_no_ring_grps: 5733 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5734 vnic->fw_rss_cos_lb_ctx[i] = 
INVALID_HW_RING_ID; 5735 if (vnic_id == 0) 5736 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5737 5738 resp = hwrm_req_hold(bp, req); 5739 rc = hwrm_req_send(bp, req); 5740 if (!rc) 5741 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5742 hwrm_req_drop(bp, req); 5743 return rc; 5744 } 5745 5746 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5747 { 5748 struct hwrm_vnic_qcaps_output *resp; 5749 struct hwrm_vnic_qcaps_input *req; 5750 int rc; 5751 5752 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5753 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5754 if (bp->hwrm_spec_code < 0x10600) 5755 return 0; 5756 5757 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 5758 if (rc) 5759 return rc; 5760 5761 resp = hwrm_req_hold(bp, req); 5762 rc = hwrm_req_send(bp, req); 5763 if (!rc) { 5764 u32 flags = le32_to_cpu(resp->flags); 5765 5766 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 5767 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5768 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5769 if (flags & 5770 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5771 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5772 5773 /* Older P5 fw before EXT_HW_STATS support did not set 5774 * VLAN_STRIP_CAP properly. 5775 */ 5776 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 5777 (BNXT_CHIP_P5(bp) && 5778 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 5779 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 5780 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 5781 bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA; 5782 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5783 if (bp->max_tpa_v2) { 5784 if (BNXT_CHIP_P5(bp)) 5785 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 5786 else 5787 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; 5788 } 5789 } 5790 hwrm_req_drop(bp, req); 5791 return rc; 5792 } 5793 5794 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5795 { 5796 struct hwrm_ring_grp_alloc_output *resp; 5797 struct hwrm_ring_grp_alloc_input *req; 5798 int rc; 5799 u16 i; 5800 5801 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5802 return 0; 5803 5804 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 5805 if (rc) 5806 return rc; 5807 5808 resp = hwrm_req_hold(bp, req); 5809 for (i = 0; i < bp->rx_nr_rings; i++) { 5810 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5811 5812 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5813 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5814 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5815 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5816 5817 rc = hwrm_req_send(bp, req); 5818 5819 if (rc) 5820 break; 5821 5822 bp->grp_info[grp_idx].fw_grp_id = 5823 le32_to_cpu(resp->ring_group_id); 5824 } 5825 hwrm_req_drop(bp, req); 5826 return rc; 5827 } 5828 5829 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5830 { 5831 struct hwrm_ring_grp_free_input *req; 5832 u16 i; 5833 5834 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5835 return; 5836 5837 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 5838 return; 5839 5840 hwrm_req_hold(bp, req); 5841 for (i = 0; i < bp->cp_nr_rings; i++) { 5842 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5843 continue; 5844 req->ring_group_id = 5845 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5846 5847 hwrm_req_send(bp, req); 5848 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5849 } 5850 hwrm_req_drop(bp, req); 5851 } 5852 5853 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5854 struct bnxt_ring_struct *ring, 5855 u32 
ring_type, u32 map_index) 5856 { 5857 struct hwrm_ring_alloc_output *resp; 5858 struct hwrm_ring_alloc_input *req; 5859 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5860 struct bnxt_ring_grp_info *grp_info; 5861 int rc, err = 0; 5862 u16 ring_id; 5863 5864 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 5865 if (rc) 5866 goto exit; 5867 5868 req->enables = 0; 5869 if (rmem->nr_pages > 1) { 5870 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5871 /* Page size is in log2 units */ 5872 req->page_size = BNXT_PAGE_SHIFT; 5873 req->page_tbl_depth = 1; 5874 } else { 5875 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5876 } 5877 req->fbo = 0; 5878 /* Association of ring index with doorbell index and MSIX number */ 5879 req->logical_id = cpu_to_le16(map_index); 5880 5881 switch (ring_type) { 5882 case HWRM_RING_ALLOC_TX: { 5883 struct bnxt_tx_ring_info *txr; 5884 5885 txr = container_of(ring, struct bnxt_tx_ring_info, 5886 tx_ring_struct); 5887 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5888 /* Association of transmit ring with completion ring */ 5889 grp_info = &bp->grp_info[ring->grp_idx]; 5890 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5891 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 5892 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5893 req->queue_id = cpu_to_le16(ring->queue_id); 5894 break; 5895 } 5896 case HWRM_RING_ALLOC_RX: 5897 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5898 req->length = cpu_to_le32(bp->rx_ring_mask + 1); 5899 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5900 u16 flags = 0; 5901 5902 /* Association of rx ring with stats context */ 5903 grp_info = &bp->grp_info[ring->grp_idx]; 5904 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5905 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5906 req->enables |= cpu_to_le32( 5907 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5908 if (NET_IP_ALIGN == 2) 5909 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5910 req->flags = cpu_to_le16(flags); 5911 } 5912 break; 5913 case HWRM_RING_ALLOC_AGG: 5914 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5915 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5916 /* Association of agg ring with rx ring */ 5917 grp_info = &bp->grp_info[ring->grp_idx]; 5918 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5919 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5920 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5921 req->enables |= cpu_to_le32( 5922 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5923 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5924 } else { 5925 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5926 } 5927 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5928 break; 5929 case HWRM_RING_ALLOC_CMPL: 5930 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5931 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 5932 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5933 /* Association of cp ring with nq */ 5934 grp_info = &bp->grp_info[map_index]; 5935 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5936 req->cq_handle = cpu_to_le64(ring->handle); 5937 req->enables |= cpu_to_le32( 5938 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5939 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5940 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5941 } 5942 break; 5943 case HWRM_RING_ALLOC_NQ: 5944 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5945 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 5946 if (bp->flags & BNXT_FLAG_USING_MSIX) 5947 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5948 break; 5949 default: 5950 netdev_err(bp->dev, 
"hwrm alloc invalid ring type %d\n", 5951 ring_type); 5952 return -1; 5953 } 5954 5955 resp = hwrm_req_hold(bp, req); 5956 rc = hwrm_req_send(bp, req); 5957 err = le16_to_cpu(resp->error_code); 5958 ring_id = le16_to_cpu(resp->ring_id); 5959 hwrm_req_drop(bp, req); 5960 5961 exit: 5962 if (rc || err) { 5963 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 5964 ring_type, rc, err); 5965 return -EIO; 5966 } 5967 ring->fw_ring_id = ring_id; 5968 return rc; 5969 } 5970 5971 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5972 { 5973 int rc; 5974 5975 if (BNXT_PF(bp)) { 5976 struct hwrm_func_cfg_input *req; 5977 5978 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 5979 if (rc) 5980 return rc; 5981 5982 req->fid = cpu_to_le16(0xffff); 5983 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5984 req->async_event_cr = cpu_to_le16(idx); 5985 return hwrm_req_send(bp, req); 5986 } else { 5987 struct hwrm_func_vf_cfg_input *req; 5988 5989 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 5990 if (rc) 5991 return rc; 5992 5993 req->enables = 5994 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5995 req->async_event_cr = cpu_to_le16(idx); 5996 return hwrm_req_send(bp, req); 5997 } 5998 } 5999 6000 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 6001 u32 ring_type) 6002 { 6003 switch (ring_type) { 6004 case HWRM_RING_ALLOC_TX: 6005 db->db_ring_mask = bp->tx_ring_mask; 6006 break; 6007 case HWRM_RING_ALLOC_RX: 6008 db->db_ring_mask = bp->rx_ring_mask; 6009 break; 6010 case HWRM_RING_ALLOC_AGG: 6011 db->db_ring_mask = bp->rx_agg_ring_mask; 6012 break; 6013 case HWRM_RING_ALLOC_CMPL: 6014 case HWRM_RING_ALLOC_NQ: 6015 db->db_ring_mask = bp->cp_ring_mask; 6016 break; 6017 } 6018 } 6019 6020 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 6021 u32 map_idx, u32 xid) 6022 { 6023 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6024 if (BNXT_PF(bp)) 6025 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; 6026 else 6027 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; 6028 switch (ring_type) { 6029 case HWRM_RING_ALLOC_TX: 6030 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 6031 break; 6032 case HWRM_RING_ALLOC_RX: 6033 case HWRM_RING_ALLOC_AGG: 6034 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 6035 break; 6036 case HWRM_RING_ALLOC_CMPL: 6037 db->db_key64 = DBR_PATH_L2; 6038 break; 6039 case HWRM_RING_ALLOC_NQ: 6040 db->db_key64 = DBR_PATH_L2; 6041 break; 6042 } 6043 db->db_key64 |= (u64)xid << DBR_XID_SFT; 6044 } else { 6045 db->doorbell = bp->bar1 + map_idx * 0x80; 6046 switch (ring_type) { 6047 case HWRM_RING_ALLOC_TX: 6048 db->db_key32 = DB_KEY_TX; 6049 break; 6050 case HWRM_RING_ALLOC_RX: 6051 case HWRM_RING_ALLOC_AGG: 6052 db->db_key32 = DB_KEY_RX; 6053 break; 6054 case HWRM_RING_ALLOC_CMPL: 6055 db->db_key32 = DB_KEY_CP; 6056 break; 6057 } 6058 } 6059 bnxt_set_db_mask(bp, db, ring_type); 6060 } 6061 6062 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 6063 { 6064 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 6065 int i, rc = 0; 6066 u32 type; 6067 6068 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6069 type = HWRM_RING_ALLOC_NQ; 6070 else 6071 type = HWRM_RING_ALLOC_CMPL; 6072 for (i = 0; i < bp->cp_nr_rings; i++) { 6073 struct bnxt_napi *bnapi = bp->bnapi[i]; 6074 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6075 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 6076 u32 map_idx = ring->map_idx; 6077 unsigned int vector; 6078 6079 vector = bp->irq_tbl[map_idx].vector; 6080 disable_irq_nosync(vector); 6081 rc = 
hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6082 if (rc) { 6083 enable_irq(vector); 6084 goto err_out; 6085 } 6086 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 6087 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 6088 enable_irq(vector); 6089 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 6090 6091 if (!i) { 6092 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 6093 if (rc) 6094 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 6095 } 6096 } 6097 6098 type = HWRM_RING_ALLOC_TX; 6099 for (i = 0; i < bp->tx_nr_rings; i++) { 6100 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6101 struct bnxt_ring_struct *ring; 6102 u32 map_idx; 6103 6104 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6105 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; 6106 struct bnxt_napi *bnapi = txr->bnapi; 6107 u32 type2 = HWRM_RING_ALLOC_CMPL; 6108 6109 ring = &cpr2->cp_ring_struct; 6110 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6111 map_idx = bnapi->index; 6112 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6113 if (rc) 6114 goto err_out; 6115 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6116 ring->fw_ring_id); 6117 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6118 } 6119 ring = &txr->tx_ring_struct; 6120 map_idx = i; 6121 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6122 if (rc) 6123 goto err_out; 6124 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 6125 } 6126 6127 type = HWRM_RING_ALLOC_RX; 6128 for (i = 0; i < bp->rx_nr_rings; i++) { 6129 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6130 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6131 struct bnxt_napi *bnapi = rxr->bnapi; 6132 u32 map_idx = bnapi->index; 6133 6134 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6135 if (rc) 6136 goto err_out; 6137 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 6138 /* If we have agg rings, post agg buffers first. 
*/ 6139 if (!agg_rings) 6140 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6141 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 6142 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6143 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; 6144 u32 type2 = HWRM_RING_ALLOC_CMPL; 6145 6146 ring = &cpr2->cp_ring_struct; 6147 ring->handle = BNXT_SET_NQ_HDL(cpr2); 6148 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 6149 if (rc) 6150 goto err_out; 6151 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 6152 ring->fw_ring_id); 6153 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 6154 } 6155 } 6156 6157 if (agg_rings) { 6158 type = HWRM_RING_ALLOC_AGG; 6159 for (i = 0; i < bp->rx_nr_rings; i++) { 6160 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6161 struct bnxt_ring_struct *ring = 6162 &rxr->rx_agg_ring_struct; 6163 u32 grp_idx = ring->grp_idx; 6164 u32 map_idx = grp_idx + bp->rx_nr_rings; 6165 6166 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 6167 if (rc) 6168 goto err_out; 6169 6170 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 6171 ring->fw_ring_id); 6172 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 6173 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 6174 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 6175 } 6176 } 6177 err_out: 6178 return rc; 6179 } 6180 6181 static int hwrm_ring_free_send_msg(struct bnxt *bp, 6182 struct bnxt_ring_struct *ring, 6183 u32 ring_type, int cmpl_ring_id) 6184 { 6185 struct hwrm_ring_free_output *resp; 6186 struct hwrm_ring_free_input *req; 6187 u16 error_code = 0; 6188 int rc; 6189 6190 if (BNXT_NO_FW_ACCESS(bp)) 6191 return 0; 6192 6193 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 6194 if (rc) 6195 goto exit; 6196 6197 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 6198 req->ring_type = ring_type; 6199 req->ring_id = cpu_to_le16(ring->fw_ring_id); 6200 6201 resp = hwrm_req_hold(bp, req); 6202 rc = hwrm_req_send(bp, req); 6203 error_code = le16_to_cpu(resp->error_code); 6204 hwrm_req_drop(bp, req); 6205 exit: 6206 if (rc || error_code) { 6207 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 6208 ring_type, rc, error_code); 6209 return -EIO; 6210 } 6211 return 0; 6212 } 6213 6214 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 6215 { 6216 u32 type; 6217 int i; 6218 6219 if (!bp->bnapi) 6220 return; 6221 6222 for (i = 0; i < bp->tx_nr_rings; i++) { 6223 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6224 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 6225 6226 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6227 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 6228 6229 hwrm_ring_free_send_msg(bp, ring, 6230 RING_FREE_REQ_RING_TYPE_TX, 6231 close_path ? cmpl_ring_id : 6232 INVALID_HW_RING_ID); 6233 ring->fw_ring_id = INVALID_HW_RING_ID; 6234 } 6235 } 6236 6237 for (i = 0; i < bp->rx_nr_rings; i++) { 6238 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6239 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 6240 u32 grp_idx = rxr->bnapi->index; 6241 6242 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6243 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6244 6245 hwrm_ring_free_send_msg(bp, ring, 6246 RING_FREE_REQ_RING_TYPE_RX, 6247 close_path ? 
cmpl_ring_id : 6248 INVALID_HW_RING_ID); 6249 ring->fw_ring_id = INVALID_HW_RING_ID; 6250 bp->grp_info[grp_idx].rx_fw_ring_id = 6251 INVALID_HW_RING_ID; 6252 } 6253 } 6254 6255 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6256 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 6257 else 6258 type = RING_FREE_REQ_RING_TYPE_RX; 6259 for (i = 0; i < bp->rx_nr_rings; i++) { 6260 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 6261 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 6262 u32 grp_idx = rxr->bnapi->index; 6263 6264 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6265 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6266 6267 hwrm_ring_free_send_msg(bp, ring, type, 6268 close_path ? cmpl_ring_id : 6269 INVALID_HW_RING_ID); 6270 ring->fw_ring_id = INVALID_HW_RING_ID; 6271 bp->grp_info[grp_idx].agg_fw_ring_id = 6272 INVALID_HW_RING_ID; 6273 } 6274 } 6275 6276 /* The completion rings are about to be freed. After that the 6277 * IRQ doorbell will not work anymore. So we need to disable 6278 * IRQ here. 6279 */ 6280 bnxt_disable_int_sync(bp); 6281 6282 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6283 type = RING_FREE_REQ_RING_TYPE_NQ; 6284 else 6285 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 6286 for (i = 0; i < bp->cp_nr_rings; i++) { 6287 struct bnxt_napi *bnapi = bp->bnapi[i]; 6288 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6289 struct bnxt_ring_struct *ring; 6290 int j; 6291 6292 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { 6293 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 6294 6295 ring = &cpr2->cp_ring_struct; 6296 if (ring->fw_ring_id == INVALID_HW_RING_ID) 6297 continue; 6298 hwrm_ring_free_send_msg(bp, ring, 6299 RING_FREE_REQ_RING_TYPE_L2_CMPL, 6300 INVALID_HW_RING_ID); 6301 ring->fw_ring_id = INVALID_HW_RING_ID; 6302 } 6303 ring = &cpr->cp_ring_struct; 6304 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 6305 hwrm_ring_free_send_msg(bp, ring, type, 6306 INVALID_HW_RING_ID); 6307 ring->fw_ring_id = INVALID_HW_RING_ID; 6308 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 6309 } 6310 } 6311 } 6312 6313 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 6314 bool shared); 6315 6316 static int bnxt_hwrm_get_rings(struct bnxt *bp) 6317 { 6318 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6319 struct hwrm_func_qcfg_output *resp; 6320 struct hwrm_func_qcfg_input *req; 6321 int rc; 6322 6323 if (bp->hwrm_spec_code < 0x10601) 6324 return 0; 6325 6326 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6327 if (rc) 6328 return rc; 6329 6330 req->fid = cpu_to_le16(0xffff); 6331 resp = hwrm_req_hold(bp, req); 6332 rc = hwrm_req_send(bp, req); 6333 if (rc) { 6334 hwrm_req_drop(bp, req); 6335 return rc; 6336 } 6337 6338 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6339 if (BNXT_NEW_RM(bp)) { 6340 u16 cp, stats; 6341 6342 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 6343 hw_resc->resv_hw_ring_grps = 6344 le32_to_cpu(resp->alloc_hw_ring_grps); 6345 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 6346 cp = le16_to_cpu(resp->alloc_cmpl_rings); 6347 stats = le16_to_cpu(resp->alloc_stat_ctx); 6348 hw_resc->resv_irqs = cp; 6349 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6350 int rx = hw_resc->resv_rx_rings; 6351 int tx = hw_resc->resv_tx_rings; 6352 6353 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6354 rx >>= 1; 6355 if (cp < (rx + tx)) { 6356 rx = cp / 2; 6357 tx = rx; 6358 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6359 rx <<= 1; 6360 hw_resc->resv_rx_rings = rx; 6361 hw_resc->resv_tx_rings = tx; 6362 } 6363 hw_resc->resv_irqs = 
le16_to_cpu(resp->alloc_msix); 6364 hw_resc->resv_hw_ring_grps = rx; 6365 } 6366 hw_resc->resv_cp_rings = cp; 6367 hw_resc->resv_stat_ctxs = stats; 6368 } 6369 hwrm_req_drop(bp, req); 6370 return 0; 6371 } 6372 6373 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 6374 { 6375 struct hwrm_func_qcfg_output *resp; 6376 struct hwrm_func_qcfg_input *req; 6377 int rc; 6378 6379 if (bp->hwrm_spec_code < 0x10601) 6380 return 0; 6381 6382 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 6383 if (rc) 6384 return rc; 6385 6386 req->fid = cpu_to_le16(fid); 6387 resp = hwrm_req_hold(bp, req); 6388 rc = hwrm_req_send(bp, req); 6389 if (!rc) 6390 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6391 6392 hwrm_req_drop(bp, req); 6393 return rc; 6394 } 6395 6396 static bool bnxt_rfs_supported(struct bnxt *bp); 6397 6398 static struct hwrm_func_cfg_input * 6399 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6400 int ring_grps, int cp_rings, int stats, int vnics) 6401 { 6402 struct hwrm_func_cfg_input *req; 6403 u32 enables = 0; 6404 6405 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 6406 return NULL; 6407 6408 req->fid = cpu_to_le16(0xffff); 6409 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6410 req->num_tx_rings = cpu_to_le16(tx_rings); 6411 if (BNXT_NEW_RM(bp)) { 6412 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 6413 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6414 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6415 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 6416 enables |= tx_rings + ring_grps ? 6417 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6418 enables |= rx_rings ? 6419 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6420 } else { 6421 enables |= cp_rings ? 6422 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6423 enables |= ring_grps ? 6424 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 6425 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6426 } 6427 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 6428 6429 req->num_rx_rings = cpu_to_le16(rx_rings); 6430 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6431 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6432 req->num_msix = cpu_to_le16(cp_rings); 6433 req->num_rsscos_ctxs = 6434 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6435 } else { 6436 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6437 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6438 req->num_rsscos_ctxs = cpu_to_le16(1); 6439 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 6440 bnxt_rfs_supported(bp)) 6441 req->num_rsscos_ctxs = 6442 cpu_to_le16(ring_grps + 1); 6443 } 6444 req->num_stat_ctxs = cpu_to_le16(stats); 6445 req->num_vnics = cpu_to_le16(vnics); 6446 } 6447 req->enables = cpu_to_le32(enables); 6448 return req; 6449 } 6450 6451 static struct hwrm_func_vf_cfg_input * 6452 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6453 int ring_grps, int cp_rings, int stats, int vnics) 6454 { 6455 struct hwrm_func_vf_cfg_input *req; 6456 u32 enables = 0; 6457 6458 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 6459 return NULL; 6460 6461 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6462 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 6463 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6464 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6465 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6466 enables |= tx_rings + ring_grps ? 6467 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6468 } else { 6469 enables |= cp_rings ? 
6470 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6471 enables |= ring_grps ? 6472 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 6473 } 6474 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 6475 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 6476 6477 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 6478 req->num_tx_rings = cpu_to_le16(tx_rings); 6479 req->num_rx_rings = cpu_to_le16(rx_rings); 6480 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6481 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6482 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6483 } else { 6484 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6485 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6486 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 6487 } 6488 req->num_stat_ctxs = cpu_to_le16(stats); 6489 req->num_vnics = cpu_to_le16(vnics); 6490 6491 req->enables = cpu_to_le32(enables); 6492 return req; 6493 } 6494 6495 static int 6496 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6497 int ring_grps, int cp_rings, int stats, int vnics) 6498 { 6499 struct hwrm_func_cfg_input *req; 6500 int rc; 6501 6502 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 6503 cp_rings, stats, vnics); 6504 if (!req) 6505 return -ENOMEM; 6506 6507 if (!req->enables) { 6508 hwrm_req_drop(bp, req); 6509 return 0; 6510 } 6511 6512 rc = hwrm_req_send(bp, req); 6513 if (rc) 6514 return rc; 6515 6516 if (bp->hwrm_spec_code < 0x10601) 6517 bp->hw_resc.resv_tx_rings = tx_rings; 6518 6519 return bnxt_hwrm_get_rings(bp); 6520 } 6521 6522 static int 6523 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6524 int ring_grps, int cp_rings, int stats, int vnics) 6525 { 6526 struct hwrm_func_vf_cfg_input *req; 6527 int rc; 6528 6529 if (!BNXT_NEW_RM(bp)) { 6530 bp->hw_resc.resv_tx_rings = tx_rings; 6531 return 0; 6532 } 6533 6534 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6535 cp_rings, stats, vnics); 6536 if (!req) 6537 return -ENOMEM; 6538 6539 rc = hwrm_req_send(bp, req); 6540 if (rc) 6541 return rc; 6542 6543 return bnxt_hwrm_get_rings(bp); 6544 } 6545 6546 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 6547 int cp, int stat, int vnic) 6548 { 6549 if (BNXT_PF(bp)) 6550 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 6551 vnic); 6552 else 6553 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 6554 vnic); 6555 } 6556 6557 int bnxt_nq_rings_in_use(struct bnxt *bp) 6558 { 6559 int cp = bp->cp_nr_rings; 6560 int ulp_msix, ulp_base; 6561 6562 ulp_msix = bnxt_get_ulp_msix_num(bp); 6563 if (ulp_msix) { 6564 ulp_base = bnxt_get_ulp_msix_base(bp); 6565 cp += ulp_msix; 6566 if ((ulp_base + ulp_msix) > cp) 6567 cp = ulp_base + ulp_msix; 6568 } 6569 return cp; 6570 } 6571 6572 static int bnxt_cp_rings_in_use(struct bnxt *bp) 6573 { 6574 int cp; 6575 6576 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6577 return bnxt_nq_rings_in_use(bp); 6578 6579 cp = bp->tx_nr_rings + bp->rx_nr_rings; 6580 return cp; 6581 } 6582 6583 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 6584 { 6585 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 6586 int cp = bp->cp_nr_rings; 6587 6588 if (!ulp_stat) 6589 return cp; 6590 6591 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 6592 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 6593 6594 return cp + ulp_stat; 6595 } 6596 6597 /* Check if a default RSS map needs to be setup. 
This function is only 6598 * used on older firmware that does not require reserving RX rings. 6599 */ 6600 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 6601 { 6602 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6603 6604 /* The RSS map is valid for RX rings set to resv_rx_rings */ 6605 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 6606 hw_resc->resv_rx_rings = bp->rx_nr_rings; 6607 if (!netif_is_rxfh_configured(bp->dev)) 6608 bnxt_set_dflt_rss_indir_tbl(bp); 6609 } 6610 } 6611 6612 static bool bnxt_need_reserve_rings(struct bnxt *bp) 6613 { 6614 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6615 int cp = bnxt_cp_rings_in_use(bp); 6616 int nq = bnxt_nq_rings_in_use(bp); 6617 int rx = bp->rx_nr_rings, stat; 6618 int vnic = 1, grp = rx; 6619 6620 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 6621 bp->hwrm_spec_code >= 0x10601) 6622 return true; 6623 6624 /* Old firmware does not need RX ring reservations but we still 6625 * need to setup a default RSS map when needed. With new firmware 6626 * we go through RX ring reservations first and then set up the 6627 * RSS map for the successfully reserved RX rings when needed. 6628 */ 6629 if (!BNXT_NEW_RM(bp)) { 6630 bnxt_check_rss_tbl_no_rmgr(bp); 6631 return false; 6632 } 6633 if ((bp->flags & BNXT_FLAG_RFS) && 6634 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6635 vnic = rx + 1; 6636 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6637 rx <<= 1; 6638 stat = bnxt_get_func_stat_ctxs(bp); 6639 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 6640 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 6641 (hw_resc->resv_hw_ring_grps != grp && 6642 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 6643 return true; 6644 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 6645 hw_resc->resv_irqs != nq) 6646 return true; 6647 return false; 6648 } 6649 6650 static int __bnxt_reserve_rings(struct bnxt *bp) 6651 { 6652 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6653 int cp = bnxt_nq_rings_in_use(bp); 6654 int tx = bp->tx_nr_rings; 6655 int rx = bp->rx_nr_rings; 6656 int grp, rx_rings, rc; 6657 int vnic = 1, stat; 6658 bool sh = false; 6659 int tx_cp; 6660 6661 if (!bnxt_need_reserve_rings(bp)) 6662 return 0; 6663 6664 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6665 sh = true; 6666 if ((bp->flags & BNXT_FLAG_RFS) && 6667 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6668 vnic = rx + 1; 6669 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6670 rx <<= 1; 6671 grp = bp->rx_nr_rings; 6672 stat = bnxt_get_func_stat_ctxs(bp); 6673 6674 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 6675 if (rc) 6676 return rc; 6677 6678 tx = hw_resc->resv_tx_rings; 6679 if (BNXT_NEW_RM(bp)) { 6680 rx = hw_resc->resv_rx_rings; 6681 cp = hw_resc->resv_irqs; 6682 grp = hw_resc->resv_hw_ring_grps; 6683 vnic = hw_resc->resv_vnics; 6684 stat = hw_resc->resv_stat_ctxs; 6685 } 6686 6687 rx_rings = rx; 6688 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6689 if (rx >= 2) { 6690 rx_rings = rx >> 1; 6691 } else { 6692 if (netif_running(bp->dev)) 6693 return -ENOMEM; 6694 6695 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 6696 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 6697 bp->dev->hw_features &= ~NETIF_F_LRO; 6698 bp->dev->features &= ~NETIF_F_LRO; 6699 bnxt_set_ring_params(bp); 6700 } 6701 } 6702 rx_rings = min_t(int, rx_rings, grp); 6703 cp = min_t(int, cp, bp->cp_nr_rings); 6704 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6705 stat -= bnxt_get_ulp_stat_ctxs(bp); 6706 cp = min_t(int, cp, stat); 6707 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6708 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6709 
rx = rx_rings << 1; 6710 tx_cp = bnxt_num_tx_to_cp(bp, tx); 6711 cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 6712 bp->tx_nr_rings = tx; 6713 6714 /* If we cannot reserve all the RX rings, reset the RSS map only 6715 * if absolutely necessary 6716 */ 6717 if (rx_rings != bp->rx_nr_rings) { 6718 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 6719 rx_rings, bp->rx_nr_rings); 6720 if (netif_is_rxfh_configured(bp->dev) && 6721 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 6722 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 6723 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 6724 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 6725 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 6726 } 6727 } 6728 bp->rx_nr_rings = rx_rings; 6729 bp->cp_nr_rings = cp; 6730 6731 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6732 return -ENOMEM; 6733 6734 if (!netif_is_rxfh_configured(bp->dev)) 6735 bnxt_set_dflt_rss_indir_tbl(bp); 6736 6737 return rc; 6738 } 6739 6740 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6741 int ring_grps, int cp_rings, int stats, 6742 int vnics) 6743 { 6744 struct hwrm_func_vf_cfg_input *req; 6745 u32 flags; 6746 6747 if (!BNXT_NEW_RM(bp)) 6748 return 0; 6749 6750 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6751 cp_rings, stats, vnics); 6752 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6753 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6754 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6755 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6756 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6757 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6758 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6759 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6760 6761 req->flags = cpu_to_le32(flags); 6762 return hwrm_req_send_silent(bp, req); 6763 } 6764 6765 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6766 int ring_grps, int cp_rings, int stats, 6767 int vnics) 6768 { 6769 struct hwrm_func_cfg_input *req; 6770 u32 flags; 6771 6772 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 6773 cp_rings, stats, vnics); 6774 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6775 if (BNXT_NEW_RM(bp)) { 6776 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6777 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6778 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6779 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6780 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6781 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6782 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6783 else 6784 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6785 } 6786 6787 req->flags = cpu_to_le32(flags); 6788 return hwrm_req_send_silent(bp, req); 6789 } 6790 6791 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6792 int ring_grps, int cp_rings, int stats, 6793 int vnics) 6794 { 6795 if (bp->hwrm_spec_code < 0x10801) 6796 return 0; 6797 6798 if (BNXT_PF(bp)) 6799 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6800 ring_grps, cp_rings, stats, 6801 vnics); 6802 6803 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6804 cp_rings, stats, vnics); 6805 } 6806 6807 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6808 { 6809 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6810 struct hwrm_ring_aggint_qcaps_output *resp; 6811 struct hwrm_ring_aggint_qcaps_input *req; 6812 int rc; 6813 6814 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6815 coal_cap->num_cmpl_dma_aggr_max = 63; 6816 
coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6817 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6818 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6819 coal_cap->int_lat_tmr_min_max = 65535; 6820 coal_cap->int_lat_tmr_max_max = 65535; 6821 coal_cap->num_cmpl_aggr_int_max = 65535; 6822 coal_cap->timer_units = 80; 6823 6824 if (bp->hwrm_spec_code < 0x10902) 6825 return; 6826 6827 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 6828 return; 6829 6830 resp = hwrm_req_hold(bp, req); 6831 rc = hwrm_req_send_silent(bp, req); 6832 if (!rc) { 6833 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6834 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6835 coal_cap->num_cmpl_dma_aggr_max = 6836 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6837 coal_cap->num_cmpl_dma_aggr_during_int_max = 6838 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6839 coal_cap->cmpl_aggr_dma_tmr_max = 6840 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6841 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6842 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6843 coal_cap->int_lat_tmr_min_max = 6844 le16_to_cpu(resp->int_lat_tmr_min_max); 6845 coal_cap->int_lat_tmr_max_max = 6846 le16_to_cpu(resp->int_lat_tmr_max_max); 6847 coal_cap->num_cmpl_aggr_int_max = 6848 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6849 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6850 } 6851 hwrm_req_drop(bp, req); 6852 } 6853 6854 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6855 { 6856 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6857 6858 return usec * 1000 / coal_cap->timer_units; 6859 } 6860 6861 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6862 struct bnxt_coal *hw_coal, 6863 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6864 { 6865 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6866 u16 val, tmr, max, flags = hw_coal->flags; 6867 u32 cmpl_params = coal_cap->cmpl_params; 6868 6869 max = hw_coal->bufs_per_record * 128; 6870 if (hw_coal->budget) 6871 max = hw_coal->bufs_per_record * hw_coal->budget; 6872 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6873 6874 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6875 req->num_cmpl_aggr_int = cpu_to_le16(val); 6876 6877 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6878 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6879 6880 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6881 coal_cap->num_cmpl_dma_aggr_during_int_max); 6882 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6883 6884 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6885 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6886 req->int_lat_tmr_max = cpu_to_le16(tmr); 6887 6888 /* min timer set to 1/2 of interrupt timer */ 6889 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6890 val = tmr / 2; 6891 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6892 req->int_lat_tmr_min = cpu_to_le16(val); 6893 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6894 } 6895 6896 /* buf timer set to 1/4 of interrupt timer */ 6897 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6898 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6899 6900 if (cmpl_params & 6901 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6902 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6903 val = clamp_t(u16, tmr, 1, 6904 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6905 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6906 req->enables |= 6907 
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 6908 } 6909 6910 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 6911 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 6912 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 6913 req->flags = cpu_to_le16(flags); 6914 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 6915 } 6916 6917 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6918 struct bnxt_coal *hw_coal) 6919 { 6920 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 6921 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6922 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6923 u32 nq_params = coal_cap->nq_params; 6924 u16 tmr; 6925 int rc; 6926 6927 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6928 return 0; 6929 6930 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 6931 if (rc) 6932 return rc; 6933 6934 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6935 req->flags = 6936 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6937 6938 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6939 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6940 req->int_lat_tmr_min = cpu_to_le16(tmr); 6941 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6942 return hwrm_req_send(bp, req); 6943 } 6944 6945 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6946 { 6947 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 6948 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6949 struct bnxt_coal coal; 6950 int rc; 6951 6952 /* Tick values in micro seconds. 6953 * 1 coal_buf x bufs_per_record = 1 completion record. 6954 */ 6955 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6956 6957 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6958 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6959 6960 if (!bnapi->rx_ring) 6961 return -ENODEV; 6962 6963 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 6964 if (rc) 6965 return rc; 6966 6967 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 6968 6969 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6970 6971 return hwrm_req_send(bp, req_rx); 6972 } 6973 6974 static int 6975 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 6976 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6977 { 6978 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6979 6980 req->ring_id = cpu_to_le16(ring_id); 6981 return hwrm_req_send(bp, req); 6982 } 6983 6984 static int 6985 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 6986 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6987 { 6988 struct bnxt_tx_ring_info *txr; 6989 int i, rc; 6990 6991 bnxt_for_each_napi_tx(i, bnapi, txr) { 6992 u16 ring_id; 6993 6994 ring_id = bnxt_cp_ring_for_tx(bp, txr); 6995 req->ring_id = cpu_to_le16(ring_id); 6996 rc = hwrm_req_send(bp, req); 6997 if (rc) 6998 return rc; 6999 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7000 return 0; 7001 } 7002 return 0; 7003 } 7004 7005 int bnxt_hwrm_set_coal(struct bnxt *bp) 7006 { 7007 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 7008 int i, rc; 7009 7010 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7011 if (rc) 7012 return rc; 7013 7014 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 7015 if (rc) { 7016 hwrm_req_drop(bp, req_rx); 7017 return rc; 7018 } 7019 7020 bnxt_hwrm_set_coal_params(bp, 
&bp->rx_coal, req_rx); 7021 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 7022 7023 hwrm_req_hold(bp, req_rx); 7024 hwrm_req_hold(bp, req_tx); 7025 for (i = 0; i < bp->cp_nr_rings; i++) { 7026 struct bnxt_napi *bnapi = bp->bnapi[i]; 7027 struct bnxt_coal *hw_coal; 7028 7029 if (!bnapi->rx_ring) 7030 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7031 else 7032 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 7033 if (rc) 7034 break; 7035 7036 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7037 continue; 7038 7039 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 7040 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 7041 if (rc) 7042 break; 7043 } 7044 if (bnapi->rx_ring) 7045 hw_coal = &bp->rx_coal; 7046 else 7047 hw_coal = &bp->tx_coal; 7048 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 7049 } 7050 hwrm_req_drop(bp, req_rx); 7051 hwrm_req_drop(bp, req_tx); 7052 return rc; 7053 } 7054 7055 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 7056 { 7057 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 7058 struct hwrm_stat_ctx_free_input *req; 7059 int i; 7060 7061 if (!bp->bnapi) 7062 return; 7063 7064 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7065 return; 7066 7067 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 7068 return; 7069 if (BNXT_FW_MAJ(bp) <= 20) { 7070 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 7071 hwrm_req_drop(bp, req); 7072 return; 7073 } 7074 hwrm_req_hold(bp, req0); 7075 } 7076 hwrm_req_hold(bp, req); 7077 for (i = 0; i < bp->cp_nr_rings; i++) { 7078 struct bnxt_napi *bnapi = bp->bnapi[i]; 7079 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7080 7081 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 7082 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 7083 if (req0) { 7084 req0->stat_ctx_id = req->stat_ctx_id; 7085 hwrm_req_send(bp, req0); 7086 } 7087 hwrm_req_send(bp, req); 7088 7089 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 7090 } 7091 } 7092 hwrm_req_drop(bp, req); 7093 if (req0) 7094 hwrm_req_drop(bp, req0); 7095 } 7096 7097 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 7098 { 7099 struct hwrm_stat_ctx_alloc_output *resp; 7100 struct hwrm_stat_ctx_alloc_input *req; 7101 int rc, i; 7102 7103 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7104 return 0; 7105 7106 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 7107 if (rc) 7108 return rc; 7109 7110 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 7111 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 7112 7113 resp = hwrm_req_hold(bp, req); 7114 for (i = 0; i < bp->cp_nr_rings; i++) { 7115 struct bnxt_napi *bnapi = bp->bnapi[i]; 7116 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7117 7118 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 7119 7120 rc = hwrm_req_send(bp, req); 7121 if (rc) 7122 break; 7123 7124 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 7125 7126 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 7127 } 7128 hwrm_req_drop(bp, req); 7129 return rc; 7130 } 7131 7132 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 7133 { 7134 struct hwrm_func_qcfg_output *resp; 7135 struct hwrm_func_qcfg_input *req; 7136 u32 min_db_offset = 0; 7137 u16 flags; 7138 int rc; 7139 7140 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7141 if (rc) 7142 return rc; 7143 7144 req->fid = cpu_to_le16(0xffff); 7145 resp = hwrm_req_hold(bp, req); 7146 rc = hwrm_req_send(bp, req); 7147 if (rc) 7148 goto func_qcfg_exit; 7149 7150 #ifdef CONFIG_BNXT_SRIOV 7151 if (BNXT_VF(bp)) { 7152 struct bnxt_vf_info *vf = &bp->vf; 7153 7154 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 
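		/* Descriptive note: the default VLAN cached in bp->vf.vlan here is
		 * what bnxt_hwrm_vnic_cfg() checks later; a non-zero value makes
		 * it set VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE for the VF's VNIC.
		 */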
7155 } else { 7156 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 7157 } 7158 #endif 7159 flags = le16_to_cpu(resp->flags); 7160 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 7161 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 7162 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 7163 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 7164 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 7165 } 7166 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 7167 bp->flags |= BNXT_FLAG_MULTI_HOST; 7168 7169 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 7170 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 7171 7172 switch (resp->port_partition_type) { 7173 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 7174 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 7175 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 7176 bp->port_partition_type = resp->port_partition_type; 7177 break; 7178 } 7179 if (bp->hwrm_spec_code < 0x10707 || 7180 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 7181 bp->br_mode = BRIDGE_MODE_VEB; 7182 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 7183 bp->br_mode = BRIDGE_MODE_VEPA; 7184 else 7185 bp->br_mode = BRIDGE_MODE_UNDEF; 7186 7187 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 7188 if (!bp->max_mtu) 7189 bp->max_mtu = BNXT_MAX_MTU; 7190 7191 if (bp->db_size) 7192 goto func_qcfg_exit; 7193 7194 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7195 if (BNXT_PF(bp)) 7196 min_db_offset = DB_PF_OFFSET_P5; 7197 else 7198 min_db_offset = DB_VF_OFFSET_P5; 7199 } 7200 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 7201 1024); 7202 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 7203 bp->db_size <= min_db_offset) 7204 bp->db_size = pci_resource_len(bp->pdev, 2); 7205 7206 func_qcfg_exit: 7207 hwrm_req_drop(bp, req); 7208 return rc; 7209 } 7210 7211 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 7212 u8 init_val, u8 init_offset, 7213 bool init_mask_set) 7214 { 7215 ctxm->init_value = init_val; 7216 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 7217 if (init_mask_set) 7218 ctxm->init_offset = init_offset * 4; 7219 else 7220 ctxm->init_value = 0; 7221 } 7222 7223 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 7224 { 7225 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7226 u16 type; 7227 7228 for (type = 0; type < ctx_max; type++) { 7229 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7230 int n = 1; 7231 7232 if (!ctxm->max_entries) 7233 continue; 7234 7235 if (ctxm->instance_bmap) 7236 n = hweight32(ctxm->instance_bmap); 7237 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 7238 if (!ctxm->pg_info) 7239 return -ENOMEM; 7240 } 7241 return 0; 7242 } 7243 7244 #define BNXT_CTX_INIT_VALID(flags) \ 7245 (!!((flags) & \ 7246 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 7247 7248 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 7249 { 7250 struct hwrm_func_backing_store_qcaps_v2_output *resp; 7251 struct hwrm_func_backing_store_qcaps_v2_input *req; 7252 u16 last_valid_type = BNXT_CTX_INV; 7253 struct bnxt_ctx_mem_info *ctx; 7254 u16 type; 7255 int rc; 7256 7257 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 7258 if (rc) 7259 return rc; 7260 7261 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7262 if (!ctx) 7263 return -ENOMEM; 7264 bp->ctx = ctx; 7265 7266 resp = hwrm_req_hold(bp, req); 7267 7268 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 7269 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7270 u8 init_val, 
init_off, i; 7271 __le32 *p; 7272 u32 flags; 7273 7274 req->type = cpu_to_le16(type); 7275 rc = hwrm_req_send(bp, req); 7276 if (rc) 7277 goto ctx_done; 7278 flags = le32_to_cpu(resp->flags); 7279 type = le16_to_cpu(resp->next_valid_type); 7280 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) 7281 continue; 7282 7283 ctxm->type = le16_to_cpu(resp->type); 7284 last_valid_type = ctxm->type; 7285 ctxm->entry_size = le16_to_cpu(resp->entry_size); 7286 ctxm->flags = flags; 7287 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 7288 ctxm->entry_multiple = resp->entry_multiple; 7289 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); 7290 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 7291 init_val = resp->ctx_init_value; 7292 init_off = resp->ctx_init_offset; 7293 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 7294 BNXT_CTX_INIT_VALID(flags)); 7295 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 7296 BNXT_MAX_SPLIT_ENTRY); 7297 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 7298 i++, p++) 7299 ctxm->split[i] = le32_to_cpu(*p); 7300 } 7301 if (last_valid_type < BNXT_CTX_V2_MAX) 7302 ctx->ctx_arr[last_valid_type].last = true; 7303 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 7304 7305 ctx_done: 7306 hwrm_req_drop(bp, req); 7307 return rc; 7308 } 7309 7310 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 7311 { 7312 struct hwrm_func_backing_store_qcaps_output *resp; 7313 struct hwrm_func_backing_store_qcaps_input *req; 7314 int rc; 7315 7316 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 7317 return 0; 7318 7319 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 7320 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 7321 7322 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 7323 if (rc) 7324 return rc; 7325 7326 resp = hwrm_req_hold(bp, req); 7327 rc = hwrm_req_send_silent(bp, req); 7328 if (!rc) { 7329 struct bnxt_ctx_mem_type *ctxm; 7330 struct bnxt_ctx_mem_info *ctx; 7331 u8 init_val, init_idx = 0; 7332 u16 init_mask; 7333 7334 ctx = bp->ctx; 7335 if (!ctx) { 7336 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 7337 if (!ctx) { 7338 rc = -ENOMEM; 7339 goto ctx_err; 7340 } 7341 bp->ctx = ctx; 7342 } 7343 init_val = resp->ctx_kind_initializer; 7344 init_mask = le16_to_cpu(resp->ctx_init_mask); 7345 7346 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7347 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 7348 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 7349 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 7350 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 7351 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 7352 (init_mask & (1 << init_idx++)) != 0); 7353 7354 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7355 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 7356 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 7357 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 7358 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 7359 (init_mask & (1 << init_idx++)) != 0); 7360 7361 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7362 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 7363 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 7364 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 7365 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 7366 (init_mask & (1 << init_idx++)) != 0); 7367 7368 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7369 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 7370 
ctxm->max_entries = ctxm->vnic_entries + 7371 le16_to_cpu(resp->vnic_max_ring_table_entries); 7372 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 7373 bnxt_init_ctx_initializer(ctxm, init_val, 7374 resp->vnic_init_offset, 7375 (init_mask & (1 << init_idx++)) != 0); 7376 7377 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7378 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 7379 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 7380 bnxt_init_ctx_initializer(ctxm, init_val, 7381 resp->stat_init_offset, 7382 (init_mask & (1 << init_idx++)) != 0); 7383 7384 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7385 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 7386 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 7387 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 7388 ctxm->entry_multiple = resp->tqm_entries_multiple; 7389 if (!ctxm->entry_multiple) 7390 ctxm->entry_multiple = 1; 7391 7392 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 7393 7394 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7395 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 7396 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 7397 ctxm->mrav_num_entries_units = 7398 le16_to_cpu(resp->mrav_num_entries_units); 7399 bnxt_init_ctx_initializer(ctxm, init_val, 7400 resp->mrav_init_offset, 7401 (init_mask & (1 << init_idx++)) != 0); 7402 7403 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7404 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 7405 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 7406 7407 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 7408 if (!ctx->tqm_fp_rings_count) 7409 ctx->tqm_fp_rings_count = bp->max_q; 7410 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 7411 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 7412 7413 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 7414 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 7415 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 7416 7417 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 7418 } else { 7419 rc = 0; 7420 } 7421 ctx_err: 7422 hwrm_req_drop(bp, req); 7423 return rc; 7424 } 7425 7426 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 7427 __le64 *pg_dir) 7428 { 7429 if (!rmem->nr_pages) 7430 return; 7431 7432 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 7433 if (rmem->depth >= 1) { 7434 if (rmem->depth == 2) 7435 *pg_attr |= 2; 7436 else 7437 *pg_attr |= 1; 7438 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 7439 } else { 7440 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 7441 } 7442 } 7443 7444 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 7445 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 7446 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 7447 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 7448 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 7449 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 7450 7451 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 7452 { 7453 struct hwrm_func_backing_store_cfg_input *req; 7454 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7455 struct bnxt_ctx_pg_info *ctx_pg; 7456 struct bnxt_ctx_mem_type *ctxm; 7457 void **__req = (void **)&req; 7458 u32 req_len = sizeof(*req); 7459 __le32 *num_entries; 7460 __le64 *pg_dir; 7461 u32 flags = 0; 7462 u8 *pg_attr; 7463 u32 ena; 7464 int rc; 7465 int i; 7466 7467 if (!ctx) 7468 return 0; 7469 7470 if (req_len > bp->hwrm_max_ext_req_len) 7471 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 7472 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 7473 if (rc) 7474 return rc; 7475 
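	/* Descriptive note: for each context type enabled by the caller, the
	 * blocks below fill in the entry counts, entry size and page table
	 * level/directory of that type's backing store before the single
	 * FUNC_BACKING_STORE_CFG request is sent.
	 */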
7476 req->enables = cpu_to_le32(enables); 7477 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 7478 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7479 ctx_pg = ctxm->pg_info; 7480 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 7481 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 7482 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 7483 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 7484 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7485 &req->qpc_pg_size_qpc_lvl, 7486 &req->qpc_page_dir); 7487 } 7488 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 7489 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7490 ctx_pg = ctxm->pg_info; 7491 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 7492 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 7493 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 7494 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7495 &req->srq_pg_size_srq_lvl, 7496 &req->srq_page_dir); 7497 } 7498 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 7499 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7500 ctx_pg = ctxm->pg_info; 7501 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 7502 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 7503 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 7504 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7505 &req->cq_pg_size_cq_lvl, 7506 &req->cq_page_dir); 7507 } 7508 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 7509 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7510 ctx_pg = ctxm->pg_info; 7511 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 7512 req->vnic_num_ring_table_entries = 7513 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); 7514 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 7515 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7516 &req->vnic_pg_size_vnic_lvl, 7517 &req->vnic_page_dir); 7518 } 7519 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 7520 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7521 ctx_pg = ctxm->pg_info; 7522 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 7523 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 7524 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7525 &req->stat_pg_size_stat_lvl, 7526 &req->stat_page_dir); 7527 } 7528 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 7529 u32 units; 7530 7531 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7532 ctx_pg = ctxm->pg_info; 7533 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 7534 units = ctxm->mrav_num_entries_units; 7535 if (units) { 7536 u32 num_mr, num_ah = ctxm->mrav_av_entries; 7537 u32 entries; 7538 7539 num_mr = ctx_pg->entries - num_ah; 7540 entries = ((num_mr / units) << 16) | (num_ah / units); 7541 req->mrav_num_entries = cpu_to_le32(entries); 7542 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 7543 } 7544 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 7545 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7546 &req->mrav_pg_size_mrav_lvl, 7547 &req->mrav_page_dir); 7548 } 7549 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 7550 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7551 ctx_pg = ctxm->pg_info; 7552 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 7553 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 7554 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7555 &req->tim_pg_size_tim_lvl, 7556 &req->tim_page_dir); 7557 } 7558 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7559 for (i = 0, num_entries = &req->tqm_sp_num_entries, 7560 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 7561 pg_dir = &req->tqm_sp_page_dir, 7562 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 7563 
ctx_pg = ctxm->pg_info; 7564 i < BNXT_MAX_TQM_RINGS; 7565 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 7566 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 7567 if (!(enables & ena)) 7568 continue; 7569 7570 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 7571 *num_entries = cpu_to_le32(ctx_pg->entries); 7572 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 7573 } 7574 req->flags = cpu_to_le32(flags); 7575 return hwrm_req_send(bp, req); 7576 } 7577 7578 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 7579 struct bnxt_ctx_pg_info *ctx_pg) 7580 { 7581 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7582 7583 rmem->page_size = BNXT_PAGE_SIZE; 7584 rmem->pg_arr = ctx_pg->ctx_pg_arr; 7585 rmem->dma_arr = ctx_pg->ctx_dma_arr; 7586 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 7587 if (rmem->depth >= 1) 7588 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 7589 return bnxt_alloc_ring(bp, rmem); 7590 } 7591 7592 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 7593 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 7594 u8 depth, struct bnxt_ctx_mem_type *ctxm) 7595 { 7596 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7597 int rc; 7598 7599 if (!mem_size) 7600 return -EINVAL; 7601 7602 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7603 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 7604 ctx_pg->nr_pages = 0; 7605 return -EINVAL; 7606 } 7607 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 7608 int nr_tbls, i; 7609 7610 rmem->depth = 2; 7611 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 7612 GFP_KERNEL); 7613 if (!ctx_pg->ctx_pg_tbl) 7614 return -ENOMEM; 7615 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 7616 rmem->nr_pages = nr_tbls; 7617 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7618 if (rc) 7619 return rc; 7620 for (i = 0; i < nr_tbls; i++) { 7621 struct bnxt_ctx_pg_info *pg_tbl; 7622 7623 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 7624 if (!pg_tbl) 7625 return -ENOMEM; 7626 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 7627 rmem = &pg_tbl->ring_mem; 7628 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 7629 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 7630 rmem->depth = 1; 7631 rmem->nr_pages = MAX_CTX_PAGES; 7632 rmem->ctx_mem = ctxm; 7633 if (i == (nr_tbls - 1)) { 7634 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 7635 7636 if (rem) 7637 rmem->nr_pages = rem; 7638 } 7639 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 7640 if (rc) 7641 break; 7642 } 7643 } else { 7644 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7645 if (rmem->nr_pages > 1 || depth) 7646 rmem->depth = 1; 7647 rmem->ctx_mem = ctxm; 7648 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7649 } 7650 return rc; 7651 } 7652 7653 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 7654 struct bnxt_ctx_pg_info *ctx_pg) 7655 { 7656 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7657 7658 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 7659 ctx_pg->ctx_pg_tbl) { 7660 int i, nr_tbls = rmem->nr_pages; 7661 7662 for (i = 0; i < nr_tbls; i++) { 7663 struct bnxt_ctx_pg_info *pg_tbl; 7664 struct bnxt_ring_mem_info *rmem2; 7665 7666 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 7667 if (!pg_tbl) 7668 continue; 7669 rmem2 = &pg_tbl->ring_mem; 7670 bnxt_free_ring(bp, rmem2); 7671 ctx_pg->ctx_pg_arr[i] = NULL; 7672 kfree(pg_tbl); 7673 ctx_pg->ctx_pg_tbl[i] = NULL; 7674 } 7675 kfree(ctx_pg->ctx_pg_tbl); 7676 ctx_pg->ctx_pg_tbl = NULL; 7677 } 7678 bnxt_free_ring(bp, rmem); 7679 ctx_pg->nr_pages = 0; 7680 } 7681 7682 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 7683 struct bnxt_ctx_mem_type *ctxm, 
u32 entries, 7684 u8 pg_lvl) 7685 { 7686 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 7687 int i, rc = 0, n = 1; 7688 u32 mem_size; 7689 7690 if (!ctxm->entry_size || !ctx_pg) 7691 return -EINVAL; 7692 if (ctxm->instance_bmap) 7693 n = hweight32(ctxm->instance_bmap); 7694 if (ctxm->entry_multiple) 7695 entries = roundup(entries, ctxm->entry_multiple); 7696 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 7697 mem_size = entries * ctxm->entry_size; 7698 for (i = 0; i < n && !rc; i++) { 7699 ctx_pg[i].entries = entries; 7700 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 7701 ctxm->init_value ? ctxm : NULL); 7702 } 7703 return rc; 7704 } 7705 7706 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 7707 struct bnxt_ctx_mem_type *ctxm, 7708 bool last) 7709 { 7710 struct hwrm_func_backing_store_cfg_v2_input *req; 7711 u32 instance_bmap = ctxm->instance_bmap; 7712 int i, j, rc = 0, n = 1; 7713 __le32 *p; 7714 7715 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 7716 return 0; 7717 7718 if (instance_bmap) 7719 n = hweight32(ctxm->instance_bmap); 7720 else 7721 instance_bmap = 1; 7722 7723 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 7724 if (rc) 7725 return rc; 7726 hwrm_req_hold(bp, req); 7727 req->type = cpu_to_le16(ctxm->type); 7728 req->entry_size = cpu_to_le16(ctxm->entry_size); 7729 req->subtype_valid_cnt = ctxm->split_entry_cnt; 7730 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 7731 p[i] = cpu_to_le32(ctxm->split[i]); 7732 for (i = 0, j = 0; j < n && !rc; i++) { 7733 struct bnxt_ctx_pg_info *ctx_pg; 7734 7735 if (!(instance_bmap & (1 << i))) 7736 continue; 7737 req->instance = cpu_to_le16(i); 7738 ctx_pg = &ctxm->pg_info[j++]; 7739 if (!ctx_pg->entries) 7740 continue; 7741 req->num_entries = cpu_to_le32(ctx_pg->entries); 7742 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7743 &req->page_size_pbl_level, 7744 &req->page_dir); 7745 if (last && j == n) 7746 req->flags = 7747 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 7748 rc = hwrm_req_send(bp, req); 7749 } 7750 hwrm_req_drop(bp, req); 7751 return rc; 7752 } 7753 7754 static int bnxt_backing_store_cfg_v2(struct bnxt *bp) 7755 { 7756 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7757 struct bnxt_ctx_mem_type *ctxm; 7758 int rc = 0; 7759 u16 type; 7760 7761 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 7762 ctxm = &ctx->ctx_arr[type]; 7763 7764 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 7765 if (rc) 7766 return rc; 7767 } 7768 return 0; 7769 } 7770 7771 void bnxt_free_ctx_mem(struct bnxt *bp) 7772 { 7773 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7774 u16 type; 7775 7776 if (!ctx) 7777 return; 7778 7779 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { 7780 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 7781 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 7782 int i, n = 1; 7783 7784 if (!ctx_pg) 7785 continue; 7786 if (ctxm->instance_bmap) 7787 n = hweight32(ctxm->instance_bmap); 7788 for (i = 0; i < n; i++) 7789 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 7790 7791 kfree(ctx_pg); 7792 ctxm->pg_info = NULL; 7793 } 7794 7795 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 7796 kfree(ctx); 7797 bp->ctx = NULL; 7798 } 7799 7800 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 7801 { 7802 struct bnxt_ctx_mem_type *ctxm; 7803 struct bnxt_ctx_mem_info *ctx; 7804 u32 l2_qps, qp1_qps, max_qps; 7805 u32 ena, entries_sp, entries; 7806 u32 srqs, max_srqs, min; 7807 u32 num_mr, num_ah; 7808 u32 extra_srqs = 0; 7809 u32 
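/* Rough outline of bnxt_alloc_ctx_mem() below: query the firmware's
 * backing-store requirements, size the QP, SRQ, CQ, VNIC and STAT
 * contexts (reserving extra QP/SRQ/CQ room when RoCE is supported
 * outside kdump, and adding MRAV/TIM contexts when RoCE is supported),
 * size the TQM slow-path and fast-path rings, then push the layout to
 * firmware via the V2 or legacy FUNC_BACKING_STORE_CFG call.
 */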
extra_qps = 0; 7810 u8 pg_lvl = 1; 7811 int i, rc; 7812 7813 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 7814 if (rc) { 7815 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 7816 rc); 7817 return rc; 7818 } 7819 ctx = bp->ctx; 7820 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 7821 return 0; 7822 7823 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7824 l2_qps = ctxm->qp_l2_entries; 7825 qp1_qps = ctxm->qp_qp1_entries; 7826 max_qps = ctxm->max_entries; 7827 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7828 srqs = ctxm->srq_l2_entries; 7829 max_srqs = ctxm->max_entries; 7830 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 7831 pg_lvl = 2; 7832 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); 7833 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 7834 } 7835 7836 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 7837 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 7838 pg_lvl); 7839 if (rc) 7840 return rc; 7841 7842 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 7843 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 7844 if (rc) 7845 return rc; 7846 7847 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 7848 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 7849 extra_qps * 2, pg_lvl); 7850 if (rc) 7851 return rc; 7852 7853 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 7854 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 7855 if (rc) 7856 return rc; 7857 7858 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 7859 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 7860 if (rc) 7861 return rc; 7862 7863 ena = 0; 7864 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 7865 goto skip_rdma; 7866 7867 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 7868 /* 128K extra is needed to accommodate static AH context 7869 * allocation by f/w. 7870 */ 7871 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 7872 num_ah = min_t(u32, num_mr, 1024 * 128); 7873 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 7874 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 7875 ctxm->mrav_av_entries = num_ah; 7876 7877 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 7878 if (rc) 7879 return rc; 7880 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 7881 7882 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 7883 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 7884 if (rc) 7885 return rc; 7886 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 7887 7888 skip_rdma: 7889 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 7890 min = ctxm->min_entries; 7891 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 7892 2 * (extra_qps + qp1_qps) + min; 7893 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 7894 if (rc) 7895 return rc; 7896 7897 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 7898 entries = l2_qps + 2 * (extra_qps + qp1_qps); 7899 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 7900 if (rc) 7901 return rc; 7902 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 7903 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 7904 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 7905 7906 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 7907 rc = bnxt_backing_store_cfg_v2(bp); 7908 else 7909 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 7910 if (rc) { 7911 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 7912 rc); 7913 return rc; 7914 } 7915 ctx->flags |= BNXT_CTX_FLAG_INITED; 7916 return 0; 7917 } 7918 7919 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 7920 { 7921 struct hwrm_func_resource_qcaps_output *resp; 7922 struct 
hwrm_func_resource_qcaps_input *req; 7923 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7924 int rc; 7925 7926 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 7927 if (rc) 7928 return rc; 7929 7930 req->fid = cpu_to_le16(0xffff); 7931 resp = hwrm_req_hold(bp, req); 7932 rc = hwrm_req_send_silent(bp, req); 7933 if (rc) 7934 goto hwrm_func_resc_qcaps_exit; 7935 7936 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 7937 if (!all) 7938 goto hwrm_func_resc_qcaps_exit; 7939 7940 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 7941 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 7942 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 7943 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 7944 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 7945 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 7946 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 7947 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 7948 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 7949 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 7950 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 7951 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 7952 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 7953 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 7954 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 7955 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 7956 7957 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7958 u16 max_msix = le16_to_cpu(resp->max_msix); 7959 7960 hw_resc->max_nqs = max_msix; 7961 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 7962 } 7963 7964 if (BNXT_PF(bp)) { 7965 struct bnxt_pf_info *pf = &bp->pf; 7966 7967 pf->vf_resv_strategy = 7968 le16_to_cpu(resp->vf_reservation_strategy); 7969 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 7970 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 7971 } 7972 hwrm_func_resc_qcaps_exit: 7973 hwrm_req_drop(bp, req); 7974 return rc; 7975 } 7976 7977 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 7978 { 7979 struct hwrm_port_mac_ptp_qcfg_output *resp; 7980 struct hwrm_port_mac_ptp_qcfg_input *req; 7981 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 7982 bool phc_cfg; 7983 u8 flags; 7984 int rc; 7985 7986 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { 7987 rc = -ENODEV; 7988 goto no_ptp; 7989 } 7990 7991 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 7992 if (rc) 7993 goto no_ptp; 7994 7995 req->port_id = cpu_to_le16(bp->pf.port_id); 7996 resp = hwrm_req_hold(bp, req); 7997 rc = hwrm_req_send(bp, req); 7998 if (rc) 7999 goto exit; 8000 8001 flags = resp->flags; 8002 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 8003 rc = -ENODEV; 8004 goto exit; 8005 } 8006 if (!ptp) { 8007 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 8008 if (!ptp) { 8009 rc = -ENOMEM; 8010 goto exit; 8011 } 8012 ptp->bp = bp; 8013 bp->ptp_cfg = ptp; 8014 } 8015 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { 8016 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); 8017 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 8018 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 8019 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 8020 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 8021 } else { 8022 rc = -ENODEV; 8023 goto exit; 8024 } 8025 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 8026 rc = 
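/* phc_cfg tells bnxt_ptp_init() whether the firmware has already
 * configured the real-time clock; the refclk registers selected above
 * are either the direct-access registers reported by the firmware or,
 * on P5+ chips, the fixed TIMESYNC_TS0 locations.
 */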
bnxt_ptp_init(bp, phc_cfg); 8027 if (rc) 8028 netdev_warn(bp->dev, "PTP initialization failed.\n"); 8029 exit: 8030 hwrm_req_drop(bp, req); 8031 if (!rc) 8032 return 0; 8033 8034 no_ptp: 8035 bnxt_ptp_clear(bp); 8036 kfree(ptp); 8037 bp->ptp_cfg = NULL; 8038 return rc; 8039 } 8040 8041 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 8042 { 8043 struct hwrm_func_qcaps_output *resp; 8044 struct hwrm_func_qcaps_input *req; 8045 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8046 u32 flags, flags_ext, flags_ext2; 8047 int rc; 8048 8049 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 8050 if (rc) 8051 return rc; 8052 8053 req->fid = cpu_to_le16(0xffff); 8054 resp = hwrm_req_hold(bp, req); 8055 rc = hwrm_req_send(bp, req); 8056 if (rc) 8057 goto hwrm_func_qcaps_exit; 8058 8059 flags = le32_to_cpu(resp->flags); 8060 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 8061 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 8062 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 8063 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 8064 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 8065 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 8066 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 8067 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 8068 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 8069 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 8070 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 8071 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 8072 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 8073 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 8074 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 8075 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 8076 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 8077 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 8078 8079 flags_ext = le32_to_cpu(resp->flags_ext); 8080 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 8081 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 8082 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 8083 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 8084 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 8085 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 8086 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 8087 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 8088 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 8089 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 8090 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 8091 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 8092 8093 flags_ext2 = le32_to_cpu(resp->flags_ext2); 8094 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 8095 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 8096 8097 bp->tx_push_thresh = 0; 8098 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 8099 BNXT_FW_MAJ(bp) > 217) 8100 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 8101 8102 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 8103 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 8104 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 8105 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 8106 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 8107 if (!hw_resc->max_hw_ring_grps) 8108 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 8109 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 8110 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 8111 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 8112 8113 if (BNXT_PF(bp)) 
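/* On the PF, cache the function/port identity and limits reported by
 * HWRM_FUNC_QCAPS (fid, port_id, MAC address, VF id range, flow table
 * sizes) plus the WoL and PTP capability bits; a VF only keeps its fid
 * and MAC address.
 */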
{ 8114 struct bnxt_pf_info *pf = &bp->pf; 8115 8116 pf->fw_fid = le16_to_cpu(resp->fid); 8117 pf->port_id = le16_to_cpu(resp->port_id); 8118 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 8119 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 8120 pf->max_vfs = le16_to_cpu(resp->max_vfs); 8121 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 8122 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 8123 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 8124 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 8125 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 8126 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 8127 bp->flags &= ~BNXT_FLAG_WOL_CAP; 8128 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 8129 bp->flags |= BNXT_FLAG_WOL_CAP; 8130 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 8131 bp->fw_cap |= BNXT_FW_CAP_PTP; 8132 } else { 8133 bnxt_ptp_clear(bp); 8134 kfree(bp->ptp_cfg); 8135 bp->ptp_cfg = NULL; 8136 } 8137 } else { 8138 #ifdef CONFIG_BNXT_SRIOV 8139 struct bnxt_vf_info *vf = &bp->vf; 8140 8141 vf->fw_fid = le16_to_cpu(resp->fid); 8142 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 8143 #endif 8144 } 8145 8146 hwrm_func_qcaps_exit: 8147 hwrm_req_drop(bp, req); 8148 return rc; 8149 } 8150 8151 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 8152 { 8153 struct hwrm_dbg_qcaps_output *resp; 8154 struct hwrm_dbg_qcaps_input *req; 8155 int rc; 8156 8157 bp->fw_dbg_cap = 0; 8158 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 8159 return; 8160 8161 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 8162 if (rc) 8163 return; 8164 8165 req->fid = cpu_to_le16(0xffff); 8166 resp = hwrm_req_hold(bp, req); 8167 rc = hwrm_req_send(bp, req); 8168 if (rc) 8169 goto hwrm_dbg_qcaps_exit; 8170 8171 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 8172 8173 hwrm_dbg_qcaps_exit: 8174 hwrm_req_drop(bp, req); 8175 } 8176 8177 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 8178 8179 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 8180 { 8181 int rc; 8182 8183 rc = __bnxt_hwrm_func_qcaps(bp); 8184 if (rc) 8185 return rc; 8186 8187 bnxt_hwrm_dbg_qcaps(bp); 8188 8189 rc = bnxt_hwrm_queue_qportcfg(bp); 8190 if (rc) { 8191 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 8192 return rc; 8193 } 8194 if (bp->hwrm_spec_code >= 0x10803) { 8195 rc = bnxt_alloc_ctx_mem(bp); 8196 if (rc) 8197 return rc; 8198 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8199 if (!rc) 8200 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 8201 } 8202 return 0; 8203 } 8204 8205 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 8206 { 8207 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 8208 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 8209 u32 flags; 8210 int rc; 8211 8212 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 8213 return 0; 8214 8215 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 8216 if (rc) 8217 return rc; 8218 8219 resp = hwrm_req_hold(bp, req); 8220 rc = hwrm_req_send(bp, req); 8221 if (rc) 8222 goto hwrm_cfa_adv_qcaps_exit; 8223 8224 flags = le32_to_cpu(resp->flags); 8225 if (flags & 8226 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 8227 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 8228 8229 hwrm_cfa_adv_qcaps_exit: 8230 hwrm_req_drop(bp, req); 8231 return rc; 8232 } 8233 8234 static int __bnxt_alloc_fw_health(struct bnxt *bp) 8235 { 8236 if (bp->fw_health) 8237 return 0; 8238 8239 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 8240 if (!bp->fw_health) 8241 return -ENOMEM; 8242 8243 
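/* fw_health is allocated lazily and holds the error-recovery state:
 * the status/heartbeat/reset register locations, wait times and reset
 * sequence read from HWRM_ERROR_RECOVERY_QCFG further down, plus the
 * *_reliable flags tracking whether those registers can currently be
 * trusted.
 */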
mutex_init(&bp->fw_health->lock); 8244 return 0; 8245 } 8246 8247 static int bnxt_alloc_fw_health(struct bnxt *bp) 8248 { 8249 int rc; 8250 8251 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 8252 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8253 return 0; 8254 8255 rc = __bnxt_alloc_fw_health(bp); 8256 if (rc) { 8257 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 8258 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 8259 return rc; 8260 } 8261 8262 return 0; 8263 } 8264 8265 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 8266 { 8267 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 8268 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8269 BNXT_FW_HEALTH_WIN_MAP_OFF); 8270 } 8271 8272 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 8273 { 8274 struct bnxt_fw_health *fw_health = bp->fw_health; 8275 u32 reg_type; 8276 8277 if (!fw_health) 8278 return; 8279 8280 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 8281 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8282 fw_health->status_reliable = false; 8283 8284 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 8285 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 8286 fw_health->resets_reliable = false; 8287 } 8288 8289 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 8290 { 8291 void __iomem *hs; 8292 u32 status_loc; 8293 u32 reg_type; 8294 u32 sig; 8295 8296 if (bp->fw_health) 8297 bp->fw_health->status_reliable = false; 8298 8299 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 8300 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 8301 8302 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 8303 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 8304 if (!bp->chip_num) { 8305 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 8306 bp->chip_num = readl(bp->bar0 + 8307 BNXT_FW_HEALTH_WIN_BASE + 8308 BNXT_GRC_REG_CHIP_NUM); 8309 } 8310 if (!BNXT_CHIP_P5(bp)) 8311 return; 8312 8313 status_loc = BNXT_GRC_REG_STATUS_P5 | 8314 BNXT_FW_HEALTH_REG_TYPE_BAR0; 8315 } else { 8316 status_loc = readl(hs + offsetof(struct hcomm_status, 8317 fw_status_loc)); 8318 } 8319 8320 if (__bnxt_alloc_fw_health(bp)) { 8321 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 8322 return; 8323 } 8324 8325 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 8326 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 8327 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 8328 __bnxt_map_fw_health_reg(bp, status_loc); 8329 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 8330 BNXT_FW_HEALTH_WIN_OFF(status_loc); 8331 } 8332 8333 bp->fw_health->status_reliable = true; 8334 } 8335 8336 static int bnxt_map_fw_health_regs(struct bnxt *bp) 8337 { 8338 struct bnxt_fw_health *fw_health = bp->fw_health; 8339 u32 reg_base = 0xffffffff; 8340 int i; 8341 8342 bp->fw_health->status_reliable = false; 8343 bp->fw_health->resets_reliable = false; 8344 /* Only pre-map the monitoring GRC registers using window 3 */ 8345 for (i = 0; i < 4; i++) { 8346 u32 reg = fw_health->regs[i]; 8347 8348 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 8349 continue; 8350 if (reg_base == 0xffffffff) 8351 reg_base = reg & BNXT_GRC_BASE_MASK; 8352 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 8353 return -ERANGE; 8354 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 8355 } 8356 bp->fw_health->status_reliable = true; 8357 bp->fw_health->resets_reliable = true; 8358 if (reg_base == 0xffffffff) 8359 return 0; 8360 8361 __bnxt_map_fw_health_reg(bp, reg_base); 8362 return 0; 8363 } 8364 8365 static void 
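/* bnxt_remap_fw_health_regs(): with the error-recovery capability the
 * register map obtained from HWRM_ERROR_RECOVERY_QCFG is kept and just
 * marked reliable again; without it the driver falls back to
 * re-discovering the firmware status register via
 * bnxt_try_map_fw_health_reg().  bnxt_map_fw_health_regs() above also
 * requires all GRC-type registers to share one window base (-ERANGE
 * otherwise).
 */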
bnxt_remap_fw_health_regs(struct bnxt *bp) 8366 { 8367 if (!bp->fw_health) 8368 return; 8369 8370 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 8371 bp->fw_health->status_reliable = true; 8372 bp->fw_health->resets_reliable = true; 8373 } else { 8374 bnxt_try_map_fw_health_reg(bp); 8375 } 8376 } 8377 8378 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 8379 { 8380 struct bnxt_fw_health *fw_health = bp->fw_health; 8381 struct hwrm_error_recovery_qcfg_output *resp; 8382 struct hwrm_error_recovery_qcfg_input *req; 8383 int rc, i; 8384 8385 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 8386 return 0; 8387 8388 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 8389 if (rc) 8390 return rc; 8391 8392 resp = hwrm_req_hold(bp, req); 8393 rc = hwrm_req_send(bp, req); 8394 if (rc) 8395 goto err_recovery_out; 8396 fw_health->flags = le32_to_cpu(resp->flags); 8397 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 8398 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 8399 rc = -EINVAL; 8400 goto err_recovery_out; 8401 } 8402 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 8403 fw_health->master_func_wait_dsecs = 8404 le32_to_cpu(resp->master_func_wait_period); 8405 fw_health->normal_func_wait_dsecs = 8406 le32_to_cpu(resp->normal_func_wait_period); 8407 fw_health->post_reset_wait_dsecs = 8408 le32_to_cpu(resp->master_func_wait_period_after_reset); 8409 fw_health->post_reset_max_wait_dsecs = 8410 le32_to_cpu(resp->max_bailout_time_after_reset); 8411 fw_health->regs[BNXT_FW_HEALTH_REG] = 8412 le32_to_cpu(resp->fw_health_status_reg); 8413 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 8414 le32_to_cpu(resp->fw_heartbeat_reg); 8415 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 8416 le32_to_cpu(resp->fw_reset_cnt_reg); 8417 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 8418 le32_to_cpu(resp->reset_inprogress_reg); 8419 fw_health->fw_reset_inprog_reg_mask = 8420 le32_to_cpu(resp->reset_inprogress_reg_mask); 8421 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 8422 if (fw_health->fw_reset_seq_cnt >= 16) { 8423 rc = -EINVAL; 8424 goto err_recovery_out; 8425 } 8426 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 8427 fw_health->fw_reset_seq_regs[i] = 8428 le32_to_cpu(resp->reset_reg[i]); 8429 fw_health->fw_reset_seq_vals[i] = 8430 le32_to_cpu(resp->reset_reg_val[i]); 8431 fw_health->fw_reset_seq_delay_msec[i] = 8432 resp->delay_after_reset[i]; 8433 } 8434 err_recovery_out: 8435 hwrm_req_drop(bp, req); 8436 if (!rc) 8437 rc = bnxt_map_fw_health_regs(bp); 8438 if (rc) 8439 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 8440 return rc; 8441 } 8442 8443 static int bnxt_hwrm_func_reset(struct bnxt *bp) 8444 { 8445 struct hwrm_func_reset_input *req; 8446 int rc; 8447 8448 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 8449 if (rc) 8450 return rc; 8451 8452 req->enables = 0; 8453 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 8454 return hwrm_req_send(bp, req); 8455 } 8456 8457 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 8458 { 8459 struct hwrm_nvm_get_dev_info_output nvm_info; 8460 8461 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 8462 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 8463 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 8464 nvm_info.nvm_cfg_ver_upd); 8465 } 8466 8467 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 8468 { 8469 struct hwrm_queue_qportcfg_output *resp; 8470 struct hwrm_queue_qportcfg_input *req; 8471 u8 i, j, *qptr; 8472 bool no_rdma; 8473 int rc = 0; 8474 8475 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 8476 if (rc) 8477 
return rc; 8478 8479 resp = hwrm_req_hold(bp, req); 8480 rc = hwrm_req_send(bp, req); 8481 if (rc) 8482 goto qportcfg_exit; 8483 8484 if (!resp->max_configurable_queues) { 8485 rc = -EINVAL; 8486 goto qportcfg_exit; 8487 } 8488 bp->max_tc = resp->max_configurable_queues; 8489 bp->max_lltc = resp->max_configurable_lossless_queues; 8490 if (bp->max_tc > BNXT_MAX_QUEUE) 8491 bp->max_tc = BNXT_MAX_QUEUE; 8492 8493 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 8494 qptr = &resp->queue_id0; 8495 for (i = 0, j = 0; i < bp->max_tc; i++) { 8496 bp->q_info[j].queue_id = *qptr; 8497 bp->q_ids[i] = *qptr++; 8498 bp->q_info[j].queue_profile = *qptr++; 8499 bp->tc_to_qidx[j] = j; 8500 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 8501 (no_rdma && BNXT_PF(bp))) 8502 j++; 8503 } 8504 bp->max_q = bp->max_tc; 8505 bp->max_tc = max_t(u8, j, 1); 8506 8507 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 8508 bp->max_tc = 1; 8509 8510 if (bp->max_lltc > bp->max_tc) 8511 bp->max_lltc = bp->max_tc; 8512 8513 qportcfg_exit: 8514 hwrm_req_drop(bp, req); 8515 return rc; 8516 } 8517 8518 static int bnxt_hwrm_poll(struct bnxt *bp) 8519 { 8520 struct hwrm_ver_get_input *req; 8521 int rc; 8522 8523 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 8524 if (rc) 8525 return rc; 8526 8527 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 8528 req->hwrm_intf_min = HWRM_VERSION_MINOR; 8529 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 8530 8531 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 8532 rc = hwrm_req_send(bp, req); 8533 return rc; 8534 } 8535 8536 static int bnxt_hwrm_ver_get(struct bnxt *bp) 8537 { 8538 struct hwrm_ver_get_output *resp; 8539 struct hwrm_ver_get_input *req; 8540 u16 fw_maj, fw_min, fw_bld, fw_rsv; 8541 u32 dev_caps_cfg, hwrm_ver; 8542 int rc, len; 8543 8544 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 8545 if (rc) 8546 return rc; 8547 8548 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 8549 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 8550 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 8551 req->hwrm_intf_min = HWRM_VERSION_MINOR; 8552 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 8553 8554 resp = hwrm_req_hold(bp, req); 8555 rc = hwrm_req_send(bp, req); 8556 if (rc) 8557 goto hwrm_ver_get_exit; 8558 8559 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 8560 8561 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 8562 resp->hwrm_intf_min_8b << 8 | 8563 resp->hwrm_intf_upd_8b; 8564 if (resp->hwrm_intf_maj_8b < 1) { 8565 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 8566 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 8567 resp->hwrm_intf_upd_8b); 8568 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 8569 } 8570 8571 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 8572 HWRM_VERSION_UPDATE; 8573 8574 if (bp->hwrm_spec_code > hwrm_ver) 8575 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 8576 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 8577 HWRM_VERSION_UPDATE); 8578 else 8579 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 8580 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 8581 resp->hwrm_intf_upd_8b); 8582 8583 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 8584 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 8585 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 8586 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 8587 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 8588 len = FW_VER_STR_LEN; 8589 } else { 8590 fw_maj = resp->hwrm_fw_maj_8b; 8591 fw_min = resp->hwrm_fw_min_8b; 8592 fw_bld = 
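/* Legacy path: older firmware (spec <= 0x10803, or a zero 16-bit
 * major) only provides the 8-bit version fields, and the shorter
 * BC_HWRM_STR_LEN bounds the version string.
 */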
resp->hwrm_fw_bld_8b; 8593 fw_rsv = resp->hwrm_fw_rsvd_8b; 8594 len = BC_HWRM_STR_LEN; 8595 } 8596 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 8597 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 8598 fw_rsv); 8599 8600 if (strlen(resp->active_pkg_name)) { 8601 int fw_ver_len = strlen(bp->fw_ver_str); 8602 8603 snprintf(bp->fw_ver_str + fw_ver_len, 8604 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 8605 resp->active_pkg_name); 8606 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 8607 } 8608 8609 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 8610 if (!bp->hwrm_cmd_timeout) 8611 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 8612 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 8613 if (!bp->hwrm_cmd_max_timeout) 8614 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 8615 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 8616 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 8617 bp->hwrm_cmd_max_timeout / 1000); 8618 8619 if (resp->hwrm_intf_maj_8b >= 1) { 8620 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 8621 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 8622 } 8623 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 8624 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 8625 8626 bp->chip_num = le16_to_cpu(resp->chip_num); 8627 bp->chip_rev = resp->chip_rev; 8628 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 8629 !resp->chip_metal) 8630 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 8631 8632 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 8633 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 8634 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 8635 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 8636 8637 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 8638 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 8639 8640 if (dev_caps_cfg & 8641 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 8642 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 8643 8644 if (dev_caps_cfg & 8645 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 8646 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 8647 8648 if (dev_caps_cfg & 8649 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 8650 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 8651 8652 hwrm_ver_get_exit: 8653 hwrm_req_drop(bp, req); 8654 return rc; 8655 } 8656 8657 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 8658 { 8659 struct hwrm_fw_set_time_input *req; 8660 struct tm tm; 8661 time64_t now = ktime_get_real_seconds(); 8662 int rc; 8663 8664 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 8665 bp->hwrm_spec_code < 0x10400) 8666 return -EOPNOTSUPP; 8667 8668 time64_to_tm(now, 0, &tm); 8669 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 8670 if (rc) 8671 return rc; 8672 8673 req->year = cpu_to_le16(1900 + tm.tm_year); 8674 req->month = 1 + tm.tm_mon; 8675 req->day = tm.tm_mday; 8676 req->hour = tm.tm_hour; 8677 req->minute = tm.tm_min; 8678 req->second = tm.tm_sec; 8679 return hwrm_req_send(bp, req); 8680 } 8681 8682 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 8683 { 8684 u64 sw_tmp; 8685 8686 hw &= mask; 8687 sw_tmp = (*sw & ~mask) | hw; 8688 if (hw < (*sw & mask)) 8689 sw_tmp += mask + 1; 8690 WRITE_ONCE(*sw, sw_tmp); 8691 } 8692 8693 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 8694 int count, bool ignore_zero) 8695 { 8696 int i; 8697 8698 for (i = 0; i < count; i++) { 8699 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 8700 8701 if 
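/* bnxt_add_one_ctr() above widens a hardware counter that wraps at
 * "mask" into a monotonically increasing 64-bit software counter: the
 * low bits are replaced with the new hardware value and (mask + 1) is
 * added when a wrap is detected.  Illustrative example with a 24-bit
 * mask: sw 0x100ffff00 and a new hw reading of 0x10 yields
 * 0x101000010.  A mask of -1 means the counter is already full width
 * and is copied directly (the branch below).
 */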
(ignore_zero && !hw) 8702 continue; 8703 8704 if (masks[i] == -1ULL) 8705 sw_stats[i] = hw; 8706 else 8707 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 8708 } 8709 } 8710 8711 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 8712 { 8713 if (!stats->hw_stats) 8714 return; 8715 8716 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 8717 stats->hw_masks, stats->len / 8, false); 8718 } 8719 8720 static void bnxt_accumulate_all_stats(struct bnxt *bp) 8721 { 8722 struct bnxt_stats_mem *ring0_stats; 8723 bool ignore_zero = false; 8724 int i; 8725 8726 /* Chip bug. Counter intermittently becomes 0. */ 8727 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8728 ignore_zero = true; 8729 8730 for (i = 0; i < bp->cp_nr_rings; i++) { 8731 struct bnxt_napi *bnapi = bp->bnapi[i]; 8732 struct bnxt_cp_ring_info *cpr; 8733 struct bnxt_stats_mem *stats; 8734 8735 cpr = &bnapi->cp_ring; 8736 stats = &cpr->stats; 8737 if (!i) 8738 ring0_stats = stats; 8739 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 8740 ring0_stats->hw_masks, 8741 ring0_stats->len / 8, ignore_zero); 8742 } 8743 if (bp->flags & BNXT_FLAG_PORT_STATS) { 8744 struct bnxt_stats_mem *stats = &bp->port_stats; 8745 __le64 *hw_stats = stats->hw_stats; 8746 u64 *sw_stats = stats->sw_stats; 8747 u64 *masks = stats->hw_masks; 8748 int cnt; 8749 8750 cnt = sizeof(struct rx_port_stats) / 8; 8751 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8752 8753 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8754 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8755 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8756 cnt = sizeof(struct tx_port_stats) / 8; 8757 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8758 } 8759 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 8760 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 8761 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 8762 } 8763 } 8764 8765 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 8766 { 8767 struct hwrm_port_qstats_input *req; 8768 struct bnxt_pf_info *pf = &bp->pf; 8769 int rc; 8770 8771 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 8772 return 0; 8773 8774 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8775 return -EOPNOTSUPP; 8776 8777 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 8778 if (rc) 8779 return rc; 8780 8781 req->flags = flags; 8782 req->port_id = cpu_to_le16(pf->port_id); 8783 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 8784 BNXT_TX_PORT_STATS_BYTE_OFFSET); 8785 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 8786 return hwrm_req_send(bp, req); 8787 } 8788 8789 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 8790 { 8791 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 8792 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 8793 struct hwrm_port_qstats_ext_output *resp_qs; 8794 struct hwrm_port_qstats_ext_input *req_qs; 8795 struct bnxt_pf_info *pf = &bp->pf; 8796 u32 tx_stat_size; 8797 int rc; 8798 8799 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 8800 return 0; 8801 8802 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8803 return -EOPNOTSUPP; 8804 8805 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 8806 if (rc) 8807 return rc; 8808 8809 req_qs->flags = flags; 8810 req_qs->port_id = cpu_to_le16(pf->port_id); 8811 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 8812 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 8813 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
8814 sizeof(struct tx_port_stats_ext) : 0; 8815 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 8816 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 8817 resp_qs = hwrm_req_hold(bp, req_qs); 8818 rc = hwrm_req_send(bp, req_qs); 8819 if (!rc) { 8820 bp->fw_rx_stats_ext_size = 8821 le16_to_cpu(resp_qs->rx_stat_size) / 8; 8822 if (BNXT_FW_MAJ(bp) < 220 && 8823 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 8824 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 8825 8826 bp->fw_tx_stats_ext_size = tx_stat_size ? 8827 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 8828 } else { 8829 bp->fw_rx_stats_ext_size = 0; 8830 bp->fw_tx_stats_ext_size = 0; 8831 } 8832 hwrm_req_drop(bp, req_qs); 8833 8834 if (flags) 8835 return rc; 8836 8837 if (bp->fw_tx_stats_ext_size <= 8838 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 8839 bp->pri2cos_valid = 0; 8840 return rc; 8841 } 8842 8843 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 8844 if (rc) 8845 return rc; 8846 8847 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 8848 8849 resp_qc = hwrm_req_hold(bp, req_qc); 8850 rc = hwrm_req_send(bp, req_qc); 8851 if (!rc) { 8852 u8 *pri2cos; 8853 int i, j; 8854 8855 pri2cos = &resp_qc->pri0_cos_queue_id; 8856 for (i = 0; i < 8; i++) { 8857 u8 queue_id = pri2cos[i]; 8858 u8 queue_idx; 8859 8860 /* Per port queue IDs start from 0, 10, 20, etc */ 8861 queue_idx = queue_id % 10; 8862 if (queue_idx > BNXT_MAX_QUEUE) { 8863 bp->pri2cos_valid = false; 8864 hwrm_req_drop(bp, req_qc); 8865 return rc; 8866 } 8867 for (j = 0; j < bp->max_q; j++) { 8868 if (bp->q_ids[j] == queue_id) 8869 bp->pri2cos_idx[i] = queue_idx; 8870 } 8871 } 8872 bp->pri2cos_valid = true; 8873 } 8874 hwrm_req_drop(bp, req_qc); 8875 8876 return rc; 8877 } 8878 8879 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 8880 { 8881 bnxt_hwrm_tunnel_dst_port_free(bp, 8882 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 8883 bnxt_hwrm_tunnel_dst_port_free(bp, 8884 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 8885 } 8886 8887 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 8888 { 8889 int rc, i; 8890 u32 tpa_flags = 0; 8891 8892 if (set_tpa) 8893 tpa_flags = bp->flags & BNXT_FLAG_TPA; 8894 else if (BNXT_NO_FW_ACCESS(bp)) 8895 return 0; 8896 for (i = 0; i < bp->nr_vnics; i++) { 8897 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 8898 if (rc) { 8899 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 8900 i, rc); 8901 return rc; 8902 } 8903 } 8904 return 0; 8905 } 8906 8907 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 8908 { 8909 int i; 8910 8911 for (i = 0; i < bp->nr_vnics; i++) 8912 bnxt_hwrm_vnic_set_rss(bp, i, false); 8913 } 8914 8915 static void bnxt_clear_vnic(struct bnxt *bp) 8916 { 8917 if (!bp->vnic_info) 8918 return; 8919 8920 bnxt_hwrm_clear_vnic_filter(bp); 8921 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 8922 /* clear all RSS setting before free vnic ctx */ 8923 bnxt_hwrm_clear_vnic_rss(bp); 8924 bnxt_hwrm_vnic_ctx_free(bp); 8925 } 8926 /* before free the vnic, undo the vnic tpa settings */ 8927 if (bp->flags & BNXT_FLAG_TPA) 8928 bnxt_set_tpa(bp, false); 8929 bnxt_hwrm_vnic_free(bp); 8930 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8931 bnxt_hwrm_vnic_ctx_free(bp); 8932 } 8933 8934 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 8935 bool irq_re_init) 8936 { 8937 bnxt_clear_vnic(bp); 8938 bnxt_hwrm_ring_free(bp, close_path); 8939 bnxt_hwrm_ring_grp_free(bp); 8940 if (irq_re_init) { 8941 
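/* Teardown order above: L2 filters, RSS contexts and TPA are undone
 * before the VNICs are freed (bnxt_clear_vnic), then the rings and
 * ring groups.  Stat contexts and tunnel ports are only released when
 * interrupts are being re-initialized as well.
 */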
bnxt_hwrm_stat_ctx_free(bp); 8942 bnxt_hwrm_free_tunnel_ports(bp); 8943 } 8944 } 8945 8946 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 8947 { 8948 struct hwrm_func_cfg_input *req; 8949 u8 evb_mode; 8950 int rc; 8951 8952 if (br_mode == BRIDGE_MODE_VEB) 8953 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 8954 else if (br_mode == BRIDGE_MODE_VEPA) 8955 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 8956 else 8957 return -EINVAL; 8958 8959 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 8960 if (rc) 8961 return rc; 8962 8963 req->fid = cpu_to_le16(0xffff); 8964 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 8965 req->evb_mode = evb_mode; 8966 return hwrm_req_send(bp, req); 8967 } 8968 8969 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 8970 { 8971 struct hwrm_func_cfg_input *req; 8972 int rc; 8973 8974 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 8975 return 0; 8976 8977 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 8978 if (rc) 8979 return rc; 8980 8981 req->fid = cpu_to_le16(0xffff); 8982 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 8983 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 8984 if (size == 128) 8985 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 8986 8987 return hwrm_req_send(bp, req); 8988 } 8989 8990 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 8991 { 8992 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 8993 int rc; 8994 8995 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 8996 goto skip_rss_ctx; 8997 8998 /* allocate context for vnic */ 8999 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 9000 if (rc) { 9001 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9002 vnic_id, rc); 9003 goto vnic_setup_err; 9004 } 9005 bp->rsscos_nr_ctxs++; 9006 9007 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9008 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 9009 if (rc) { 9010 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 9011 vnic_id, rc); 9012 goto vnic_setup_err; 9013 } 9014 bp->rsscos_nr_ctxs++; 9015 } 9016 9017 skip_rss_ctx: 9018 /* configure default vnic, ring grp */ 9019 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 9020 if (rc) { 9021 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 9022 vnic_id, rc); 9023 goto vnic_setup_err; 9024 } 9025 9026 /* Enable RSS hashing on vnic */ 9027 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 9028 if (rc) { 9029 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 9030 vnic_id, rc); 9031 goto vnic_setup_err; 9032 } 9033 9034 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9035 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9036 if (rc) { 9037 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9038 vnic_id, rc); 9039 } 9040 } 9041 9042 vnic_setup_err: 9043 return rc; 9044 } 9045 9046 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 9047 { 9048 int rc, i, nr_ctxs; 9049 9050 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 9051 for (i = 0; i < nr_ctxs; i++) { 9052 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 9053 if (rc) { 9054 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 9055 vnic_id, i, rc); 9056 break; 9057 } 9058 bp->rsscos_nr_ctxs++; 9059 } 9060 if (i < nr_ctxs) 9061 return -ENOMEM; 9062 9063 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 9064 if (rc) { 9065 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 9066 vnic_id, rc); 9067 return rc; 9068 } 9069 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 9070 if (rc) { 9071 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: 
%x\n", 9072 vnic_id, rc); 9073 return rc; 9074 } 9075 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 9076 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 9077 if (rc) { 9078 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 9079 vnic_id, rc); 9080 } 9081 } 9082 return rc; 9083 } 9084 9085 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 9086 { 9087 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9088 return __bnxt_setup_vnic_p5(bp, vnic_id); 9089 else 9090 return __bnxt_setup_vnic(bp, vnic_id); 9091 } 9092 9093 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 9094 { 9095 #ifdef CONFIG_RFS_ACCEL 9096 int i, rc = 0; 9097 9098 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9099 return 0; 9100 9101 for (i = 0; i < bp->rx_nr_rings; i++) { 9102 struct bnxt_vnic_info *vnic; 9103 u16 vnic_id = i + 1; 9104 u16 ring_id = i; 9105 9106 if (vnic_id >= bp->nr_vnics) 9107 break; 9108 9109 vnic = &bp->vnic_info[vnic_id]; 9110 vnic->flags |= BNXT_VNIC_RFS_FLAG; 9111 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 9112 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 9113 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 9114 if (rc) { 9115 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9116 vnic_id, rc); 9117 break; 9118 } 9119 rc = bnxt_setup_vnic(bp, vnic_id); 9120 if (rc) 9121 break; 9122 } 9123 return rc; 9124 #else 9125 return 0; 9126 #endif 9127 } 9128 9129 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 9130 static bool bnxt_promisc_ok(struct bnxt *bp) 9131 { 9132 #ifdef CONFIG_BNXT_SRIOV 9133 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 9134 return false; 9135 #endif 9136 return true; 9137 } 9138 9139 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 9140 { 9141 unsigned int rc = 0; 9142 9143 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 9144 if (rc) { 9145 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9146 rc); 9147 return rc; 9148 } 9149 9150 rc = bnxt_hwrm_vnic_cfg(bp, 1); 9151 if (rc) { 9152 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 9153 rc); 9154 return rc; 9155 } 9156 return rc; 9157 } 9158 9159 static int bnxt_cfg_rx_mode(struct bnxt *); 9160 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 9161 9162 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 9163 { 9164 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9165 int rc = 0; 9166 unsigned int rx_nr_rings = bp->rx_nr_rings; 9167 9168 if (irq_re_init) { 9169 rc = bnxt_hwrm_stat_ctx_alloc(bp); 9170 if (rc) { 9171 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 9172 rc); 9173 goto err_out; 9174 } 9175 } 9176 9177 rc = bnxt_hwrm_ring_alloc(bp); 9178 if (rc) { 9179 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 9180 goto err_out; 9181 } 9182 9183 rc = bnxt_hwrm_ring_grp_alloc(bp); 9184 if (rc) { 9185 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 9186 goto err_out; 9187 } 9188 9189 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 9190 rx_nr_rings--; 9191 9192 /* default vnic 0 */ 9193 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 9194 if (rc) { 9195 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 9196 goto err_out; 9197 } 9198 9199 if (BNXT_VF(bp)) 9200 bnxt_hwrm_func_qcfg(bp); 9201 9202 rc = bnxt_setup_vnic(bp, 0); 9203 if (rc) 9204 goto err_out; 9205 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) 9206 bnxt_hwrm_update_rss_hash_cfg(bp); 9207 9208 if (bp->flags & BNXT_FLAG_RFS) { 9209 rc = bnxt_alloc_rfs_vnics(bp); 9210 if (rc) 9211 goto err_out; 9212 } 9213 9214 if (bp->flags 
& BNXT_FLAG_TPA) { 9215 rc = bnxt_set_tpa(bp, true); 9216 if (rc) 9217 goto err_out; 9218 } 9219 9220 if (BNXT_VF(bp)) 9221 bnxt_update_vf_mac(bp); 9222 9223 /* Filter for default vnic 0 */ 9224 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 9225 if (rc) { 9226 if (BNXT_VF(bp) && rc == -ENODEV) 9227 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 9228 else 9229 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 9230 goto err_out; 9231 } 9232 vnic->uc_filter_count = 1; 9233 9234 vnic->rx_mask = 0; 9235 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 9236 goto skip_rx_mask; 9237 9238 if (bp->dev->flags & IFF_BROADCAST) 9239 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9240 9241 if (bp->dev->flags & IFF_PROMISC) 9242 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9243 9244 if (bp->dev->flags & IFF_ALLMULTI) { 9245 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9246 vnic->mc_list_count = 0; 9247 } else if (bp->dev->flags & IFF_MULTICAST) { 9248 u32 mask = 0; 9249 9250 bnxt_mc_list_updated(bp, &mask); 9251 vnic->rx_mask |= mask; 9252 } 9253 9254 rc = bnxt_cfg_rx_mode(bp); 9255 if (rc) 9256 goto err_out; 9257 9258 skip_rx_mask: 9259 rc = bnxt_hwrm_set_coal(bp); 9260 if (rc) 9261 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 9262 rc); 9263 9264 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9265 rc = bnxt_setup_nitroa0_vnic(bp); 9266 if (rc) 9267 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 9268 rc); 9269 } 9270 9271 if (BNXT_VF(bp)) { 9272 bnxt_hwrm_func_qcfg(bp); 9273 netdev_update_features(bp->dev); 9274 } 9275 9276 return 0; 9277 9278 err_out: 9279 bnxt_hwrm_resource_free(bp, 0, true); 9280 9281 return rc; 9282 } 9283 9284 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 9285 { 9286 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 9287 return 0; 9288 } 9289 9290 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 9291 { 9292 bnxt_init_cp_rings(bp); 9293 bnxt_init_rx_rings(bp); 9294 bnxt_init_tx_rings(bp); 9295 bnxt_init_ring_grps(bp, irq_re_init); 9296 bnxt_init_vnics(bp); 9297 9298 return bnxt_init_chip(bp, irq_re_init); 9299 } 9300 9301 static int bnxt_set_real_num_queues(struct bnxt *bp) 9302 { 9303 int rc; 9304 struct net_device *dev = bp->dev; 9305 9306 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 9307 bp->tx_nr_rings_xdp); 9308 if (rc) 9309 return rc; 9310 9311 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 9312 if (rc) 9313 return rc; 9314 9315 #ifdef CONFIG_RFS_ACCEL 9316 if (bp->flags & BNXT_FLAG_RFS) 9317 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 9318 #endif 9319 9320 return rc; 9321 } 9322 9323 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9324 bool shared) 9325 { 9326 int _rx = *rx, _tx = *tx; 9327 9328 if (shared) { 9329 *rx = min_t(int, _rx, max); 9330 *tx = min_t(int, _tx, max); 9331 } else { 9332 if (max < 2) 9333 return -ENOMEM; 9334 9335 while (_rx + _tx > max) { 9336 if (_rx > _tx && _rx > 1) 9337 _rx--; 9338 else if (_tx > 1) 9339 _tx--; 9340 } 9341 *rx = _rx; 9342 *tx = _tx; 9343 } 9344 return 0; 9345 } 9346 9347 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 9348 { 9349 return (tx - tx_xdp) / tx_sets + tx_xdp; 9350 } 9351 9352 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 9353 { 9354 int tcs = netdev_get_num_tc(bp->dev); 9355 9356 if (!tcs) 9357 tcs = 1; 9358 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 9359 } 9360 9361 static int 
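/* __bnxt_num_tx_to_cp() above and bnxt_num_cp_to_tx() below convert
 * between TX ring counts and the completion rings they occupy:
 * roughly, the TX rings that the traffic classes stack on the same
 * queue share one completion ring, while XDP TX rings map one-to-one.
 * Illustrative numbers: with tx = 8, 2 TCs and no XDP rings, tx_cp is
 * (8 - 0) / 2 + 0 = 4, and the inverse gives (4 - 0) * 2 + 0 = 8.
 */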
bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 9362 { 9363 int tcs = netdev_get_num_tc(bp->dev); 9364 9365 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 9366 bp->tx_nr_rings_xdp; 9367 } 9368 9369 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 9370 bool sh) 9371 { 9372 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 9373 9374 if (tx_cp != *tx) { 9375 int tx_saved = tx_cp, rc; 9376 9377 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 9378 if (rc) 9379 return rc; 9380 if (tx_cp != tx_saved) 9381 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 9382 return 0; 9383 } 9384 return __bnxt_trim_rings(bp, rx, tx, max, sh); 9385 } 9386 9387 static void bnxt_setup_msix(struct bnxt *bp) 9388 { 9389 const int len = sizeof(bp->irq_tbl[0].name); 9390 struct net_device *dev = bp->dev; 9391 int tcs, i; 9392 9393 tcs = netdev_get_num_tc(dev); 9394 if (tcs) { 9395 int i, off, count; 9396 9397 for (i = 0; i < tcs; i++) { 9398 count = bp->tx_nr_rings_per_tc; 9399 off = BNXT_TC_TO_RING_BASE(bp, i); 9400 netdev_set_tc_queue(dev, i, count, off); 9401 } 9402 } 9403 9404 for (i = 0; i < bp->cp_nr_rings; i++) { 9405 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9406 char *attr; 9407 9408 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 9409 attr = "TxRx"; 9410 else if (i < bp->rx_nr_rings) 9411 attr = "rx"; 9412 else 9413 attr = "tx"; 9414 9415 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 9416 attr, i); 9417 bp->irq_tbl[map_idx].handler = bnxt_msix; 9418 } 9419 } 9420 9421 static void bnxt_setup_inta(struct bnxt *bp) 9422 { 9423 const int len = sizeof(bp->irq_tbl[0].name); 9424 9425 if (netdev_get_num_tc(bp->dev)) 9426 netdev_reset_tc(bp->dev); 9427 9428 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 9429 0); 9430 bp->irq_tbl[0].handler = bnxt_inta; 9431 } 9432 9433 static int bnxt_init_int_mode(struct bnxt *bp); 9434 9435 static int bnxt_setup_int_mode(struct bnxt *bp) 9436 { 9437 int rc; 9438 9439 if (!bp->irq_tbl) { 9440 rc = bnxt_init_int_mode(bp); 9441 if (rc || !bp->irq_tbl) 9442 return rc ?: -ENODEV; 9443 } 9444 9445 if (bp->flags & BNXT_FLAG_USING_MSIX) 9446 bnxt_setup_msix(bp); 9447 else 9448 bnxt_setup_inta(bp); 9449 9450 rc = bnxt_set_real_num_queues(bp); 9451 return rc; 9452 } 9453 9454 #ifdef CONFIG_RFS_ACCEL 9455 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 9456 { 9457 return bp->hw_resc.max_rsscos_ctxs; 9458 } 9459 9460 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 9461 { 9462 return bp->hw_resc.max_vnics; 9463 } 9464 #endif 9465 9466 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 9467 { 9468 return bp->hw_resc.max_stat_ctxs; 9469 } 9470 9471 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 9472 { 9473 return bp->hw_resc.max_cp_rings; 9474 } 9475 9476 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 9477 { 9478 unsigned int cp = bp->hw_resc.max_cp_rings; 9479 9480 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 9481 cp -= bnxt_get_ulp_msix_num(bp); 9482 9483 return cp; 9484 } 9485 9486 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 9487 { 9488 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9489 9490 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9491 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 9492 9493 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 9494 } 9495 9496 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 9497 { 9498 bp->hw_resc.max_irqs = max_irqs; 9499 } 9500 9501 unsigned int bnxt_get_avail_cp_rings_for_en(struct 
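/* On P5+ chips each RX and TX ring has its own completion ring, so the
 * count still available is the maximum minus (rx + tx) rings below; on
 * older chips completion rings are shared per NAPI instance, so
 * bp->cp_nr_rings is subtracted instead.
 */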
bnxt *bp) 9502 { 9503 unsigned int cp; 9504 9505 cp = bnxt_get_max_func_cp_rings_for_en(bp); 9506 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9507 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 9508 else 9509 return cp - bp->cp_nr_rings; 9510 } 9511 9512 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 9513 { 9514 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 9515 } 9516 9517 int bnxt_get_avail_msix(struct bnxt *bp, int num) 9518 { 9519 int max_cp = bnxt_get_max_func_cp_rings(bp); 9520 int max_irq = bnxt_get_max_func_irqs(bp); 9521 int total_req = bp->cp_nr_rings + num; 9522 int max_idx, avail_msix; 9523 9524 max_idx = bp->total_irqs; 9525 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 9526 max_idx = min_t(int, bp->total_irqs, max_cp); 9527 avail_msix = max_idx - bp->cp_nr_rings; 9528 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 9529 return avail_msix; 9530 9531 if (max_irq < total_req) { 9532 num = max_irq - bp->cp_nr_rings; 9533 if (num <= 0) 9534 return 0; 9535 } 9536 return num; 9537 } 9538 9539 static int bnxt_get_num_msix(struct bnxt *bp) 9540 { 9541 if (!BNXT_NEW_RM(bp)) 9542 return bnxt_get_max_func_irqs(bp); 9543 9544 return bnxt_nq_rings_in_use(bp); 9545 } 9546 9547 static int bnxt_init_msix(struct bnxt *bp) 9548 { 9549 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; 9550 struct msix_entry *msix_ent; 9551 9552 total_vecs = bnxt_get_num_msix(bp); 9553 max = bnxt_get_max_func_irqs(bp); 9554 if (total_vecs > max) 9555 total_vecs = max; 9556 9557 if (!total_vecs) 9558 return 0; 9559 9560 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 9561 if (!msix_ent) 9562 return -ENOMEM; 9563 9564 for (i = 0; i < total_vecs; i++) { 9565 msix_ent[i].entry = i; 9566 msix_ent[i].vector = 0; 9567 } 9568 9569 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 9570 min = 2; 9571 9572 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 9573 ulp_msix = bnxt_get_ulp_msix_num(bp); 9574 if (total_vecs < 0 || total_vecs < ulp_msix) { 9575 rc = -ENODEV; 9576 goto msix_setup_exit; 9577 } 9578 9579 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 9580 if (bp->irq_tbl) { 9581 for (i = 0; i < total_vecs; i++) 9582 bp->irq_tbl[i].vector = msix_ent[i].vector; 9583 9584 bp->total_irqs = total_vecs; 9585 /* Trim rings based upon num of vectors allocated */ 9586 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 9587 total_vecs - ulp_msix, min == 1); 9588 if (rc) 9589 goto msix_setup_exit; 9590 9591 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 9592 bp->cp_nr_rings = (min == 1) ? 
9593 max_t(int, tx_cp, bp->rx_nr_rings) : 9594 tx_cp + bp->rx_nr_rings; 9595 9596 } else { 9597 rc = -ENOMEM; 9598 goto msix_setup_exit; 9599 } 9600 bp->flags |= BNXT_FLAG_USING_MSIX; 9601 kfree(msix_ent); 9602 return 0; 9603 9604 msix_setup_exit: 9605 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 9606 kfree(bp->irq_tbl); 9607 bp->irq_tbl = NULL; 9608 pci_disable_msix(bp->pdev); 9609 kfree(msix_ent); 9610 return rc; 9611 } 9612 9613 static int bnxt_init_inta(struct bnxt *bp) 9614 { 9615 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); 9616 if (!bp->irq_tbl) 9617 return -ENOMEM; 9618 9619 bp->total_irqs = 1; 9620 bp->rx_nr_rings = 1; 9621 bp->tx_nr_rings = 1; 9622 bp->cp_nr_rings = 1; 9623 bp->flags |= BNXT_FLAG_SHARED_RINGS; 9624 bp->irq_tbl[0].vector = bp->pdev->irq; 9625 return 0; 9626 } 9627 9628 static int bnxt_init_int_mode(struct bnxt *bp) 9629 { 9630 int rc = -ENODEV; 9631 9632 if (bp->flags & BNXT_FLAG_MSIX_CAP) 9633 rc = bnxt_init_msix(bp); 9634 9635 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 9636 /* fallback to INTA */ 9637 rc = bnxt_init_inta(bp); 9638 } 9639 return rc; 9640 } 9641 9642 static void bnxt_clear_int_mode(struct bnxt *bp) 9643 { 9644 if (bp->flags & BNXT_FLAG_USING_MSIX) 9645 pci_disable_msix(bp->pdev); 9646 9647 kfree(bp->irq_tbl); 9648 bp->irq_tbl = NULL; 9649 bp->flags &= ~BNXT_FLAG_USING_MSIX; 9650 } 9651 9652 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 9653 { 9654 int tcs = netdev_get_num_tc(bp->dev); 9655 bool irq_cleared = false; 9656 int rc; 9657 9658 if (!bnxt_need_reserve_rings(bp)) 9659 return 0; 9660 9661 if (irq_re_init && BNXT_NEW_RM(bp) && 9662 bnxt_get_num_msix(bp) != bp->total_irqs) { 9663 bnxt_ulp_irq_stop(bp); 9664 bnxt_clear_int_mode(bp); 9665 irq_cleared = true; 9666 } 9667 rc = __bnxt_reserve_rings(bp); 9668 if (irq_cleared) { 9669 if (!rc) 9670 rc = bnxt_init_int_mode(bp); 9671 bnxt_ulp_irq_restart(bp, rc); 9672 } 9673 if (rc) { 9674 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 9675 return rc; 9676 } 9677 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 9678 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 9679 netdev_err(bp->dev, "tx ring reservation failure\n"); 9680 netdev_reset_tc(bp->dev); 9681 if (bp->tx_nr_rings_xdp) 9682 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 9683 else 9684 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 9685 return -ENOMEM; 9686 } 9687 return 0; 9688 } 9689 9690 static void bnxt_free_irq(struct bnxt *bp) 9691 { 9692 struct bnxt_irq *irq; 9693 int i; 9694 9695 #ifdef CONFIG_RFS_ACCEL 9696 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 9697 bp->dev->rx_cpu_rmap = NULL; 9698 #endif 9699 if (!bp->irq_tbl || !bp->bnapi) 9700 return; 9701 9702 for (i = 0; i < bp->cp_nr_rings; i++) { 9703 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9704 9705 irq = &bp->irq_tbl[map_idx]; 9706 if (irq->requested) { 9707 if (irq->have_cpumask) { 9708 irq_set_affinity_hint(irq->vector, NULL); 9709 free_cpumask_var(irq->cpu_mask); 9710 irq->have_cpumask = 0; 9711 } 9712 free_irq(irq->vector, bp->bnapi[i]); 9713 } 9714 9715 irq->requested = 0; 9716 } 9717 } 9718 9719 static int bnxt_request_irq(struct bnxt *bp) 9720 { 9721 int i, j, rc = 0; 9722 unsigned long flags = 0; 9723 #ifdef CONFIG_RFS_ACCEL 9724 struct cpu_rmap *rmap; 9725 #endif 9726 9727 rc = bnxt_setup_int_mode(bp); 9728 if (rc) { 9729 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 9730 rc); 9731 return rc; 9732 } 9733 #ifdef CONFIG_RFS_ACCEL 9734 rmap = bp->dev->rx_cpu_rmap; 9735 #endif 9736 if (!(bp->flags & 
BNXT_FLAG_USING_MSIX)) 9737 flags = IRQF_SHARED; 9738 9739 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 9740 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 9741 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 9742 9743 #ifdef CONFIG_RFS_ACCEL 9744 if (rmap && bp->bnapi[i]->rx_ring) { 9745 rc = irq_cpu_rmap_add(rmap, irq->vector); 9746 if (rc) 9747 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 9748 j); 9749 j++; 9750 } 9751 #endif 9752 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 9753 bp->bnapi[i]); 9754 if (rc) 9755 break; 9756 9757 irq->requested = 1; 9758 9759 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 9760 int numa_node = dev_to_node(&bp->pdev->dev); 9761 9762 irq->have_cpumask = 1; 9763 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 9764 irq->cpu_mask); 9765 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 9766 if (rc) { 9767 netdev_warn(bp->dev, 9768 "Set affinity failed, IRQ = %d\n", 9769 irq->vector); 9770 break; 9771 } 9772 } 9773 } 9774 return rc; 9775 } 9776 9777 static void bnxt_del_napi(struct bnxt *bp) 9778 { 9779 int i; 9780 9781 if (!bp->bnapi) 9782 return; 9783 9784 for (i = 0; i < bp->cp_nr_rings; i++) { 9785 struct bnxt_napi *bnapi = bp->bnapi[i]; 9786 9787 __netif_napi_del(&bnapi->napi); 9788 } 9789 /* We called __netif_napi_del(), we need 9790 * to respect an RCU grace period before freeing napi structures. 9791 */ 9792 synchronize_net(); 9793 } 9794 9795 static void bnxt_init_napi(struct bnxt *bp) 9796 { 9797 int i; 9798 unsigned int cp_nr_rings = bp->cp_nr_rings; 9799 struct bnxt_napi *bnapi; 9800 9801 if (bp->flags & BNXT_FLAG_USING_MSIX) { 9802 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 9803 9804 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 9805 poll_fn = bnxt_poll_p5; 9806 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 9807 cp_nr_rings--; 9808 for (i = 0; i < cp_nr_rings; i++) { 9809 bnapi = bp->bnapi[i]; 9810 netif_napi_add(bp->dev, &bnapi->napi, poll_fn); 9811 } 9812 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 9813 bnapi = bp->bnapi[cp_nr_rings]; 9814 netif_napi_add(bp->dev, &bnapi->napi, 9815 bnxt_poll_nitroa0); 9816 } 9817 } else { 9818 bnapi = bp->bnapi[0]; 9819 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); 9820 } 9821 } 9822 9823 static void bnxt_disable_napi(struct bnxt *bp) 9824 { 9825 int i; 9826 9827 if (!bp->bnapi || 9828 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 9829 return; 9830 9831 for (i = 0; i < bp->cp_nr_rings; i++) { 9832 struct bnxt_napi *bnapi = bp->bnapi[i]; 9833 struct bnxt_cp_ring_info *cpr; 9834 9835 cpr = &bnapi->cp_ring; 9836 if (bnapi->tx_fault) 9837 cpr->sw_stats.tx.tx_resets++; 9838 if (bnapi->in_reset) 9839 cpr->sw_stats.rx.rx_resets++; 9840 napi_disable(&bnapi->napi); 9841 if (bnapi->rx_ring) 9842 cancel_work_sync(&cpr->dim.work); 9843 } 9844 } 9845 9846 static void bnxt_enable_napi(struct bnxt *bp) 9847 { 9848 int i; 9849 9850 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 9851 for (i = 0; i < bp->cp_nr_rings; i++) { 9852 struct bnxt_napi *bnapi = bp->bnapi[i]; 9853 struct bnxt_cp_ring_info *cpr; 9854 9855 bnapi->tx_fault = 0; 9856 9857 cpr = &bnapi->cp_ring; 9858 bnapi->in_reset = false; 9859 9860 if (bnapi->rx_ring) { 9861 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 9862 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 9863 } 9864 napi_enable(&bnapi->napi); 9865 } 9866 } 9867 9868 void bnxt_tx_disable(struct bnxt *bp) 9869 { 9870 int i; 9871 struct bnxt_tx_ring_info *txr; 9872 9873 if (bp->tx_ring) { 9874 for (i = 0; i < bp->tx_nr_rings; i++) { 9875 txr = 
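		/* Mark every TX ring as closing first; the synchronize_net()
		 * below ensures in-flight NAPI pollers observe the new
		 * dev_state before the stack-visible queues are stopped.
		 */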
&bp->tx_ring[i]; 9876 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 9877 } 9878 } 9879 /* Make sure napi polls see @dev_state change */ 9880 synchronize_net(); 9881 /* Drop carrier first to prevent TX timeout */ 9882 netif_carrier_off(bp->dev); 9883 /* Stop all TX queues */ 9884 netif_tx_disable(bp->dev); 9885 } 9886 9887 void bnxt_tx_enable(struct bnxt *bp) 9888 { 9889 int i; 9890 struct bnxt_tx_ring_info *txr; 9891 9892 for (i = 0; i < bp->tx_nr_rings; i++) { 9893 txr = &bp->tx_ring[i]; 9894 WRITE_ONCE(txr->dev_state, 0); 9895 } 9896 /* Make sure napi polls see @dev_state change */ 9897 synchronize_net(); 9898 netif_tx_wake_all_queues(bp->dev); 9899 if (BNXT_LINK_IS_UP(bp)) 9900 netif_carrier_on(bp->dev); 9901 } 9902 9903 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 9904 { 9905 u8 active_fec = link_info->active_fec_sig_mode & 9906 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 9907 9908 switch (active_fec) { 9909 default: 9910 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 9911 return "None"; 9912 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 9913 return "Clause 74 BaseR"; 9914 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 9915 return "Clause 91 RS(528,514)"; 9916 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 9917 return "Clause 91 RS544_1XN"; 9918 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 9919 return "Clause 91 RS(544,514)"; 9920 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 9921 return "Clause 91 RS272_1XN"; 9922 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 9923 return "Clause 91 RS(272,257)"; 9924 } 9925 } 9926 9927 void bnxt_report_link(struct bnxt *bp) 9928 { 9929 if (BNXT_LINK_IS_UP(bp)) { 9930 const char *signal = ""; 9931 const char *flow_ctrl; 9932 const char *duplex; 9933 u32 speed; 9934 u16 fec; 9935 9936 netif_carrier_on(bp->dev); 9937 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 9938 if (speed == SPEED_UNKNOWN) { 9939 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 9940 return; 9941 } 9942 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 9943 duplex = "full"; 9944 else 9945 duplex = "half"; 9946 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 9947 flow_ctrl = "ON - receive & transmit"; 9948 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 9949 flow_ctrl = "ON - transmit"; 9950 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 9951 flow_ctrl = "ON - receive"; 9952 else 9953 flow_ctrl = "none"; 9954 if (bp->link_info.phy_qcfg_resp.option_flags & 9955 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 9956 u8 sig_mode = bp->link_info.active_fec_sig_mode & 9957 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 9958 switch (sig_mode) { 9959 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 9960 signal = "(NRZ) "; 9961 break; 9962 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 9963 signal = "(PAM4) "; 9964 break; 9965 default: 9966 break; 9967 } 9968 } 9969 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 9970 speed, signal, duplex, flow_ctrl); 9971 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 9972 netdev_info(bp->dev, "EEE is %s\n", 9973 bp->eee.eee_active ? "active" : 9974 "not active"); 9975 fec = bp->link_info.fec_cfg; 9976 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 9977 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 9978 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 9979 bnxt_report_fec(&bp->link_info)); 9980 } else { 9981 netif_carrier_off(bp->dev); 9982 netdev_err(bp->dev, "NIC Link is Down\n"); 9983 } 9984 } 9985 9986 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 9987 { 9988 if (!resp->supported_speeds_auto_mode && 9989 !resp->supported_speeds_force_mode && 9990 !resp->supported_pam4_speeds_auto_mode && 9991 !resp->supported_pam4_speeds_force_mode) 9992 return true; 9993 return false; 9994 } 9995 9996 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 9997 { 9998 struct bnxt_link_info *link_info = &bp->link_info; 9999 struct hwrm_port_phy_qcaps_output *resp; 10000 struct hwrm_port_phy_qcaps_input *req; 10001 int rc = 0; 10002 10003 if (bp->hwrm_spec_code < 0x10201) 10004 return 0; 10005 10006 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 10007 if (rc) 10008 return rc; 10009 10010 resp = hwrm_req_hold(bp, req); 10011 rc = hwrm_req_send(bp, req); 10012 if (rc) 10013 goto hwrm_phy_qcaps_exit; 10014 10015 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 10016 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 10017 struct ethtool_eee *eee = &bp->eee; 10018 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 10019 10020 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10021 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 10022 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 10023 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 10024 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 10025 } 10026 10027 if (bp->hwrm_spec_code >= 0x10a01) { 10028 if (bnxt_phy_qcaps_no_speed(resp)) { 10029 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 10030 netdev_warn(bp->dev, "Ethernet link disabled\n"); 10031 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 10032 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 10033 netdev_info(bp->dev, "Ethernet link enabled\n"); 10034 /* Phy re-enabled, reprobe the speeds */ 10035 link_info->support_auto_speeds = 0; 10036 link_info->support_pam4_auto_speeds = 0; 10037 } 10038 } 10039 if (resp->supported_speeds_auto_mode) 10040 link_info->support_auto_speeds = 10041 le16_to_cpu(resp->supported_speeds_auto_mode); 10042 if (resp->supported_pam4_speeds_auto_mode) 10043 link_info->support_pam4_auto_speeds = 10044 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 10045 10046 bp->port_count = resp->port_cnt; 10047 10048 hwrm_phy_qcaps_exit: 10049 hwrm_req_drop(bp, req); 10050 return rc; 10051 } 10052 10053 static bool bnxt_support_dropped(u16 advertising, u16 supported) 10054 { 10055 u16 diff = advertising ^ supported; 10056 10057 return ((supported | diff) != supported); 10058 } 10059 10060 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) 10061 { 10062 /* Check if any advertised speeds are no longer supported. The caller 10063 * holds the link_lock mutex, so we can modify link_info settings. 
10064 */ 10065 if (bnxt_support_dropped(link_info->advertising, 10066 link_info->support_auto_speeds)) { 10067 link_info->advertising = link_info->support_auto_speeds; 10068 return true; 10069 } 10070 if (bnxt_support_dropped(link_info->advertising_pam4, 10071 link_info->support_pam4_auto_speeds)) { 10072 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 10073 return true; 10074 } 10075 return false; 10076 } 10077 10078 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 10079 { 10080 struct bnxt_link_info *link_info = &bp->link_info; 10081 struct hwrm_port_phy_qcfg_output *resp; 10082 struct hwrm_port_phy_qcfg_input *req; 10083 u8 link_state = link_info->link_state; 10084 bool support_changed; 10085 int rc; 10086 10087 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 10088 if (rc) 10089 return rc; 10090 10091 resp = hwrm_req_hold(bp, req); 10092 rc = hwrm_req_send(bp, req); 10093 if (rc) { 10094 hwrm_req_drop(bp, req); 10095 if (BNXT_VF(bp) && rc == -ENODEV) { 10096 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 10097 rc = 0; 10098 } 10099 return rc; 10100 } 10101 10102 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 10103 link_info->phy_link_status = resp->link; 10104 link_info->duplex = resp->duplex_cfg; 10105 if (bp->hwrm_spec_code >= 0x10800) 10106 link_info->duplex = resp->duplex_state; 10107 link_info->pause = resp->pause; 10108 link_info->auto_mode = resp->auto_mode; 10109 link_info->auto_pause_setting = resp->auto_pause; 10110 link_info->lp_pause = resp->link_partner_adv_pause; 10111 link_info->force_pause_setting = resp->force_pause; 10112 link_info->duplex_setting = resp->duplex_cfg; 10113 if (link_info->phy_link_status == BNXT_LINK_LINK) 10114 link_info->link_speed = le16_to_cpu(resp->link_speed); 10115 else 10116 link_info->link_speed = 0; 10117 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 10118 link_info->force_pam4_link_speed = 10119 le16_to_cpu(resp->force_pam4_link_speed); 10120 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 10121 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 10122 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 10123 link_info->auto_pam4_link_speeds = 10124 le16_to_cpu(resp->auto_pam4_link_speed_mask); 10125 link_info->lp_auto_link_speeds = 10126 le16_to_cpu(resp->link_partner_adv_speeds); 10127 link_info->lp_auto_pam4_link_speeds = 10128 resp->link_partner_pam4_adv_speeds; 10129 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 10130 link_info->phy_ver[0] = resp->phy_maj; 10131 link_info->phy_ver[1] = resp->phy_min; 10132 link_info->phy_ver[2] = resp->phy_bld; 10133 link_info->media_type = resp->media_type; 10134 link_info->phy_type = resp->phy_type; 10135 link_info->transceiver = resp->xcvr_pkg_type; 10136 link_info->phy_addr = resp->eee_config_phy_addr & 10137 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 10138 link_info->module_status = resp->module_status; 10139 10140 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 10141 struct ethtool_eee *eee = &bp->eee; 10142 u16 fw_speeds; 10143 10144 eee->eee_active = 0; 10145 if (resp->eee_config_phy_addr & 10146 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 10147 eee->eee_active = 1; 10148 fw_speeds = le16_to_cpu( 10149 resp->link_partner_adv_eee_link_speed_mask); 10150 eee->lp_advertised = 10151 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 10152 } 10153 10154 /* Pull initial EEE config */ 10155 if (!chng_link_state) { 10156 if (resp->eee_config_phy_addr & 10157 
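			/* The initial EEE configuration is pulled only when
			 * the caller is not processing a link state change,
			 * presumably so a later link-change update does not
			 * overwrite user-requested EEE settings.
			 */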
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504) {
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
	}
	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_state = BNXT_LINK_STATE_UP;
		else
			link_info->link_state = BNXT_LINK_STATE_DOWN;
		if (link_state != link_info->link_state)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_state = BNXT_LINK_STATE_DOWN;
	}
	hwrm_req_drop(bp, req);

	if (!BNXT_PHY_CFG_ABLE(bp))
		return 0;

	support_changed = bnxt_support_speed_dropped(link_info);
	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
		bnxt_hwrm_set_link_setting(bp, true, false);
	return 0;
}

static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
	} else {
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
		if
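		/* For HWRM spec 1.2.1 and later, the forced pause bits are
		 * also mirrored into auto_pause below (with the AUTO_PAUSE
		 * enable set), which appears to be expected by newer firmware
		 * interfaces.
		 */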
(bp->hwrm_spec_code >= 0x10201) { 10252 req->auto_pause = req->force_pause; 10253 req->enables |= cpu_to_le32( 10254 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 10255 } 10256 } 10257 } 10258 10259 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 10260 { 10261 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 10262 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 10263 if (bp->link_info.advertising) { 10264 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 10265 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 10266 } 10267 if (bp->link_info.advertising_pam4) { 10268 req->enables |= 10269 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 10270 req->auto_link_pam4_speed_mask = 10271 cpu_to_le16(bp->link_info.advertising_pam4); 10272 } 10273 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 10274 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 10275 } else { 10276 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 10277 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 10278 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10279 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 10280 } else { 10281 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 10282 } 10283 } 10284 10285 /* tell chimp that the setting takes effect immediately */ 10286 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 10287 } 10288 10289 int bnxt_hwrm_set_pause(struct bnxt *bp) 10290 { 10291 struct hwrm_port_phy_cfg_input *req; 10292 int rc; 10293 10294 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10295 if (rc) 10296 return rc; 10297 10298 bnxt_hwrm_set_pause_common(bp, req); 10299 10300 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 10301 bp->link_info.force_link_chng) 10302 bnxt_hwrm_set_link_common(bp, req); 10303 10304 rc = hwrm_req_send(bp, req); 10305 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 10306 /* since changing of pause setting doesn't trigger any link 10307 * change event, the driver needs to update the current pause 10308 * result upon successfully return of the phy_cfg command 10309 */ 10310 bp->link_info.pause = 10311 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 10312 bp->link_info.auto_pause_setting = 0; 10313 if (!bp->link_info.force_link_chng) 10314 bnxt_report_link(bp); 10315 } 10316 bp->link_info.force_link_chng = false; 10317 return rc; 10318 } 10319 10320 static void bnxt_hwrm_set_eee(struct bnxt *bp, 10321 struct hwrm_port_phy_cfg_input *req) 10322 { 10323 struct ethtool_eee *eee = &bp->eee; 10324 10325 if (eee->eee_enabled) { 10326 u16 eee_speeds; 10327 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 10328 10329 if (eee->tx_lpi_enabled) 10330 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 10331 else 10332 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 10333 10334 req->flags |= cpu_to_le32(flags); 10335 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 10336 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 10337 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 10338 } else { 10339 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 10340 } 10341 } 10342 10343 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 10344 { 10345 struct hwrm_port_phy_cfg_input *req; 10346 int rc; 10347 10348 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10349 if (rc) 10350 return 
rc; 10351 10352 if (set_pause) 10353 bnxt_hwrm_set_pause_common(bp, req); 10354 10355 bnxt_hwrm_set_link_common(bp, req); 10356 10357 if (set_eee) 10358 bnxt_hwrm_set_eee(bp, req); 10359 return hwrm_req_send(bp, req); 10360 } 10361 10362 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 10363 { 10364 struct hwrm_port_phy_cfg_input *req; 10365 int rc; 10366 10367 if (!BNXT_SINGLE_PF(bp)) 10368 return 0; 10369 10370 if (pci_num_vf(bp->pdev) && 10371 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 10372 return 0; 10373 10374 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 10375 if (rc) 10376 return rc; 10377 10378 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 10379 rc = hwrm_req_send(bp, req); 10380 if (!rc) { 10381 mutex_lock(&bp->link_lock); 10382 /* Device is not obliged link down in certain scenarios, even 10383 * when forced. Setting the state unknown is consistent with 10384 * driver startup and will force link state to be reported 10385 * during subsequent open based on PORT_PHY_QCFG. 10386 */ 10387 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 10388 mutex_unlock(&bp->link_lock); 10389 } 10390 return rc; 10391 } 10392 10393 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 10394 { 10395 #ifdef CONFIG_TEE_BNXT_FW 10396 int rc = tee_bnxt_fw_load(); 10397 10398 if (rc) 10399 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 10400 10401 return rc; 10402 #else 10403 netdev_err(bp->dev, "OP-TEE not supported\n"); 10404 return -ENODEV; 10405 #endif 10406 } 10407 10408 static int bnxt_try_recover_fw(struct bnxt *bp) 10409 { 10410 if (bp->fw_health && bp->fw_health->status_reliable) { 10411 int retry = 0, rc; 10412 u32 sts; 10413 10414 do { 10415 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10416 rc = bnxt_hwrm_poll(bp); 10417 if (!BNXT_FW_IS_BOOTING(sts) && 10418 !BNXT_FW_IS_RECOVERING(sts)) 10419 break; 10420 retry++; 10421 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 10422 10423 if (!BNXT_FW_IS_HEALTHY(sts)) { 10424 netdev_err(bp->dev, 10425 "Firmware not responding, status: 0x%x\n", 10426 sts); 10427 rc = -ENODEV; 10428 } 10429 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 10430 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 10431 return bnxt_fw_reset_via_optee(bp); 10432 } 10433 return rc; 10434 } 10435 10436 return -ENODEV; 10437 } 10438 10439 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 10440 { 10441 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10442 10443 if (!BNXT_NEW_RM(bp)) 10444 return; /* no resource reservations required */ 10445 10446 hw_resc->resv_cp_rings = 0; 10447 hw_resc->resv_stat_ctxs = 0; 10448 hw_resc->resv_irqs = 0; 10449 hw_resc->resv_tx_rings = 0; 10450 hw_resc->resv_rx_rings = 0; 10451 hw_resc->resv_hw_ring_grps = 0; 10452 hw_resc->resv_vnics = 0; 10453 if (!fw_reset) { 10454 bp->tx_nr_rings = 0; 10455 bp->rx_nr_rings = 0; 10456 } 10457 } 10458 10459 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 10460 { 10461 int rc; 10462 10463 if (!BNXT_NEW_RM(bp)) 10464 return 0; /* no resource reservations required */ 10465 10466 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 10467 if (rc) 10468 netdev_err(bp->dev, "resc_qcaps failed\n"); 10469 10470 bnxt_clear_reservations(bp, fw_reset); 10471 10472 return rc; 10473 } 10474 10475 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 10476 { 10477 struct hwrm_func_drv_if_change_output *resp; 10478 struct hwrm_func_drv_if_change_input *req; 10479 bool fw_reset = !bp->irq_tbl; 10480 bool resc_reinit = false; 10481 int rc, 
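	/* FUNC_DRV_IF_CHANGE tells the firmware that the driver interface is
	 * going up or down; the response flags indicate whether resources
	 * changed or a hot firmware reset completed while the interface was
	 * down, in which case the function is reinitialized below.
	 */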
retry = 0; 10482 u32 flags = 0; 10483 10484 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 10485 return 0; 10486 10487 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 10488 if (rc) 10489 return rc; 10490 10491 if (up) 10492 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 10493 resp = hwrm_req_hold(bp, req); 10494 10495 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 10496 while (retry < BNXT_FW_IF_RETRY) { 10497 rc = hwrm_req_send(bp, req); 10498 if (rc != -EAGAIN) 10499 break; 10500 10501 msleep(50); 10502 retry++; 10503 } 10504 10505 if (rc == -EAGAIN) { 10506 hwrm_req_drop(bp, req); 10507 return rc; 10508 } else if (!rc) { 10509 flags = le32_to_cpu(resp->flags); 10510 } else if (up) { 10511 rc = bnxt_try_recover_fw(bp); 10512 fw_reset = true; 10513 } 10514 hwrm_req_drop(bp, req); 10515 if (rc) 10516 return rc; 10517 10518 if (!up) { 10519 bnxt_inv_fw_health_reg(bp); 10520 return 0; 10521 } 10522 10523 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 10524 resc_reinit = true; 10525 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 10526 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 10527 fw_reset = true; 10528 else 10529 bnxt_remap_fw_health_regs(bp); 10530 10531 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 10532 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 10533 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10534 return -ENODEV; 10535 } 10536 if (resc_reinit || fw_reset) { 10537 if (fw_reset) { 10538 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10539 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10540 bnxt_ulp_stop(bp); 10541 bnxt_free_ctx_mem(bp); 10542 bnxt_dcb_free(bp); 10543 rc = bnxt_fw_init_one(bp); 10544 if (rc) { 10545 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10546 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10547 return rc; 10548 } 10549 bnxt_clear_int_mode(bp); 10550 rc = bnxt_init_int_mode(bp); 10551 if (rc) { 10552 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10553 netdev_err(bp->dev, "init int mode failed\n"); 10554 return rc; 10555 } 10556 } 10557 rc = bnxt_cancel_reservations(bp, fw_reset); 10558 } 10559 return rc; 10560 } 10561 10562 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 10563 { 10564 struct hwrm_port_led_qcaps_output *resp; 10565 struct hwrm_port_led_qcaps_input *req; 10566 struct bnxt_pf_info *pf = &bp->pf; 10567 int rc; 10568 10569 bp->num_leds = 0; 10570 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 10571 return 0; 10572 10573 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 10574 if (rc) 10575 return rc; 10576 10577 req->port_id = cpu_to_le16(pf->port_id); 10578 resp = hwrm_req_hold(bp, req); 10579 rc = hwrm_req_send(bp, req); 10580 if (rc) { 10581 hwrm_req_drop(bp, req); 10582 return rc; 10583 } 10584 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 10585 int i; 10586 10587 bp->num_leds = resp->num_leds; 10588 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 10589 bp->num_leds); 10590 for (i = 0; i < bp->num_leds; i++) { 10591 struct bnxt_led_info *led = &bp->leds[i]; 10592 __le16 caps = led->led_state_caps; 10593 10594 if (!led->led_group_id || 10595 !BNXT_LED_ALT_BLINK_CAP(caps)) { 10596 bp->num_leds = 0; 10597 break; 10598 } 10599 } 10600 } 10601 hwrm_req_drop(bp, req); 10602 return 0; 10603 } 10604 10605 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 10606 { 10607 struct hwrm_wol_filter_alloc_output *resp; 10608 struct hwrm_wol_filter_alloc_input *req; 10609 int rc; 10610 10611 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 10612 if (rc) 10613 
return rc; 10614 10615 req->port_id = cpu_to_le16(bp->pf.port_id); 10616 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 10617 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 10618 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 10619 10620 resp = hwrm_req_hold(bp, req); 10621 rc = hwrm_req_send(bp, req); 10622 if (!rc) 10623 bp->wol_filter_id = resp->wol_filter_id; 10624 hwrm_req_drop(bp, req); 10625 return rc; 10626 } 10627 10628 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 10629 { 10630 struct hwrm_wol_filter_free_input *req; 10631 int rc; 10632 10633 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 10634 if (rc) 10635 return rc; 10636 10637 req->port_id = cpu_to_le16(bp->pf.port_id); 10638 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 10639 req->wol_filter_id = bp->wol_filter_id; 10640 10641 return hwrm_req_send(bp, req); 10642 } 10643 10644 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 10645 { 10646 struct hwrm_wol_filter_qcfg_output *resp; 10647 struct hwrm_wol_filter_qcfg_input *req; 10648 u16 next_handle = 0; 10649 int rc; 10650 10651 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 10652 if (rc) 10653 return rc; 10654 10655 req->port_id = cpu_to_le16(bp->pf.port_id); 10656 req->handle = cpu_to_le16(handle); 10657 resp = hwrm_req_hold(bp, req); 10658 rc = hwrm_req_send(bp, req); 10659 if (!rc) { 10660 next_handle = le16_to_cpu(resp->next_handle); 10661 if (next_handle != 0) { 10662 if (resp->wol_type == 10663 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 10664 bp->wol = 1; 10665 bp->wol_filter_id = resp->wol_filter_id; 10666 } 10667 } 10668 } 10669 hwrm_req_drop(bp, req); 10670 return next_handle; 10671 } 10672 10673 static void bnxt_get_wol_settings(struct bnxt *bp) 10674 { 10675 u16 handle = 0; 10676 10677 bp->wol = 0; 10678 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 10679 return; 10680 10681 do { 10682 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 10683 } while (handle && handle != 0xffff); 10684 } 10685 10686 static bool bnxt_eee_config_ok(struct bnxt *bp) 10687 { 10688 struct ethtool_eee *eee = &bp->eee; 10689 struct bnxt_link_info *link_info = &bp->link_info; 10690 10691 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 10692 return true; 10693 10694 if (eee->eee_enabled) { 10695 u32 advertising = 10696 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 10697 10698 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 10699 eee->eee_enabled = 0; 10700 return false; 10701 } 10702 if (eee->advertised & ~advertising) { 10703 eee->advertised = advertising & eee->supported; 10704 return false; 10705 } 10706 } 10707 return true; 10708 } 10709 10710 static int bnxt_update_phy_setting(struct bnxt *bp) 10711 { 10712 int rc; 10713 bool update_link = false; 10714 bool update_pause = false; 10715 bool update_eee = false; 10716 struct bnxt_link_info *link_info = &bp->link_info; 10717 10718 rc = bnxt_update_link(bp, true); 10719 if (rc) { 10720 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 10721 rc); 10722 return rc; 10723 } 10724 if (!BNXT_SINGLE_PF(bp)) 10725 return 0; 10726 10727 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 10728 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 10729 link_info->req_flow_ctrl) 10730 update_pause = true; 10731 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 10732 link_info->force_pause_setting != link_info->req_flow_ctrl) 10733 update_pause = true; 10734 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 10735 if 
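	/* When autoneg is not requested, the link must be reconfigured if the
	 * PHY is still in an autoneg mode or the forced speed/duplex no
	 * longer matches what was requested.
	 */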
(BNXT_AUTO_MODE(link_info->auto_mode)) 10736 update_link = true; 10737 if (bnxt_force_speed_updated(link_info)) 10738 update_link = true; 10739 if (link_info->req_duplex != link_info->duplex_setting) 10740 update_link = true; 10741 } else { 10742 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 10743 update_link = true; 10744 if (bnxt_auto_speed_updated(link_info)) 10745 update_link = true; 10746 } 10747 10748 /* The last close may have shutdown the link, so need to call 10749 * PHY_CFG to bring it back up. 10750 */ 10751 if (!BNXT_LINK_IS_UP(bp)) 10752 update_link = true; 10753 10754 if (!bnxt_eee_config_ok(bp)) 10755 update_eee = true; 10756 10757 if (update_link) 10758 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 10759 else if (update_pause) 10760 rc = bnxt_hwrm_set_pause(bp); 10761 if (rc) { 10762 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 10763 rc); 10764 return rc; 10765 } 10766 10767 return rc; 10768 } 10769 10770 /* Common routine to pre-map certain register block to different GRC window. 10771 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 10772 * in PF and 3 windows in VF that can be customized to map in different 10773 * register blocks. 10774 */ 10775 static void bnxt_preset_reg_win(struct bnxt *bp) 10776 { 10777 if (BNXT_PF(bp)) { 10778 /* CAG registers map to GRC window #4 */ 10779 writel(BNXT_CAG_REG_BASE, 10780 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 10781 } 10782 } 10783 10784 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 10785 10786 static int bnxt_reinit_after_abort(struct bnxt *bp) 10787 { 10788 int rc; 10789 10790 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10791 return -EBUSY; 10792 10793 if (bp->dev->reg_state == NETREG_UNREGISTERED) 10794 return -ENODEV; 10795 10796 rc = bnxt_fw_init_one(bp); 10797 if (!rc) { 10798 bnxt_clear_int_mode(bp); 10799 rc = bnxt_init_int_mode(bp); 10800 if (!rc) { 10801 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10802 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 10803 } 10804 } 10805 return rc; 10806 } 10807 10808 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 10809 { 10810 int rc = 0; 10811 10812 bnxt_preset_reg_win(bp); 10813 netif_carrier_off(bp->dev); 10814 if (irq_re_init) { 10815 /* Reserve rings now if none were reserved at driver probe. 
*/ 10816 rc = bnxt_init_dflt_ring_mode(bp); 10817 if (rc) { 10818 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 10819 return rc; 10820 } 10821 } 10822 rc = bnxt_reserve_rings(bp, irq_re_init); 10823 if (rc) 10824 return rc; 10825 if ((bp->flags & BNXT_FLAG_RFS) && 10826 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 10827 /* disable RFS if falling back to INTA */ 10828 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 10829 bp->flags &= ~BNXT_FLAG_RFS; 10830 } 10831 10832 rc = bnxt_alloc_mem(bp, irq_re_init); 10833 if (rc) { 10834 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 10835 goto open_err_free_mem; 10836 } 10837 10838 if (irq_re_init) { 10839 bnxt_init_napi(bp); 10840 rc = bnxt_request_irq(bp); 10841 if (rc) { 10842 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 10843 goto open_err_irq; 10844 } 10845 } 10846 10847 rc = bnxt_init_nic(bp, irq_re_init); 10848 if (rc) { 10849 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 10850 goto open_err_irq; 10851 } 10852 10853 bnxt_enable_napi(bp); 10854 bnxt_debug_dev_init(bp); 10855 10856 if (link_re_init) { 10857 mutex_lock(&bp->link_lock); 10858 rc = bnxt_update_phy_setting(bp); 10859 mutex_unlock(&bp->link_lock); 10860 if (rc) { 10861 netdev_warn(bp->dev, "failed to update phy settings\n"); 10862 if (BNXT_SINGLE_PF(bp)) { 10863 bp->link_info.phy_retry = true; 10864 bp->link_info.phy_retry_expires = 10865 jiffies + 5 * HZ; 10866 } 10867 } 10868 } 10869 10870 if (irq_re_init) 10871 udp_tunnel_nic_reset_ntf(bp->dev); 10872 10873 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 10874 if (!static_key_enabled(&bnxt_xdp_locking_key)) 10875 static_branch_enable(&bnxt_xdp_locking_key); 10876 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 10877 static_branch_disable(&bnxt_xdp_locking_key); 10878 } 10879 set_bit(BNXT_STATE_OPEN, &bp->state); 10880 bnxt_enable_int(bp); 10881 /* Enable TX queues */ 10882 bnxt_tx_enable(bp); 10883 mod_timer(&bp->timer, jiffies + bp->current_interval); 10884 /* Poll link status and check for SFP+ module status */ 10885 mutex_lock(&bp->link_lock); 10886 bnxt_get_port_module_status(bp); 10887 mutex_unlock(&bp->link_lock); 10888 10889 /* VF-reps may need to be re-opened after the PF is re-opened */ 10890 if (BNXT_PF(bp)) 10891 bnxt_vf_reps_open(bp); 10892 bnxt_ptp_init_rtc(bp, true); 10893 bnxt_ptp_cfg_tstamp_filters(bp); 10894 return 0; 10895 10896 open_err_irq: 10897 bnxt_del_napi(bp); 10898 10899 open_err_free_mem: 10900 bnxt_free_skbs(bp); 10901 bnxt_free_irq(bp); 10902 bnxt_free_mem(bp, true); 10903 return rc; 10904 } 10905 10906 /* rtnl_lock held */ 10907 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 10908 { 10909 int rc = 0; 10910 10911 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 10912 rc = -EIO; 10913 if (!rc) 10914 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 10915 if (rc) { 10916 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 10917 dev_close(bp->dev); 10918 } 10919 return rc; 10920 } 10921 10922 /* rtnl_lock held, open the NIC half way by allocating all resources, but 10923 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 10924 * self tests. 
10925 */ 10926 int bnxt_half_open_nic(struct bnxt *bp) 10927 { 10928 int rc = 0; 10929 10930 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 10931 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 10932 rc = -ENODEV; 10933 goto half_open_err; 10934 } 10935 10936 rc = bnxt_alloc_mem(bp, true); 10937 if (rc) { 10938 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 10939 goto half_open_err; 10940 } 10941 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10942 rc = bnxt_init_nic(bp, true); 10943 if (rc) { 10944 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10945 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 10946 goto half_open_err; 10947 } 10948 return 0; 10949 10950 half_open_err: 10951 bnxt_free_skbs(bp); 10952 bnxt_free_mem(bp, true); 10953 dev_close(bp->dev); 10954 return rc; 10955 } 10956 10957 /* rtnl_lock held, this call can only be made after a previous successful 10958 * call to bnxt_half_open_nic(). 10959 */ 10960 void bnxt_half_close_nic(struct bnxt *bp) 10961 { 10962 bnxt_hwrm_resource_free(bp, false, true); 10963 bnxt_free_skbs(bp); 10964 bnxt_free_mem(bp, true); 10965 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10966 } 10967 10968 void bnxt_reenable_sriov(struct bnxt *bp) 10969 { 10970 if (BNXT_PF(bp)) { 10971 struct bnxt_pf_info *pf = &bp->pf; 10972 int n = pf->active_vfs; 10973 10974 if (n) 10975 bnxt_cfg_hw_sriov(bp, &n, true); 10976 } 10977 } 10978 10979 static int bnxt_open(struct net_device *dev) 10980 { 10981 struct bnxt *bp = netdev_priv(dev); 10982 int rc; 10983 10984 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 10985 rc = bnxt_reinit_after_abort(bp); 10986 if (rc) { 10987 if (rc == -EBUSY) 10988 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 10989 else 10990 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 10991 return -ENODEV; 10992 } 10993 } 10994 10995 rc = bnxt_hwrm_if_change(bp, true); 10996 if (rc) 10997 return rc; 10998 10999 rc = __bnxt_open_nic(bp, true, true); 11000 if (rc) { 11001 bnxt_hwrm_if_change(bp, false); 11002 } else { 11003 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 11004 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11005 bnxt_ulp_start(bp, 0); 11006 bnxt_reenable_sriov(bp); 11007 } 11008 } 11009 } 11010 11011 return rc; 11012 } 11013 11014 static bool bnxt_drv_busy(struct bnxt *bp) 11015 { 11016 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 11017 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 11018 } 11019 11020 static void bnxt_get_ring_stats(struct bnxt *bp, 11021 struct rtnl_link_stats64 *stats); 11022 11023 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 11024 bool link_re_init) 11025 { 11026 /* Close the VF-reps before closing PF */ 11027 if (BNXT_PF(bp)) 11028 bnxt_vf_reps_close(bp); 11029 11030 /* Change device state to avoid TX queue wake up's */ 11031 bnxt_tx_disable(bp); 11032 11033 clear_bit(BNXT_STATE_OPEN, &bp->state); 11034 smp_mb__after_atomic(); 11035 while (bnxt_drv_busy(bp)) 11036 msleep(20); 11037 11038 /* Flush rings and disable interrupts */ 11039 bnxt_shutdown_nic(bp, irq_re_init); 11040 11041 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 11042 11043 bnxt_debug_dev_exit(bp); 11044 bnxt_disable_napi(bp); 11045 del_timer_sync(&bp->timer); 11046 bnxt_free_skbs(bp); 11047 11048 /* Save ring stats before shutdown */ 11049 if (bp->bnapi && irq_re_init) { 11050 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 11051 bnxt_get_ring_err_stats(bp, 
&bp->ring_err_stats_prev); 11052 } 11053 if (irq_re_init) { 11054 bnxt_free_irq(bp); 11055 bnxt_del_napi(bp); 11056 } 11057 bnxt_free_mem(bp, irq_re_init); 11058 } 11059 11060 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 11061 { 11062 int rc = 0; 11063 11064 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11065 /* If we get here, it means firmware reset is in progress 11066 * while we are trying to close. We can safely proceed with 11067 * the close because we are holding rtnl_lock(). Some firmware 11068 * messages may fail as we proceed to close. We set the 11069 * ABORT_ERR flag here so that the FW reset thread will later 11070 * abort when it gets the rtnl_lock() and sees the flag. 11071 */ 11072 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 11073 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 11074 } 11075 11076 #ifdef CONFIG_BNXT_SRIOV 11077 if (bp->sriov_cfg) { 11078 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 11079 !bp->sriov_cfg, 11080 BNXT_SRIOV_CFG_WAIT_TMO); 11081 if (rc) 11082 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 11083 } 11084 #endif 11085 __bnxt_close_nic(bp, irq_re_init, link_re_init); 11086 return rc; 11087 } 11088 11089 static int bnxt_close(struct net_device *dev) 11090 { 11091 struct bnxt *bp = netdev_priv(dev); 11092 11093 bnxt_close_nic(bp, true, true); 11094 bnxt_hwrm_shutdown_link(bp); 11095 bnxt_hwrm_if_change(bp, false); 11096 return 0; 11097 } 11098 11099 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 11100 u16 *val) 11101 { 11102 struct hwrm_port_phy_mdio_read_output *resp; 11103 struct hwrm_port_phy_mdio_read_input *req; 11104 int rc; 11105 11106 if (bp->hwrm_spec_code < 0x10a00) 11107 return -EOPNOTSUPP; 11108 11109 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 11110 if (rc) 11111 return rc; 11112 11113 req->port_id = cpu_to_le16(bp->pf.port_id); 11114 req->phy_addr = phy_addr; 11115 req->reg_addr = cpu_to_le16(reg & 0x1f); 11116 if (mdio_phy_id_is_c45(phy_addr)) { 11117 req->cl45_mdio = 1; 11118 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11119 req->dev_addr = mdio_phy_id_devad(phy_addr); 11120 req->reg_addr = cpu_to_le16(reg); 11121 } 11122 11123 resp = hwrm_req_hold(bp, req); 11124 rc = hwrm_req_send(bp, req); 11125 if (!rc) 11126 *val = le16_to_cpu(resp->reg_data); 11127 hwrm_req_drop(bp, req); 11128 return rc; 11129 } 11130 11131 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 11132 u16 val) 11133 { 11134 struct hwrm_port_phy_mdio_write_input *req; 11135 int rc; 11136 11137 if (bp->hwrm_spec_code < 0x10a00) 11138 return -EOPNOTSUPP; 11139 11140 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 11141 if (rc) 11142 return rc; 11143 11144 req->port_id = cpu_to_le16(bp->pf.port_id); 11145 req->phy_addr = phy_addr; 11146 req->reg_addr = cpu_to_le16(reg & 0x1f); 11147 if (mdio_phy_id_is_c45(phy_addr)) { 11148 req->cl45_mdio = 1; 11149 req->phy_addr = mdio_phy_id_prtad(phy_addr); 11150 req->dev_addr = mdio_phy_id_devad(phy_addr); 11151 req->reg_addr = cpu_to_le16(reg); 11152 } 11153 req->reg_data = cpu_to_le16(val); 11154 11155 return hwrm_req_send(bp, req); 11156 } 11157 11158 /* rtnl_lock held */ 11159 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11160 { 11161 struct mii_ioctl_data *mdio = if_mii(ifr); 11162 struct bnxt *bp = netdev_priv(dev); 11163 int rc; 11164 11165 switch (cmd) { 11166 case SIOCGMIIPHY: 11167 mdio->phy_id = 
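		/* Report the PHY address learned from PORT_PHY_QCFG and fall
		 * through to read the requested register via HWRM MDIO.
		 */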
bp->link_info.phy_addr; 11168 11169 fallthrough; 11170 case SIOCGMIIREG: { 11171 u16 mii_regval = 0; 11172 11173 if (!netif_running(dev)) 11174 return -EAGAIN; 11175 11176 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 11177 &mii_regval); 11178 mdio->val_out = mii_regval; 11179 return rc; 11180 } 11181 11182 case SIOCSMIIREG: 11183 if (!netif_running(dev)) 11184 return -EAGAIN; 11185 11186 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 11187 mdio->val_in); 11188 11189 case SIOCSHWTSTAMP: 11190 return bnxt_hwtstamp_set(dev, ifr); 11191 11192 case SIOCGHWTSTAMP: 11193 return bnxt_hwtstamp_get(dev, ifr); 11194 11195 default: 11196 /* do nothing */ 11197 break; 11198 } 11199 return -EOPNOTSUPP; 11200 } 11201 11202 static void bnxt_get_ring_stats(struct bnxt *bp, 11203 struct rtnl_link_stats64 *stats) 11204 { 11205 int i; 11206 11207 for (i = 0; i < bp->cp_nr_rings; i++) { 11208 struct bnxt_napi *bnapi = bp->bnapi[i]; 11209 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 11210 u64 *sw = cpr->stats.sw_stats; 11211 11212 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 11213 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11214 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 11215 11216 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 11217 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 11218 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 11219 11220 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 11221 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 11222 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 11223 11224 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 11225 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 11226 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 11227 11228 stats->rx_missed_errors += 11229 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 11230 11231 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 11232 11233 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 11234 11235 stats->rx_dropped += 11236 cpr->sw_stats.rx.rx_netpoll_discards + 11237 cpr->sw_stats.rx.rx_oom_discards; 11238 } 11239 } 11240 11241 static void bnxt_add_prev_stats(struct bnxt *bp, 11242 struct rtnl_link_stats64 *stats) 11243 { 11244 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 11245 11246 stats->rx_packets += prev_stats->rx_packets; 11247 stats->tx_packets += prev_stats->tx_packets; 11248 stats->rx_bytes += prev_stats->rx_bytes; 11249 stats->tx_bytes += prev_stats->tx_bytes; 11250 stats->rx_missed_errors += prev_stats->rx_missed_errors; 11251 stats->multicast += prev_stats->multicast; 11252 stats->rx_dropped += prev_stats->rx_dropped; 11253 stats->tx_dropped += prev_stats->tx_dropped; 11254 } 11255 11256 static void 11257 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 11258 { 11259 struct bnxt *bp = netdev_priv(dev); 11260 11261 set_bit(BNXT_STATE_READ_STATS, &bp->state); 11262 /* Make sure bnxt_close_nic() sees that we are reading stats before 11263 * we check the BNXT_STATE_OPEN flag. 
11264 */ 11265 smp_mb__after_atomic(); 11266 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11267 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11268 *stats = bp->net_stats_prev; 11269 return; 11270 } 11271 11272 bnxt_get_ring_stats(bp, stats); 11273 bnxt_add_prev_stats(bp, stats); 11274 11275 if (bp->flags & BNXT_FLAG_PORT_STATS) { 11276 u64 *rx = bp->port_stats.sw_stats; 11277 u64 *tx = bp->port_stats.sw_stats + 11278 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 11279 11280 stats->rx_crc_errors = 11281 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 11282 stats->rx_frame_errors = 11283 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 11284 stats->rx_length_errors = 11285 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 11286 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 11287 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 11288 stats->rx_errors = 11289 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 11290 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 11291 stats->collisions = 11292 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 11293 stats->tx_fifo_errors = 11294 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 11295 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 11296 } 11297 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 11298 } 11299 11300 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 11301 struct bnxt_total_ring_err_stats *stats, 11302 struct bnxt_cp_ring_info *cpr) 11303 { 11304 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; 11305 u64 *hw_stats = cpr->stats.sw_stats; 11306 11307 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 11308 stats->rx_total_resets += sw_stats->rx.rx_resets; 11309 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 11310 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 11311 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 11312 stats->rx_total_ring_discards += 11313 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 11314 stats->tx_total_resets += sw_stats->tx.tx_resets; 11315 stats->tx_total_ring_discards += 11316 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 11317 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 11318 } 11319 11320 void bnxt_get_ring_err_stats(struct bnxt *bp, 11321 struct bnxt_total_ring_err_stats *stats) 11322 { 11323 int i; 11324 11325 for (i = 0; i < bp->cp_nr_rings; i++) 11326 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 11327 } 11328 11329 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 11330 { 11331 struct net_device *dev = bp->dev; 11332 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11333 struct netdev_hw_addr *ha; 11334 u8 *haddr; 11335 int mc_count = 0; 11336 bool update = false; 11337 int off = 0; 11338 11339 netdev_for_each_mc_addr(ha, dev) { 11340 if (mc_count >= BNXT_MAX_MC_ADDRS) { 11341 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11342 vnic->mc_list_count = 0; 11343 return false; 11344 } 11345 haddr = ha->addr; 11346 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 11347 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 11348 update = true; 11349 } 11350 off += ETH_ALEN; 11351 mc_count++; 11352 } 11353 if (mc_count) 11354 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 11355 11356 if (mc_count != vnic->mc_list_count) { 11357 vnic->mc_list_count = mc_count; 11358 update = true; 11359 } 11360 return update; 11361 } 11362 11363 static bool bnxt_uc_list_updated(struct bnxt *bp) 11364 { 11365 struct net_device *dev = bp->dev; 11366 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11367 struct 
netdev_hw_addr *ha; 11368 int off = 0; 11369 11370 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 11371 return true; 11372 11373 netdev_for_each_uc_addr(ha, dev) { 11374 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 11375 return true; 11376 11377 off += ETH_ALEN; 11378 } 11379 return false; 11380 } 11381 11382 static void bnxt_set_rx_mode(struct net_device *dev) 11383 { 11384 struct bnxt *bp = netdev_priv(dev); 11385 struct bnxt_vnic_info *vnic; 11386 bool mc_update = false; 11387 bool uc_update; 11388 u32 mask; 11389 11390 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 11391 return; 11392 11393 vnic = &bp->vnic_info[0]; 11394 mask = vnic->rx_mask; 11395 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 11396 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 11397 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 11398 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 11399 11400 if (dev->flags & IFF_PROMISC) 11401 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11402 11403 uc_update = bnxt_uc_list_updated(bp); 11404 11405 if (dev->flags & IFF_BROADCAST) 11406 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 11407 if (dev->flags & IFF_ALLMULTI) { 11408 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11409 vnic->mc_list_count = 0; 11410 } else if (dev->flags & IFF_MULTICAST) { 11411 mc_update = bnxt_mc_list_updated(bp, &mask); 11412 } 11413 11414 if (mask != vnic->rx_mask || uc_update || mc_update) { 11415 vnic->rx_mask = mask; 11416 11417 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 11418 } 11419 } 11420 11421 static int bnxt_cfg_rx_mode(struct bnxt *bp) 11422 { 11423 struct net_device *dev = bp->dev; 11424 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11425 struct hwrm_cfa_l2_filter_free_input *req; 11426 struct netdev_hw_addr *ha; 11427 int i, off = 0, rc; 11428 bool uc_update; 11429 11430 netif_addr_lock_bh(dev); 11431 uc_update = bnxt_uc_list_updated(bp); 11432 netif_addr_unlock_bh(dev); 11433 11434 if (!uc_update) 11435 goto skip_uc; 11436 11437 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 11438 if (rc) 11439 return rc; 11440 hwrm_req_hold(bp, req); 11441 for (i = 1; i < vnic->uc_filter_count; i++) { 11442 req->l2_filter_id = vnic->fw_l2_filter_id[i]; 11443 11444 rc = hwrm_req_send(bp, req); 11445 } 11446 hwrm_req_drop(bp, req); 11447 11448 vnic->uc_filter_count = 1; 11449 11450 netif_addr_lock_bh(dev); 11451 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 11452 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11453 } else { 11454 netdev_for_each_uc_addr(ha, dev) { 11455 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 11456 off += ETH_ALEN; 11457 vnic->uc_filter_count++; 11458 } 11459 } 11460 netif_addr_unlock_bh(dev); 11461 11462 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 11463 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 11464 if (rc) { 11465 if (BNXT_VF(bp) && rc == -ENODEV) { 11466 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 11467 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 11468 else 11469 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); 11470 rc = 0; 11471 } else { 11472 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 11473 } 11474 vnic->uc_filter_count = i; 11475 return rc; 11476 } 11477 } 11478 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 11479 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 11480 11481 skip_uc: 11482 if ((vnic->rx_mask & 
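	/* Before programming the RX mask, drop the promiscuous bit if this
	 * function is not allowed to use it; if multicast programming fails,
	 * the code below falls back to ALL_MCAST.
	 */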
CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 11483 !bnxt_promisc_ok(bp)) 11484 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11485 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 11486 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 11487 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 11488 rc); 11489 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 11490 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11491 vnic->mc_list_count = 0; 11492 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 11493 } 11494 if (rc) 11495 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 11496 rc); 11497 11498 return rc; 11499 } 11500 11501 static bool bnxt_can_reserve_rings(struct bnxt *bp) 11502 { 11503 #ifdef CONFIG_BNXT_SRIOV 11504 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 11505 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11506 11507 /* No minimum rings were provisioned by the PF. Don't 11508 * reserve rings by default when device is down. 11509 */ 11510 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 11511 return true; 11512 11513 if (!netif_running(bp->dev)) 11514 return false; 11515 } 11516 #endif 11517 return true; 11518 } 11519 11520 /* If the chip and firmware supports RFS */ 11521 static bool bnxt_rfs_supported(struct bnxt *bp) 11522 { 11523 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 11524 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 11525 return true; 11526 return false; 11527 } 11528 /* 212 firmware is broken for aRFS */ 11529 if (BNXT_FW_MAJ(bp) == 212) 11530 return false; 11531 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 11532 return true; 11533 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 11534 return true; 11535 return false; 11536 } 11537 11538 /* If runtime conditions support RFS */ 11539 static bool bnxt_rfs_capable(struct bnxt *bp) 11540 { 11541 #ifdef CONFIG_RFS_ACCEL 11542 int vnics, max_vnics, max_rss_ctxs; 11543 11544 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11545 return bnxt_rfs_supported(bp); 11546 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 11547 return false; 11548 11549 vnics = 1 + bp->rx_nr_rings; 11550 max_vnics = bnxt_get_max_func_vnics(bp); 11551 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 11552 11553 /* RSS contexts not a limiting factor */ 11554 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 11555 max_rss_ctxs = max_vnics; 11556 if (vnics > max_vnics || vnics > max_rss_ctxs) { 11557 if (bp->rx_nr_rings > 1) 11558 netdev_warn(bp->dev, 11559 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 11560 min(max_rss_ctxs - 1, max_vnics - 1)); 11561 return false; 11562 } 11563 11564 if (!BNXT_NEW_RM(bp)) 11565 return true; 11566 11567 if (vnics == bp->hw_resc.resv_vnics) 11568 return true; 11569 11570 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 11571 if (vnics <= bp->hw_resc.resv_vnics) 11572 return true; 11573 11574 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 11575 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 11576 return false; 11577 #else 11578 return false; 11579 #endif 11580 } 11581 11582 static netdev_features_t bnxt_fix_features(struct net_device *dev, 11583 netdev_features_t features) 11584 { 11585 struct bnxt *bp = netdev_priv(dev); 11586 netdev_features_t vlan_features; 11587 11588 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 11589 features &= ~NETIF_F_NTUPLE; 11590 11591 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 11592 features &= 
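	/* LRO and hardware GRO rely on the aggregation rings, so both are
	 * cleared when aggregation rings are disabled or an XDP program is
	 * attached.
	 */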
~(NETIF_F_LRO | NETIF_F_GRO_HW); 11593 11594 if (!(features & NETIF_F_GRO)) 11595 features &= ~NETIF_F_GRO_HW; 11596 11597 if (features & NETIF_F_GRO_HW) 11598 features &= ~NETIF_F_LRO; 11599 11600 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 11601 * turned on or off together. 11602 */ 11603 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 11604 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 11605 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 11606 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 11607 else if (vlan_features) 11608 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 11609 } 11610 #ifdef CONFIG_BNXT_SRIOV 11611 if (BNXT_VF(bp) && bp->vf.vlan) 11612 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 11613 #endif 11614 return features; 11615 } 11616 11617 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 11618 { 11619 struct bnxt *bp = netdev_priv(dev); 11620 u32 flags = bp->flags; 11621 u32 changes; 11622 int rc = 0; 11623 bool re_init = false; 11624 bool update_tpa = false; 11625 11626 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 11627 if (features & NETIF_F_GRO_HW) 11628 flags |= BNXT_FLAG_GRO; 11629 else if (features & NETIF_F_LRO) 11630 flags |= BNXT_FLAG_LRO; 11631 11632 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 11633 flags &= ~BNXT_FLAG_TPA; 11634 11635 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 11636 flags |= BNXT_FLAG_STRIP_VLAN; 11637 11638 if (features & NETIF_F_NTUPLE) 11639 flags |= BNXT_FLAG_RFS; 11640 11641 changes = flags ^ bp->flags; 11642 if (changes & BNXT_FLAG_TPA) { 11643 update_tpa = true; 11644 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 11645 (flags & BNXT_FLAG_TPA) == 0 || 11646 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 11647 re_init = true; 11648 } 11649 11650 if (changes & ~BNXT_FLAG_TPA) 11651 re_init = true; 11652 11653 if (flags != bp->flags) { 11654 u32 old_flags = bp->flags; 11655 11656 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11657 bp->flags = flags; 11658 if (update_tpa) 11659 bnxt_set_ring_params(bp); 11660 return rc; 11661 } 11662 11663 if (re_init) { 11664 bnxt_close_nic(bp, false, false); 11665 bp->flags = flags; 11666 if (update_tpa) 11667 bnxt_set_ring_params(bp); 11668 11669 return bnxt_open_nic(bp, false, false); 11670 } 11671 if (update_tpa) { 11672 bp->flags = flags; 11673 rc = bnxt_set_tpa(bp, 11674 (flags & BNXT_FLAG_TPA) ? 11675 true : false); 11676 if (rc) 11677 bp->flags = old_flags; 11678 } 11679 } 11680 return rc; 11681 } 11682 11683 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 11684 u8 **nextp) 11685 { 11686 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 11687 struct hop_jumbo_hdr *jhdr; 11688 int hdr_count = 0; 11689 u8 *nexthdr; 11690 int start; 11691 11692 /* Check that there are at most 2 IPv6 extension headers, no 11693 * fragment header, and each is <= 64 bytes. 11694 */ 11695 start = nw_off + sizeof(*ip6h); 11696 nexthdr = &ip6h->nexthdr; 11697 while (ipv6_ext_hdr(*nexthdr)) { 11698 struct ipv6_opt_hdr *hp; 11699 int hdrlen; 11700 11701 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 11702 *nexthdr == NEXTHDR_FRAGMENT) 11703 return false; 11704 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 11705 skb_headlen(skb), NULL); 11706 if (!hp) 11707 return false; 11708 if (*nexthdr == NEXTHDR_AUTH) 11709 hdrlen = ipv6_authlen(hp); 11710 else 11711 hdrlen = ipv6_optlen(hp); 11712 11713 if (hdrlen > 64) 11714 return false; 11715 11716 /* The ext header may be a hop-by-hop header inserted for 11717 * big TCP purposes.
This will be removed before sending 11718 * from NIC, so do not count it. 11719 */ 11720 if (*nexthdr == NEXTHDR_HOP) { 11721 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 11722 goto increment_hdr; 11723 11724 jhdr = (struct hop_jumbo_hdr *)hp; 11725 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 11726 jhdr->nexthdr != IPPROTO_TCP) 11727 goto increment_hdr; 11728 11729 goto next_hdr; 11730 } 11731 increment_hdr: 11732 hdr_count++; 11733 next_hdr: 11734 nexthdr = &hp->nexthdr; 11735 start += hdrlen; 11736 } 11737 if (nextp) { 11738 /* Caller will check inner protocol */ 11739 if (skb->encapsulation) { 11740 *nextp = nexthdr; 11741 return true; 11742 } 11743 *nextp = NULL; 11744 } 11745 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 11746 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 11747 } 11748 11749 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ 11750 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 11751 { 11752 struct udphdr *uh = udp_hdr(skb); 11753 __be16 udp_port = uh->dest; 11754 11755 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port) 11756 return false; 11757 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) { 11758 struct ethhdr *eh = inner_eth_hdr(skb); 11759 11760 switch (eh->h_proto) { 11761 case htons(ETH_P_IP): 11762 return true; 11763 case htons(ETH_P_IPV6): 11764 return bnxt_exthdr_check(bp, skb, 11765 skb_inner_network_offset(skb), 11766 NULL); 11767 } 11768 } 11769 return false; 11770 } 11771 11772 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 11773 { 11774 switch (l4_proto) { 11775 case IPPROTO_UDP: 11776 return bnxt_udp_tunl_check(bp, skb); 11777 case IPPROTO_IPIP: 11778 return true; 11779 case IPPROTO_GRE: { 11780 switch (skb->inner_protocol) { 11781 default: 11782 return false; 11783 case htons(ETH_P_IP): 11784 return true; 11785 case htons(ETH_P_IPV6): 11786 fallthrough; 11787 } 11788 } 11789 case IPPROTO_IPV6: 11790 /* Check ext headers of inner ipv6 */ 11791 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 11792 NULL); 11793 } 11794 return false; 11795 } 11796 11797 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 11798 struct net_device *dev, 11799 netdev_features_t features) 11800 { 11801 struct bnxt *bp = netdev_priv(dev); 11802 u8 *l4_proto; 11803 11804 features = vlan_features_check(skb, features); 11805 switch (vlan_get_protocol(skb)) { 11806 case htons(ETH_P_IP): 11807 if (!skb->encapsulation) 11808 return features; 11809 l4_proto = &ip_hdr(skb)->protocol; 11810 if (bnxt_tunl_check(bp, skb, *l4_proto)) 11811 return features; 11812 break; 11813 case htons(ETH_P_IPV6): 11814 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 11815 &l4_proto)) 11816 break; 11817 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 11818 return features; 11819 break; 11820 } 11821 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 11822 } 11823 11824 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 11825 u32 *reg_buf) 11826 { 11827 struct hwrm_dbg_read_direct_output *resp; 11828 struct hwrm_dbg_read_direct_input *req; 11829 __le32 *dbg_reg_buf; 11830 dma_addr_t mapping; 11831 int rc, i; 11832 11833 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 11834 if (rc) 11835 return rc; 11836 11837 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 11838 &mapping); 11839 if (!dbg_reg_buf) { 11840 rc = -ENOMEM; 11841 goto dbg_rd_reg_exit; 11842 } 11843 11844 req->host_dest_addr = 
cpu_to_le64(mapping); 11845 11846 resp = hwrm_req_hold(bp, req); 11847 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 11848 req->read_len32 = cpu_to_le32(num_words); 11849 11850 rc = hwrm_req_send(bp, req); 11851 if (rc || resp->error_code) { 11852 rc = -EIO; 11853 goto dbg_rd_reg_exit; 11854 } 11855 for (i = 0; i < num_words; i++) 11856 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 11857 11858 dbg_rd_reg_exit: 11859 hwrm_req_drop(bp, req); 11860 return rc; 11861 } 11862 11863 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 11864 u32 ring_id, u32 *prod, u32 *cons) 11865 { 11866 struct hwrm_dbg_ring_info_get_output *resp; 11867 struct hwrm_dbg_ring_info_get_input *req; 11868 int rc; 11869 11870 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 11871 if (rc) 11872 return rc; 11873 11874 req->ring_type = ring_type; 11875 req->fw_ring_id = cpu_to_le32(ring_id); 11876 resp = hwrm_req_hold(bp, req); 11877 rc = hwrm_req_send(bp, req); 11878 if (!rc) { 11879 *prod = le32_to_cpu(resp->producer_index); 11880 *cons = le32_to_cpu(resp->consumer_index); 11881 } 11882 hwrm_req_drop(bp, req); 11883 return rc; 11884 } 11885 11886 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 11887 { 11888 struct bnxt_tx_ring_info *txr; 11889 int i = bnapi->index, j; 11890 11891 bnxt_for_each_napi_tx(j, bnapi, txr) 11892 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 11893 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 11894 txr->tx_cons); 11895 } 11896 11897 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 11898 { 11899 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 11900 int i = bnapi->index; 11901 11902 if (!rxr) 11903 return; 11904 11905 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 11906 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 11907 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 11908 rxr->rx_sw_agg_prod); 11909 } 11910 11911 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 11912 { 11913 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 11914 int i = bnapi->index; 11915 11916 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 11917 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 11918 } 11919 11920 static void bnxt_dbg_dump_states(struct bnxt *bp) 11921 { 11922 int i; 11923 struct bnxt_napi *bnapi; 11924 11925 for (i = 0; i < bp->cp_nr_rings; i++) { 11926 bnapi = bp->bnapi[i]; 11927 if (netif_msg_drv(bp)) { 11928 bnxt_dump_tx_sw_state(bnapi); 11929 bnxt_dump_rx_sw_state(bnapi); 11930 bnxt_dump_cp_sw_state(bnapi); 11931 } 11932 } 11933 } 11934 11935 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 11936 { 11937 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 11938 struct hwrm_ring_reset_input *req; 11939 struct bnxt_napi *bnapi = rxr->bnapi; 11940 struct bnxt_cp_ring_info *cpr; 11941 u16 cp_ring_id; 11942 int rc; 11943 11944 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 11945 if (rc) 11946 return rc; 11947 11948 cpr = &bnapi->cp_ring; 11949 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 11950 req->cmpl_ring = cpu_to_le16(cp_ring_id); 11951 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 11952 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 11953 return hwrm_req_send_silent(bp, req); 11954 } 11955 11956 static void bnxt_reset_task(struct bnxt *bp, bool silent) 11957 { 11958 if (!silent) 11959 bnxt_dbg_dump_states(bp); 11960 if (netif_running(bp->dev)) { 11961 int rc; 
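/* Note on the two paths below: a "silent" reset just closes and reopens
 * the NIC without re-initializing IRQs, while the full (non-silent) path
 * also stops the ULP (e.g. the RoCE driver) first and restarts it with
 * the result of bnxt_open_nic().
 */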
11962 11963 if (silent) { 11964 bnxt_close_nic(bp, false, false); 11965 bnxt_open_nic(bp, false, false); 11966 } else { 11967 bnxt_ulp_stop(bp); 11968 bnxt_close_nic(bp, true, false); 11969 rc = bnxt_open_nic(bp, true, false); 11970 bnxt_ulp_start(bp, rc); 11971 } 11972 } 11973 } 11974 11975 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 11976 { 11977 struct bnxt *bp = netdev_priv(dev); 11978 11979 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 11980 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 11981 } 11982 11983 static void bnxt_fw_health_check(struct bnxt *bp) 11984 { 11985 struct bnxt_fw_health *fw_health = bp->fw_health; 11986 struct pci_dev *pdev = bp->pdev; 11987 u32 val; 11988 11989 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11990 return; 11991 11992 /* Make sure it is enabled before checking the tmr_counter. */ 11993 smp_rmb(); 11994 if (fw_health->tmr_counter) { 11995 fw_health->tmr_counter--; 11996 return; 11997 } 11998 11999 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12000 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 12001 fw_health->arrests++; 12002 goto fw_reset; 12003 } 12004 12005 fw_health->last_fw_heartbeat = val; 12006 12007 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12008 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 12009 fw_health->discoveries++; 12010 goto fw_reset; 12011 } 12012 12013 fw_health->tmr_counter = fw_health->tmr_multiplier; 12014 return; 12015 12016 fw_reset: 12017 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 12018 } 12019 12020 static void bnxt_timer(struct timer_list *t) 12021 { 12022 struct bnxt *bp = from_timer(bp, t, timer); 12023 struct net_device *dev = bp->dev; 12024 12025 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 12026 return; 12027 12028 if (atomic_read(&bp->intr_sem) != 0) 12029 goto bnxt_restart_timer; 12030 12031 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 12032 bnxt_fw_health_check(bp); 12033 12034 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 12035 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 12036 12037 if (bnxt_tc_flower_enabled(bp)) 12038 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 12039 12040 #ifdef CONFIG_RFS_ACCEL 12041 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 12042 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 12043 #endif /*CONFIG_RFS_ACCEL*/ 12044 12045 if (bp->link_info.phy_retry) { 12046 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 12047 bp->link_info.phy_retry = false; 12048 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 12049 } else { 12050 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 12051 } 12052 } 12053 12054 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 12055 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 12056 12057 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 12058 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 12059 12060 bnxt_restart_timer: 12061 mod_timer(&bp->timer, jiffies + bp->current_interval); 12062 } 12063 12064 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 12065 { 12066 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 12067 * set. If the device is being closed, bnxt_close() may be holding 12068 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 12069 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
12070 */ 12071 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12072 rtnl_lock(); 12073 } 12074 12075 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 12076 { 12077 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12078 rtnl_unlock(); 12079 } 12080 12081 /* Only called from bnxt_sp_task() */ 12082 static void bnxt_reset(struct bnxt *bp, bool silent) 12083 { 12084 bnxt_rtnl_lock_sp(bp); 12085 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 12086 bnxt_reset_task(bp, silent); 12087 bnxt_rtnl_unlock_sp(bp); 12088 } 12089 12090 /* Only called from bnxt_sp_task() */ 12091 static void bnxt_rx_ring_reset(struct bnxt *bp) 12092 { 12093 int i; 12094 12095 bnxt_rtnl_lock_sp(bp); 12096 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12097 bnxt_rtnl_unlock_sp(bp); 12098 return; 12099 } 12100 /* Disable and flush TPA before resetting the RX ring */ 12101 if (bp->flags & BNXT_FLAG_TPA) 12102 bnxt_set_tpa(bp, false); 12103 for (i = 0; i < bp->rx_nr_rings; i++) { 12104 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 12105 struct bnxt_cp_ring_info *cpr; 12106 int rc; 12107 12108 if (!rxr->bnapi->in_reset) 12109 continue; 12110 12111 rc = bnxt_hwrm_rx_ring_reset(bp, i); 12112 if (rc) { 12113 if (rc == -EINVAL || rc == -EOPNOTSUPP) 12114 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 12115 else 12116 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 12117 rc); 12118 bnxt_reset_task(bp, true); 12119 break; 12120 } 12121 bnxt_free_one_rx_ring_skbs(bp, i); 12122 rxr->rx_prod = 0; 12123 rxr->rx_agg_prod = 0; 12124 rxr->rx_sw_agg_prod = 0; 12125 rxr->rx_next_cons = 0; 12126 rxr->bnapi->in_reset = false; 12127 bnxt_alloc_one_rx_ring(bp, i); 12128 cpr = &rxr->bnapi->cp_ring; 12129 cpr->sw_stats.rx.rx_resets++; 12130 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12131 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 12132 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 12133 } 12134 if (bp->flags & BNXT_FLAG_TPA) 12135 bnxt_set_tpa(bp, true); 12136 bnxt_rtnl_unlock_sp(bp); 12137 } 12138 12139 static void bnxt_fw_reset_close(struct bnxt *bp) 12140 { 12141 bnxt_ulp_stop(bp); 12142 /* When firmware is in fatal state, quiesce device and disable 12143 * bus master to prevent any potential bad DMAs before freeing 12144 * kernel memory. 
12145 */ 12146 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 12147 u16 val = 0; 12148 12149 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 12150 if (val == 0xffff) 12151 bp->fw_reset_min_dsecs = 0; 12152 bnxt_tx_disable(bp); 12153 bnxt_disable_napi(bp); 12154 bnxt_disable_int_sync(bp); 12155 bnxt_free_irq(bp); 12156 bnxt_clear_int_mode(bp); 12157 pci_disable_device(bp->pdev); 12158 } 12159 __bnxt_close_nic(bp, true, false); 12160 bnxt_vf_reps_free(bp); 12161 bnxt_clear_int_mode(bp); 12162 bnxt_hwrm_func_drv_unrgtr(bp); 12163 if (pci_is_enabled(bp->pdev)) 12164 pci_disable_device(bp->pdev); 12165 bnxt_free_ctx_mem(bp); 12166 } 12167 12168 static bool is_bnxt_fw_ok(struct bnxt *bp) 12169 { 12170 struct bnxt_fw_health *fw_health = bp->fw_health; 12171 bool no_heartbeat = false, has_reset = false; 12172 u32 val; 12173 12174 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 12175 if (val == fw_health->last_fw_heartbeat) 12176 no_heartbeat = true; 12177 12178 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12179 if (val != fw_health->last_fw_reset_cnt) 12180 has_reset = true; 12181 12182 if (!no_heartbeat && has_reset) 12183 return true; 12184 12185 return false; 12186 } 12187 12188 /* rtnl_lock is acquired before calling this function */ 12189 static void bnxt_force_fw_reset(struct bnxt *bp) 12190 { 12191 struct bnxt_fw_health *fw_health = bp->fw_health; 12192 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12193 u32 wait_dsecs; 12194 12195 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 12196 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12197 return; 12198 12199 if (ptp) { 12200 spin_lock_bh(&ptp->ptp_lock); 12201 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12202 spin_unlock_bh(&ptp->ptp_lock); 12203 } else { 12204 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12205 } 12206 bnxt_fw_reset_close(bp); 12207 wait_dsecs = fw_health->master_func_wait_dsecs; 12208 if (fw_health->primary) { 12209 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 12210 wait_dsecs = 0; 12211 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 12212 } else { 12213 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 12214 wait_dsecs = fw_health->normal_func_wait_dsecs; 12215 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12216 } 12217 12218 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 12219 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 12220 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 12221 } 12222 12223 void bnxt_fw_exception(struct bnxt *bp) 12224 { 12225 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 12226 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 12227 bnxt_rtnl_lock_sp(bp); 12228 bnxt_force_fw_reset(bp); 12229 bnxt_rtnl_unlock_sp(bp); 12230 } 12231 12232 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 12233 * < 0 on error. 
12234 */ 12235 static int bnxt_get_registered_vfs(struct bnxt *bp) 12236 { 12237 #ifdef CONFIG_BNXT_SRIOV 12238 int rc; 12239 12240 if (!BNXT_PF(bp)) 12241 return 0; 12242 12243 rc = bnxt_hwrm_func_qcfg(bp); 12244 if (rc) { 12245 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 12246 return rc; 12247 } 12248 if (bp->pf.registered_vfs) 12249 return bp->pf.registered_vfs; 12250 if (bp->sriov_cfg) 12251 return 1; 12252 #endif 12253 return 0; 12254 } 12255 12256 void bnxt_fw_reset(struct bnxt *bp) 12257 { 12258 bnxt_rtnl_lock_sp(bp); 12259 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 12260 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12261 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 12262 int n = 0, tmo; 12263 12264 if (ptp) { 12265 spin_lock_bh(&ptp->ptp_lock); 12266 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12267 spin_unlock_bh(&ptp->ptp_lock); 12268 } else { 12269 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12270 } 12271 if (bp->pf.active_vfs && 12272 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 12273 n = bnxt_get_registered_vfs(bp); 12274 if (n < 0) { 12275 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 12276 n); 12277 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12278 dev_close(bp->dev); 12279 goto fw_reset_exit; 12280 } else if (n > 0) { 12281 u16 vf_tmo_dsecs = n * 10; 12282 12283 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 12284 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 12285 bp->fw_reset_state = 12286 BNXT_FW_RESET_STATE_POLL_VF; 12287 bnxt_queue_fw_reset_work(bp, HZ / 10); 12288 goto fw_reset_exit; 12289 } 12290 bnxt_fw_reset_close(bp); 12291 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 12292 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 12293 tmo = HZ / 10; 12294 } else { 12295 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12296 tmo = bp->fw_reset_min_dsecs * HZ / 10; 12297 } 12298 bnxt_queue_fw_reset_work(bp, tmo); 12299 } 12300 fw_reset_exit: 12301 bnxt_rtnl_unlock_sp(bp); 12302 } 12303 12304 static void bnxt_chk_missed_irq(struct bnxt *bp) 12305 { 12306 int i; 12307 12308 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 12309 return; 12310 12311 for (i = 0; i < bp->cp_nr_rings; i++) { 12312 struct bnxt_napi *bnapi = bp->bnapi[i]; 12313 struct bnxt_cp_ring_info *cpr; 12314 u32 fw_ring_id; 12315 int j; 12316 12317 if (!bnapi) 12318 continue; 12319 12320 cpr = &bnapi->cp_ring; 12321 for (j = 0; j < cpr->cp_ring_count; j++) { 12322 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 12323 u32 val[2]; 12324 12325 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 12326 continue; 12327 12328 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 12329 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 12330 continue; 12331 } 12332 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 12333 bnxt_dbg_hwrm_ring_info_get(bp, 12334 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 12335 fw_ring_id, &val[0], &val[1]); 12336 cpr->sw_stats.cmn.missed_irqs++; 12337 } 12338 } 12339 } 12340 12341 static void bnxt_cfg_ntp_filters(struct bnxt *); 12342 12343 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 12344 { 12345 struct bnxt_link_info *link_info = &bp->link_info; 12346 12347 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 12348 link_info->autoneg = BNXT_AUTONEG_SPEED; 12349 if (bp->hwrm_spec_code >= 0x10201) { 12350 if (link_info->auto_pause_setting & 12351 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 12352 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12353 } else { 12354 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 12355 } 12356 bnxt_set_auto_speed(link_info); 
12357 } else { 12358 bnxt_set_force_speed(link_info); 12359 link_info->req_duplex = link_info->duplex_setting; 12360 } 12361 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 12362 link_info->req_flow_ctrl = 12363 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 12364 else 12365 link_info->req_flow_ctrl = link_info->force_pause_setting; 12366 } 12367 12368 static void bnxt_fw_echo_reply(struct bnxt *bp) 12369 { 12370 struct bnxt_fw_health *fw_health = bp->fw_health; 12371 struct hwrm_func_echo_response_input *req; 12372 int rc; 12373 12374 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 12375 if (rc) 12376 return; 12377 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 12378 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 12379 hwrm_req_send(bp, req); 12380 } 12381 12382 static void bnxt_sp_task(struct work_struct *work) 12383 { 12384 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 12385 12386 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12387 smp_mb__after_atomic(); 12388 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 12389 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12390 return; 12391 } 12392 12393 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 12394 bnxt_cfg_rx_mode(bp); 12395 12396 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 12397 bnxt_cfg_ntp_filters(bp); 12398 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 12399 bnxt_hwrm_exec_fwd_req(bp); 12400 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 12401 bnxt_hwrm_port_qstats(bp, 0); 12402 bnxt_hwrm_port_qstats_ext(bp, 0); 12403 bnxt_accumulate_all_stats(bp); 12404 } 12405 12406 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 12407 int rc; 12408 12409 mutex_lock(&bp->link_lock); 12410 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 12411 &bp->sp_event)) 12412 bnxt_hwrm_phy_qcaps(bp); 12413 12414 rc = bnxt_update_link(bp, true); 12415 if (rc) 12416 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 12417 rc); 12418 12419 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 12420 &bp->sp_event)) 12421 bnxt_init_ethtool_link_settings(bp); 12422 mutex_unlock(&bp->link_lock); 12423 } 12424 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 12425 int rc; 12426 12427 mutex_lock(&bp->link_lock); 12428 rc = bnxt_update_phy_setting(bp); 12429 mutex_unlock(&bp->link_lock); 12430 if (rc) { 12431 netdev_warn(bp->dev, "update phy settings retry failed\n"); 12432 } else { 12433 bp->link_info.phy_retry = false; 12434 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 12435 } 12436 } 12437 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 12438 mutex_lock(&bp->link_lock); 12439 bnxt_get_port_module_status(bp); 12440 mutex_unlock(&bp->link_lock); 12441 } 12442 12443 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 12444 bnxt_tc_flow_stats_work(bp); 12445 12446 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 12447 bnxt_chk_missed_irq(bp); 12448 12449 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 12450 bnxt_fw_echo_reply(bp); 12451 12452 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 12453 bnxt_hwmon_notify_event(bp); 12454 12455 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 12456 * must be the last functions to be called before exiting. 
12457 */ 12458 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 12459 bnxt_reset(bp, false); 12460 12461 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 12462 bnxt_reset(bp, true); 12463 12464 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 12465 bnxt_rx_ring_reset(bp); 12466 12467 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 12468 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 12469 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 12470 bnxt_devlink_health_fw_report(bp); 12471 else 12472 bnxt_fw_reset(bp); 12473 } 12474 12475 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 12476 if (!is_bnxt_fw_ok(bp)) 12477 bnxt_devlink_health_fw_report(bp); 12478 } 12479 12480 smp_mb__before_atomic(); 12481 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 12482 } 12483 12484 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12485 int *max_cp); 12486 12487 /* Under rtnl_lock */ 12488 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 12489 int tx_xdp) 12490 { 12491 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 12492 int tx_rings_needed, stats; 12493 int rx_rings = rx; 12494 int cp, vnics; 12495 12496 if (tcs) 12497 tx_sets = tcs; 12498 12499 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12500 rx_rings <<= 1; 12501 12502 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 12503 12504 if (max_rx < rx_rings) 12505 return -ENOMEM; 12506 12507 tx_rings_needed = tx * tx_sets + tx_xdp; 12508 if (max_tx < tx_rings_needed) 12509 return -ENOMEM; 12510 12511 vnics = 1; 12512 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == 12513 BNXT_FLAG_RFS) 12514 vnics += rx; 12515 12516 tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); 12517 cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 12518 if (max_cp < cp) 12519 return -ENOMEM; 12520 stats = cp; 12521 if (BNXT_NEW_RM(bp)) { 12522 cp += bnxt_get_ulp_msix_num(bp); 12523 stats += bnxt_get_ulp_stat_ctxs(bp); 12524 } 12525 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 12526 stats, vnics); 12527 } 12528 12529 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 12530 { 12531 if (bp->bar2) { 12532 pci_iounmap(pdev, bp->bar2); 12533 bp->bar2 = NULL; 12534 } 12535 12536 if (bp->bar1) { 12537 pci_iounmap(pdev, bp->bar1); 12538 bp->bar1 = NULL; 12539 } 12540 12541 if (bp->bar0) { 12542 pci_iounmap(pdev, bp->bar0); 12543 bp->bar0 = NULL; 12544 } 12545 } 12546 12547 static void bnxt_cleanup_pci(struct bnxt *bp) 12548 { 12549 bnxt_unmap_bars(bp, bp->pdev); 12550 pci_release_regions(bp->pdev); 12551 if (pci_is_enabled(bp->pdev)) 12552 pci_disable_device(bp->pdev); 12553 } 12554 12555 static void bnxt_init_dflt_coal(struct bnxt *bp) 12556 { 12557 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 12558 struct bnxt_coal *coal; 12559 u16 flags = 0; 12560 12561 if (coal_cap->cmpl_params & 12562 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 12563 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 12564 12565 /* Tick values in micro seconds. 12566 * 1 coal_buf x bufs_per_record = 1 completion record. 
12567 */ 12568 coal = &bp->rx_coal; 12569 coal->coal_ticks = 10; 12570 coal->coal_bufs = 30; 12571 coal->coal_ticks_irq = 1; 12572 coal->coal_bufs_irq = 2; 12573 coal->idle_thresh = 50; 12574 coal->bufs_per_record = 2; 12575 coal->budget = 64; /* NAPI budget */ 12576 coal->flags = flags; 12577 12578 coal = &bp->tx_coal; 12579 coal->coal_ticks = 28; 12580 coal->coal_bufs = 30; 12581 coal->coal_ticks_irq = 2; 12582 coal->coal_bufs_irq = 2; 12583 coal->bufs_per_record = 1; 12584 coal->flags = flags; 12585 12586 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 12587 } 12588 12589 /* FW that pre-reserves 1 VNIC per function */ 12590 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 12591 { 12592 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 12593 12594 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12595 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 12596 return true; 12597 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12598 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 12599 return true; 12600 return false; 12601 } 12602 12603 static int bnxt_fw_init_one_p1(struct bnxt *bp) 12604 { 12605 int rc; 12606 12607 bp->fw_cap = 0; 12608 rc = bnxt_hwrm_ver_get(bp); 12609 bnxt_try_map_fw_health_reg(bp); 12610 if (rc) { 12611 rc = bnxt_try_recover_fw(bp); 12612 if (rc) 12613 return rc; 12614 rc = bnxt_hwrm_ver_get(bp); 12615 if (rc) 12616 return rc; 12617 } 12618 12619 bnxt_nvm_cfg_ver_get(bp); 12620 12621 rc = bnxt_hwrm_func_reset(bp); 12622 if (rc) 12623 return -ENODEV; 12624 12625 bnxt_hwrm_fw_set_time(bp); 12626 return 0; 12627 } 12628 12629 static int bnxt_fw_init_one_p2(struct bnxt *bp) 12630 { 12631 int rc; 12632 12633 /* Get the MAX capabilities for this function */ 12634 rc = bnxt_hwrm_func_qcaps(bp); 12635 if (rc) { 12636 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 12637 rc); 12638 return -ENODEV; 12639 } 12640 12641 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 12642 if (rc) 12643 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 12644 rc); 12645 12646 if (bnxt_alloc_fw_health(bp)) { 12647 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 12648 } else { 12649 rc = bnxt_hwrm_error_recovery_qcfg(bp); 12650 if (rc) 12651 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 12652 rc); 12653 } 12654 12655 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 12656 if (rc) 12657 return -ENODEV; 12658 12659 if (bnxt_fw_pre_resv_vnics(bp)) 12660 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 12661 12662 bnxt_hwrm_func_qcfg(bp); 12663 bnxt_hwrm_vnic_qcaps(bp); 12664 bnxt_hwrm_port_led_qcaps(bp); 12665 bnxt_ethtool_init(bp); 12666 if (bp->fw_cap & BNXT_FW_CAP_PTP) 12667 __bnxt_hwrm_ptp_qcfg(bp); 12668 bnxt_dcb_init(bp); 12669 bnxt_hwmon_init(bp); 12670 return 0; 12671 } 12672 12673 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 12674 { 12675 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 12676 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 12677 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 12678 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 12679 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 12680 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) 12681 bp->rss_hash_delta = bp->rss_hash_cfg; 12682 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 12683 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 12684 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 12685 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 12686 } 12687 } 12688 12689 static void bnxt_set_dflt_rfs(struct bnxt *bp) 12690 { 12691 struct net_device *dev = bp->dev; 12692 12693 dev->hw_features 
&= ~NETIF_F_NTUPLE; 12694 dev->features &= ~NETIF_F_NTUPLE; 12695 bp->flags &= ~BNXT_FLAG_RFS; 12696 if (bnxt_rfs_supported(bp)) { 12697 dev->hw_features |= NETIF_F_NTUPLE; 12698 if (bnxt_rfs_capable(bp)) { 12699 bp->flags |= BNXT_FLAG_RFS; 12700 dev->features |= NETIF_F_NTUPLE; 12701 } 12702 } 12703 } 12704 12705 static void bnxt_fw_init_one_p3(struct bnxt *bp) 12706 { 12707 struct pci_dev *pdev = bp->pdev; 12708 12709 bnxt_set_dflt_rss_hash_type(bp); 12710 bnxt_set_dflt_rfs(bp); 12711 12712 bnxt_get_wol_settings(bp); 12713 if (bp->flags & BNXT_FLAG_WOL_CAP) 12714 device_set_wakeup_enable(&pdev->dev, bp->wol); 12715 else 12716 device_set_wakeup_capable(&pdev->dev, false); 12717 12718 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 12719 bnxt_hwrm_coal_params_qcaps(bp); 12720 } 12721 12722 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 12723 12724 int bnxt_fw_init_one(struct bnxt *bp) 12725 { 12726 int rc; 12727 12728 rc = bnxt_fw_init_one_p1(bp); 12729 if (rc) { 12730 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 12731 return rc; 12732 } 12733 rc = bnxt_fw_init_one_p2(bp); 12734 if (rc) { 12735 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 12736 return rc; 12737 } 12738 rc = bnxt_probe_phy(bp, false); 12739 if (rc) 12740 return rc; 12741 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 12742 if (rc) 12743 return rc; 12744 12745 bnxt_fw_init_one_p3(bp); 12746 return 0; 12747 } 12748 12749 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 12750 { 12751 struct bnxt_fw_health *fw_health = bp->fw_health; 12752 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 12753 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 12754 u32 reg_type, reg_off, delay_msecs; 12755 12756 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 12757 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 12758 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 12759 switch (reg_type) { 12760 case BNXT_FW_HEALTH_REG_TYPE_CFG: 12761 pci_write_config_dword(bp->pdev, reg_off, val); 12762 break; 12763 case BNXT_FW_HEALTH_REG_TYPE_GRC: 12764 writel(reg_off & BNXT_GRC_BASE_MASK, 12765 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 12766 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 12767 fallthrough; 12768 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 12769 writel(val, bp->bar0 + reg_off); 12770 break; 12771 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 12772 writel(val, bp->bar1 + reg_off); 12773 break; 12774 } 12775 if (delay_msecs) { 12776 pci_read_config_dword(bp->pdev, 0, &val); 12777 msleep(delay_msecs); 12778 } 12779 } 12780 12781 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 12782 { 12783 struct hwrm_func_qcfg_output *resp; 12784 struct hwrm_func_qcfg_input *req; 12785 bool result = true; /* firmware will enforce if unknown */ 12786 12787 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 12788 return result; 12789 12790 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 12791 return result; 12792 12793 req->fid = cpu_to_le16(0xffff); 12794 resp = hwrm_req_hold(bp, req); 12795 if (!hwrm_req_send(bp, req)) 12796 result = !!(le16_to_cpu(resp->flags) & 12797 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 12798 hwrm_req_drop(bp, req); 12799 return result; 12800 } 12801 12802 static void bnxt_reset_all(struct bnxt *bp) 12803 { 12804 struct bnxt_fw_health *fw_health = bp->fw_health; 12805 int i, rc; 12806 12807 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 12808 bnxt_fw_reset_via_optee(bp); 12809 bp->fw_reset_timestamp = jiffies; 12810 return; 12811 } 12812 12813 if (fw_health->flags & 
ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 12814 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 12815 bnxt_fw_reset_writel(bp, i); 12816 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 12817 struct hwrm_fw_reset_input *req; 12818 12819 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 12820 if (!rc) { 12821 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 12822 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 12823 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 12824 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 12825 rc = hwrm_req_send(bp, req); 12826 } 12827 if (rc != -ENODEV) 12828 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 12829 } 12830 bp->fw_reset_timestamp = jiffies; 12831 } 12832 12833 static bool bnxt_fw_reset_timeout(struct bnxt *bp) 12834 { 12835 return time_after(jiffies, bp->fw_reset_timestamp + 12836 (bp->fw_reset_max_dsecs * HZ / 10)); 12837 } 12838 12839 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 12840 { 12841 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12842 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { 12843 bnxt_ulp_start(bp, rc); 12844 bnxt_dl_health_fw_status_update(bp, false); 12845 } 12846 bp->fw_reset_state = 0; 12847 dev_close(bp->dev); 12848 } 12849 12850 static void bnxt_fw_reset_task(struct work_struct *work) 12851 { 12852 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 12853 int rc = 0; 12854 12855 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12856 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 12857 return; 12858 } 12859 12860 switch (bp->fw_reset_state) { 12861 case BNXT_FW_RESET_STATE_POLL_VF: { 12862 int n = bnxt_get_registered_vfs(bp); 12863 int tmo; 12864 12865 if (n < 0) { 12866 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 12867 n, jiffies_to_msecs(jiffies - 12868 bp->fw_reset_timestamp)); 12869 goto fw_reset_abort; 12870 } else if (n > 0) { 12871 if (bnxt_fw_reset_timeout(bp)) { 12872 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12873 bp->fw_reset_state = 0; 12874 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 12875 n); 12876 return; 12877 } 12878 bnxt_queue_fw_reset_work(bp, HZ / 10); 12879 return; 12880 } 12881 bp->fw_reset_timestamp = jiffies; 12882 rtnl_lock(); 12883 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12884 bnxt_fw_reset_abort(bp, rc); 12885 rtnl_unlock(); 12886 return; 12887 } 12888 bnxt_fw_reset_close(bp); 12889 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 12890 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 12891 tmo = HZ / 10; 12892 } else { 12893 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12894 tmo = bp->fw_reset_min_dsecs * HZ / 10; 12895 } 12896 rtnl_unlock(); 12897 bnxt_queue_fw_reset_work(bp, tmo); 12898 return; 12899 } 12900 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 12901 u32 val; 12902 12903 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 12904 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 12905 !bnxt_fw_reset_timeout(bp)) { 12906 bnxt_queue_fw_reset_work(bp, HZ / 5); 12907 return; 12908 } 12909 12910 if (!bp->fw_health->primary) { 12911 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 12912 12913 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12914 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 12915 return; 12916 } 12917 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 12918 } 12919 fallthrough; 12920 case 
BNXT_FW_RESET_STATE_RESET_FW: 12921 bnxt_reset_all(bp); 12922 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 12923 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 12924 return; 12925 case BNXT_FW_RESET_STATE_ENABLE_DEV: 12926 bnxt_inv_fw_health_reg(bp); 12927 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 12928 !bp->fw_reset_min_dsecs) { 12929 u16 val; 12930 12931 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 12932 if (val == 0xffff) { 12933 if (bnxt_fw_reset_timeout(bp)) { 12934 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 12935 rc = -ETIMEDOUT; 12936 goto fw_reset_abort; 12937 } 12938 bnxt_queue_fw_reset_work(bp, HZ / 1000); 12939 return; 12940 } 12941 } 12942 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 12943 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 12944 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 12945 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 12946 bnxt_dl_remote_reload(bp); 12947 if (pci_enable_device(bp->pdev)) { 12948 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 12949 rc = -ENODEV; 12950 goto fw_reset_abort; 12951 } 12952 pci_set_master(bp->pdev); 12953 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 12954 fallthrough; 12955 case BNXT_FW_RESET_STATE_POLL_FW: 12956 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 12957 rc = bnxt_hwrm_poll(bp); 12958 if (rc) { 12959 if (bnxt_fw_reset_timeout(bp)) { 12960 netdev_err(bp->dev, "Firmware reset aborted\n"); 12961 goto fw_reset_abort_status; 12962 } 12963 bnxt_queue_fw_reset_work(bp, HZ / 5); 12964 return; 12965 } 12966 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 12967 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 12968 fallthrough; 12969 case BNXT_FW_RESET_STATE_OPENING: 12970 while (!rtnl_trylock()) { 12971 bnxt_queue_fw_reset_work(bp, HZ / 10); 12972 return; 12973 } 12974 rc = bnxt_open(bp->dev); 12975 if (rc) { 12976 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 12977 bnxt_fw_reset_abort(bp, rc); 12978 rtnl_unlock(); 12979 return; 12980 } 12981 12982 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 12983 bp->fw_health->enabled) { 12984 bp->fw_health->last_fw_reset_cnt = 12985 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12986 } 12987 bp->fw_reset_state = 0; 12988 /* Make sure fw_reset_state is 0 before clearing the flag */ 12989 smp_mb__before_atomic(); 12990 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12991 bnxt_ulp_start(bp, 0); 12992 bnxt_reenable_sriov(bp); 12993 bnxt_vf_reps_alloc(bp); 12994 bnxt_vf_reps_open(bp); 12995 bnxt_ptp_reapply_pps(bp); 12996 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 12997 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 12998 bnxt_dl_health_fw_recovery_done(bp); 12999 bnxt_dl_health_fw_status_update(bp, true); 13000 } 13001 rtnl_unlock(); 13002 break; 13003 } 13004 return; 13005 13006 fw_reset_abort_status: 13007 if (bp->fw_health->status_reliable || 13008 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 13009 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 13010 13011 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 13012 } 13013 fw_reset_abort: 13014 rtnl_lock(); 13015 bnxt_fw_reset_abort(bp, rc); 13016 rtnl_unlock(); 13017 } 13018 13019 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 13020 { 13021 int rc; 13022 struct bnxt *bp = netdev_priv(dev); 13023 13024 SET_NETDEV_DEV(dev, &pdev->dev); 13025 13026 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 13027 rc = pci_enable_device(pdev); 13028 if (rc) { 13029 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 13030 goto init_err; 13031 } 13032 13033 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 13034 dev_err(&pdev->dev, 13035 "Cannot find PCI device base address, aborting\n"); 13036 rc = -ENODEV; 13037 goto init_err_disable; 13038 } 13039 13040 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 13041 if (rc) { 13042 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 13043 goto init_err_disable; 13044 } 13045 13046 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 13047 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 13048 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 13049 rc = -EIO; 13050 goto init_err_release; 13051 } 13052 13053 pci_set_master(pdev); 13054 13055 bp->dev = dev; 13056 bp->pdev = pdev; 13057 13058 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 13059 * determines the BAR size. 13060 */ 13061 bp->bar0 = pci_ioremap_bar(pdev, 0); 13062 if (!bp->bar0) { 13063 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 13064 rc = -ENOMEM; 13065 goto init_err_release; 13066 } 13067 13068 bp->bar2 = pci_ioremap_bar(pdev, 4); 13069 if (!bp->bar2) { 13070 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 13071 rc = -ENOMEM; 13072 goto init_err_release; 13073 } 13074 13075 INIT_WORK(&bp->sp_task, bnxt_sp_task); 13076 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 13077 13078 spin_lock_init(&bp->ntp_fltr_lock); 13079 #if BITS_PER_LONG == 32 13080 spin_lock_init(&bp->db_lock); 13081 #endif 13082 13083 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 13084 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 13085 13086 timer_setup(&bp->timer, bnxt_timer, 0); 13087 bp->current_interval = BNXT_TIMER_INTERVAL; 13088 13089 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 13090 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 13091 13092 clear_bit(BNXT_STATE_OPEN, &bp->state); 13093 return 0; 13094 13095 init_err_release: 13096 bnxt_unmap_bars(bp, pdev); 13097 pci_release_regions(pdev); 13098 13099 init_err_disable: 13100 pci_disable_device(pdev); 13101 13102 init_err: 13103 return rc; 13104 } 13105 13106 /* rtnl_lock held */ 13107 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 13108 { 13109 struct sockaddr *addr = p; 13110 struct bnxt *bp = netdev_priv(dev); 13111 int rc = 0; 13112 13113 if (!is_valid_ether_addr(addr->sa_data)) 13114 return -EADDRNOTAVAIL; 13115 13116 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 13117 return 0; 13118 13119 rc = bnxt_approve_mac(bp, addr->sa_data, true); 13120 if (rc) 13121 return rc; 13122 13123 eth_hw_addr_set(dev, addr->sa_data); 13124 if (netif_running(dev)) { 13125 bnxt_close_nic(bp, false, false); 13126 rc = bnxt_open_nic(bp, false, false); 13127 } 13128 13129 return rc; 13130 } 13131 13132 /* rtnl_lock held */ 13133 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 13134 { 13135 struct bnxt *bp = netdev_priv(dev); 13136 13137 if (netif_running(dev)) 13138 bnxt_close_nic(bp, true, false); 13139 13140 dev->mtu = new_mtu; 13141 bnxt_set_ring_params(bp); 13142 13143 if (netif_running(dev)) 13144 return bnxt_open_nic(bp, true, false); 13145 13146 return 0; 13147 } 13148 13149 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 13150 { 13151 struct bnxt *bp = netdev_priv(dev); 13152 bool sh = false; 13153 int rc, tx_cp; 13154 13155 if (tc > bp->max_tc) { 13156 
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 13157 tc, bp->max_tc); 13158 return -EINVAL; 13159 } 13160 13161 if (netdev_get_num_tc(dev) == tc) 13162 return 0; 13163 13164 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 13165 sh = true; 13166 13167 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 13168 sh, tc, bp->tx_nr_rings_xdp); 13169 if (rc) 13170 return rc; 13171 13172 /* Needs to close the device and do hw resource re-allocations */ 13173 if (netif_running(bp->dev)) 13174 bnxt_close_nic(bp, true, false); 13175 13176 if (tc) { 13177 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 13178 netdev_set_num_tc(dev, tc); 13179 } else { 13180 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13181 netdev_reset_tc(dev); 13182 } 13183 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 13184 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 13185 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 13186 tx_cp + bp->rx_nr_rings; 13187 13188 if (netif_running(bp->dev)) 13189 return bnxt_open_nic(bp, true, false); 13190 13191 return 0; 13192 } 13193 13194 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 13195 void *cb_priv) 13196 { 13197 struct bnxt *bp = cb_priv; 13198 13199 if (!bnxt_tc_flower_enabled(bp) || 13200 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 13201 return -EOPNOTSUPP; 13202 13203 switch (type) { 13204 case TC_SETUP_CLSFLOWER: 13205 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 13206 default: 13207 return -EOPNOTSUPP; 13208 } 13209 } 13210 13211 LIST_HEAD(bnxt_block_cb_list); 13212 13213 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 13214 void *type_data) 13215 { 13216 struct bnxt *bp = netdev_priv(dev); 13217 13218 switch (type) { 13219 case TC_SETUP_BLOCK: 13220 return flow_block_cb_setup_simple(type_data, 13221 &bnxt_block_cb_list, 13222 bnxt_setup_tc_block_cb, 13223 bp, bp, true); 13224 case TC_SETUP_QDISC_MQPRIO: { 13225 struct tc_mqprio_qopt *mqprio = type_data; 13226 13227 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 13228 13229 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 13230 } 13231 default: 13232 return -EOPNOTSUPP; 13233 } 13234 } 13235 13236 #ifdef CONFIG_RFS_ACCEL 13237 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 13238 struct bnxt_ntuple_filter *f2) 13239 { 13240 struct flow_keys *keys1 = &f1->fkeys; 13241 struct flow_keys *keys2 = &f2->fkeys; 13242 13243 if (keys1->basic.n_proto != keys2->basic.n_proto || 13244 keys1->basic.ip_proto != keys2->basic.ip_proto) 13245 return false; 13246 13247 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 13248 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 13249 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 13250 return false; 13251 } else { 13252 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 13253 sizeof(keys1->addrs.v6addrs.src)) || 13254 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 13255 sizeof(keys1->addrs.v6addrs.dst))) 13256 return false; 13257 } 13258 13259 if (keys1->ports.ports == keys2->ports.ports && 13260 keys1->control.flags == keys2->control.flags && 13261 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 13262 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 13263 return true; 13264 13265 return false; 13266 } 13267 13268 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 13269 u16 rxq_index, u32 flow_id) 13270 { 13271 struct bnxt *bp = netdev_priv(dev); 13272 struct bnxt_ntuple_filter *fltr, *new_fltr; 13273 
struct flow_keys *fkeys; 13274 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 13275 int rc = 0, idx, bit_id, l2_idx = 0; 13276 struct hlist_head *head; 13277 u32 flags; 13278 13279 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 13280 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 13281 int off = 0, j; 13282 13283 netif_addr_lock_bh(dev); 13284 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 13285 if (ether_addr_equal(eth->h_dest, 13286 vnic->uc_list + off)) { 13287 l2_idx = j + 1; 13288 break; 13289 } 13290 } 13291 netif_addr_unlock_bh(dev); 13292 if (!l2_idx) 13293 return -EINVAL; 13294 } 13295 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 13296 if (!new_fltr) 13297 return -ENOMEM; 13298 13299 fkeys = &new_fltr->fkeys; 13300 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 13301 rc = -EPROTONOSUPPORT; 13302 goto err_free; 13303 } 13304 13305 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 13306 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 13307 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 13308 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 13309 rc = -EPROTONOSUPPORT; 13310 goto err_free; 13311 } 13312 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 13313 bp->hwrm_spec_code < 0x10601) { 13314 rc = -EPROTONOSUPPORT; 13315 goto err_free; 13316 } 13317 flags = fkeys->control.flags; 13318 if (((flags & FLOW_DIS_ENCAPSULATION) && 13319 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 13320 rc = -EPROTONOSUPPORT; 13321 goto err_free; 13322 } 13323 13324 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 13325 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 13326 13327 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 13328 head = &bp->ntp_fltr_hash_tbl[idx]; 13329 rcu_read_lock(); 13330 hlist_for_each_entry_rcu(fltr, head, hash) { 13331 if (bnxt_fltr_match(fltr, new_fltr)) { 13332 rc = fltr->sw_id; 13333 rcu_read_unlock(); 13334 goto err_free; 13335 } 13336 } 13337 rcu_read_unlock(); 13338 13339 spin_lock_bh(&bp->ntp_fltr_lock); 13340 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 13341 BNXT_NTP_FLTR_MAX_FLTR, 0); 13342 if (bit_id < 0) { 13343 spin_unlock_bh(&bp->ntp_fltr_lock); 13344 rc = -ENOMEM; 13345 goto err_free; 13346 } 13347 13348 new_fltr->sw_id = (u16)bit_id; 13349 new_fltr->flow_id = flow_id; 13350 new_fltr->l2_fltr_idx = l2_idx; 13351 new_fltr->rxq = rxq_index; 13352 hlist_add_head_rcu(&new_fltr->hash, head); 13353 bp->ntp_fltr_count++; 13354 spin_unlock_bh(&bp->ntp_fltr_lock); 13355 13356 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13357 13358 return new_fltr->sw_id; 13359 13360 err_free: 13361 kfree(new_fltr); 13362 return rc; 13363 } 13364 13365 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 13366 { 13367 int i; 13368 13369 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 13370 struct hlist_head *head; 13371 struct hlist_node *tmp; 13372 struct bnxt_ntuple_filter *fltr; 13373 int rc; 13374 13375 head = &bp->ntp_fltr_hash_tbl[i]; 13376 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 13377 bool del = false; 13378 13379 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 13380 if (rps_may_expire_flow(bp->dev, fltr->rxq, 13381 fltr->flow_id, 13382 fltr->sw_id)) { 13383 bnxt_hwrm_cfa_ntuple_filter_free(bp, 13384 fltr); 13385 del = true; 13386 } 13387 } else { 13388 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 13389 fltr); 13390 if (rc) 13391 del = true; 13392 else 13393 set_bit(BNXT_FLTR_VALID, &fltr->state); 13394 } 13395 13396 if (del) { 13397 spin_lock_bh(&bp->ntp_fltr_lock); 13398 
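/* Unlink the expired or failed filter under ntp_fltr_lock, then wait
 * for RCU readers (bnxt_rx_flow_steer() walks this hash table under
 * rcu_read_lock()) before releasing the ID bit and freeing the filter.
 */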
hlist_del_rcu(&fltr->hash); 13399 bp->ntp_fltr_count--; 13400 spin_unlock_bh(&bp->ntp_fltr_lock); 13401 synchronize_rcu(); 13402 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 13403 kfree(fltr); 13404 } 13405 } 13406 } 13407 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 13408 netdev_info(bp->dev, "Received PF driver unload event!\n"); 13409 } 13410 13411 #else 13412 13413 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 13414 { 13415 } 13416 13417 #endif /* CONFIG_RFS_ACCEL */ 13418 13419 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 13420 unsigned int entry, struct udp_tunnel_info *ti) 13421 { 13422 struct bnxt *bp = netdev_priv(netdev); 13423 unsigned int cmd; 13424 13425 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 13426 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 13427 else 13428 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 13429 13430 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 13431 } 13432 13433 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 13434 unsigned int entry, struct udp_tunnel_info *ti) 13435 { 13436 struct bnxt *bp = netdev_priv(netdev); 13437 unsigned int cmd; 13438 13439 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 13440 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 13441 else 13442 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 13443 13444 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 13445 } 13446 13447 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 13448 .set_port = bnxt_udp_tunnel_set_port, 13449 .unset_port = bnxt_udp_tunnel_unset_port, 13450 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 13451 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 13452 .tables = { 13453 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 13454 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 13455 }, 13456 }; 13457 13458 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 13459 struct net_device *dev, u32 filter_mask, 13460 int nlflags) 13461 { 13462 struct bnxt *bp = netdev_priv(dev); 13463 13464 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 13465 nlflags, filter_mask, NULL); 13466 } 13467 13468 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 13469 u16 flags, struct netlink_ext_ack *extack) 13470 { 13471 struct bnxt *bp = netdev_priv(dev); 13472 struct nlattr *attr, *br_spec; 13473 int rem, rc = 0; 13474 13475 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 13476 return -EOPNOTSUPP; 13477 13478 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 13479 if (!br_spec) 13480 return -EINVAL; 13481 13482 nla_for_each_nested(attr, br_spec, rem) { 13483 u16 mode; 13484 13485 if (nla_type(attr) != IFLA_BRIDGE_MODE) 13486 continue; 13487 13488 mode = nla_get_u16(attr); 13489 if (mode == bp->br_mode) 13490 break; 13491 13492 rc = bnxt_hwrm_set_br_mode(bp, mode); 13493 if (!rc) 13494 bp->br_mode = mode; 13495 break; 13496 } 13497 return rc; 13498 } 13499 13500 int bnxt_get_port_parent_id(struct net_device *dev, 13501 struct netdev_phys_item_id *ppid) 13502 { 13503 struct bnxt *bp = netdev_priv(dev); 13504 13505 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 13506 return -EOPNOTSUPP; 13507 13508 /* The PF and its VF-reps only support the switchdev framework */ 13509 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 13510 return -EOPNOTSUPP; 13511 13512 ppid->id_len = sizeof(bp->dsn); 13513 memcpy(ppid->id, bp->dsn, ppid->id_len); 13514 13515 return 0; 13516 }
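/* Net device callbacks for the bnxt PF/VF netdev.  The SR-IOV and aRFS
 * entries below are only compiled in when CONFIG_BNXT_SRIOV and
 * CONFIG_RFS_ACCEL are enabled, respectively.
 */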
static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open = bnxt_open,
	.ndo_start_xmit = bnxt_start_xmit,
	.ndo_stop = bnxt_close,
	.ndo_get_stats64 = bnxt_get_stats64,
	.ndo_set_rx_mode = bnxt_set_rx_mode,
	.ndo_eth_ioctl = bnxt_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = bnxt_change_mac_addr,
	.ndo_change_mtu = bnxt_change_mtu,
	.ndo_fix_features = bnxt_fix_features,
	.ndo_set_features = bnxt_set_features,
	.ndo_features_check = bnxt_features_check,
	.ndo_tx_timeout = bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config = bnxt_get_vf_config,
	.ndo_set_vf_mac = bnxt_set_vf_mac,
	.ndo_set_vf_vlan = bnxt_set_vf_vlan,
	.ndo_set_vf_rate = bnxt_set_vf_bw,
	.ndo_set_vf_link_state = bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
	.ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
	.ndo_bpf = bnxt_xdp,
	.ndo_xdp_xmit = bnxt_xdp_xmit,
	.ndo_bridge_getlink = bnxt_bridge_getlink,
	.ndo_bridge_setlink = bnxt_bridge_setlink,
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	bnxt_rdma_aux_device_uninit(bp);

	bnxt_ptp_clear(bp);
	unregister_netdev(dev);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	bp->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
	else
		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
	if (!fw_dflt)
		return 0;

	mutex_lock(&bp->link_lock);
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	mutex_unlock(&bp->link_lock);
	return 0;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		if (*max_cp < (*max_rx + *max_tx)) {
			*max_rx = *max_cp / 2;
			*max_tx = *max_rx;
		}
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings.
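		 * With BNXT_FLAG_AGG_RINGS each RX queue needs an extra HW
		 * ring, so dropping aggregation doubles the usable RX rings.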
		 */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc && rc != -ENODEV)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings.
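	 * so the firmware reservation matches the final ring counts.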
	 */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc && rc != -ENODEV)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_set_dflt_rfs(bp);

init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(bp->dev, vf->mac_addr);
			/* Older PF driver or firmware may not approve this
			 * correctly.
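			 * Fall back to non-strict approval in that case.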
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	unsigned int vpd_size, kw_len;
	int pos, size;
	u8 *vpd_data;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
		return;
	}

	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (pos < 0)
		goto read_sn;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
	if (pos < 0)
		goto exit;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

void bnxt_print_device_info(struct bnxt *bp)
{
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	pcie_print_link_status(bp->pdev);
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
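	 * pci_clear_master() stops bus mastering and pcie_flr() resets the
	 * function before the capture kernel reuses the device.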
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
				 max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bp->board_idx = ent->driver_data;
	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(bp->board_idx))
		bp->flags |= BNXT_FLAG_VF;

	/* No devlink port registration in case of a VF */
	if (BNXT_PF(bp))
		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	if (BNXT_CHIP_P5_PLUS(bp)) {
		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
		if (BNXT_CHIP_SR2(bp))
			bp->flags |= BNXT_FLAG_CHIP_SR2;
	}

	rc = bnxt_alloc_rss_indir_tbl(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;

	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);

	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5_PLUS(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_rdma_aux_device_init(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);

	return 0;
init_err_cleanup:
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
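		/* then drop the device to D3hot for system power-off */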
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
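 *
 * Return: PCI_ERS_RESULT_RECOVERED if the device and firmware recover,
 * PCI_ERS_RESULT_DISCONNECT otherwise.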
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write the BARs if the
		 * value is the same as the saved value, the driver needs to
		 * write the BARs to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
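 * Re-queries device capabilities and re-opens the netdev if it was
 * running before the error.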
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected = bnxt_io_error_detected,
	.slot_reset = bnxt_io_slot_reset,
	.resume = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnxt_pci_tbl,
	.probe = bnxt_init_one,
	.remove = bnxt_remove_one,
	.shutdown = bnxt_shutdown,
	.driver.pm = BNXT_PM_OPS,
	.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);