1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2019 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 13 #include <linux/stringify.h> 14 #include <linux/kernel.h> 15 #include <linux/timer.h> 16 #include <linux/errno.h> 17 #include <linux/ioport.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/interrupt.h> 21 #include <linux/pci.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/bitops.h> 27 #include <linux/io.h> 28 #include <linux/irq.h> 29 #include <linux/delay.h> 30 #include <asm/byteorder.h> 31 #include <asm/page.h> 32 #include <linux/time.h> 33 #include <linux/mii.h> 34 #include <linux/mdio.h> 35 #include <linux/if.h> 36 #include <linux/if_vlan.h> 37 #include <linux/if_bridge.h> 38 #include <linux/rtc.h> 39 #include <linux/bpf.h> 40 #include <net/ip.h> 41 #include <net/tcp.h> 42 #include <net/udp.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <net/udp_tunnel.h> 46 #include <linux/workqueue.h> 47 #include <linux/prefetch.h> 48 #include <linux/cache.h> 49 #include <linux/log2.h> 50 #include <linux/aer.h> 51 #include <linux/bitmap.h> 52 #include <linux/cpu_rmap.h> 53 #include <linux/cpumask.h> 54 #include <net/pkt_cls.h> 55 #include <linux/hwmon.h> 56 #include <linux/hwmon-sysfs.h> 57 #include <net/page_pool.h> 58 59 #include "bnxt_hsi.h" 60 #include "bnxt.h" 61 #include "bnxt_ulp.h" 62 #include "bnxt_sriov.h" 63 #include "bnxt_ethtool.h" 64 #include "bnxt_dcb.h" 65 #include "bnxt_xdp.h" 66 #include "bnxt_vfr.h" 67 #include "bnxt_tc.h" 68 #include "bnxt_devlink.h" 69 #include "bnxt_debugfs.h" 70 71 #define BNXT_TX_TIMEOUT (5 * HZ) 72 73 MODULE_LICENSE("GPL"); 74 MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); 75 76 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) 77 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD 78 #define BNXT_RX_COPY_THRESH 256 79 80 #define BNXT_TX_PUSH_THRESH 164 81 82 enum board_idx { 83 BCM57301, 84 BCM57302, 85 BCM57304, 86 BCM57417_NPAR, 87 BCM58700, 88 BCM57311, 89 BCM57312, 90 BCM57402, 91 BCM57404, 92 BCM57406, 93 BCM57402_NPAR, 94 BCM57407, 95 BCM57412, 96 BCM57414, 97 BCM57416, 98 BCM57417, 99 BCM57412_NPAR, 100 BCM57314, 101 BCM57417_SFP, 102 BCM57416_SFP, 103 BCM57404_NPAR, 104 BCM57406_NPAR, 105 BCM57407_SFP, 106 BCM57407_NPAR, 107 BCM57414_NPAR, 108 BCM57416_NPAR, 109 BCM57452, 110 BCM57454, 111 BCM5745x_NPAR, 112 BCM57508, 113 BCM57504, 114 BCM57502, 115 BCM57508_NPAR, 116 BCM57504_NPAR, 117 BCM57502_NPAR, 118 BCM58802, 119 BCM58804, 120 BCM58808, 121 NETXTREME_E_VF, 122 NETXTREME_C_VF, 123 NETXTREME_S_VF, 124 NETXTREME_E_P5_VF, 125 }; 126 127 /* indexed by enum above */ 128 static const struct { 129 char *name; 130 } board_info[] = { 131 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, 132 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, 133 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 134 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, 135 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, 136 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 137 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb 
Ethernet" }, 138 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 139 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 140 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 141 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 142 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 143 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 144 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 145 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 146 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 147 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 148 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 149 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 150 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 151 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 152 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 153 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 154 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 155 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 156 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 157 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 158 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 159 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 160 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 161 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 162 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 163 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 164 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 165 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 166 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 167 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 168 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 169 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 170 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 171 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 172 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 173 }; 174 175 static const struct pci_device_id bnxt_pci_tbl[] = { 176 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 179 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 180 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 181 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 182 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 183 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 184 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 185 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 186 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 
187 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 188 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 189 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 190 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 192 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 193 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 194 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 195 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 196 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 197 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 198 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 199 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 200 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 201 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 202 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 203 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 204 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 205 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 206 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 207 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 208 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 209 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 210 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 211 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 212 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 213 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 214 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, 215 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 216 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, 217 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, 218 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 219 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, 220 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 221 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 222 #ifdef CONFIG_BNXT_SRIOV 223 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 224 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 225 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 226 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 227 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 228 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 229 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 230 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 231 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 232 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 233 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 234 #endif 235 { 0 } 236 }; 237 238 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 239 240 static const u16 bnxt_vf_req_snif[] = { 241 HWRM_FUNC_CFG, 242 HWRM_FUNC_VF_CFG, 243 HWRM_PORT_PHY_QCFG, 244 HWRM_CFA_L2_FILTER_ALLOC, 245 }; 246 247 static const u16 bnxt_async_events_arr[] = { 248 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 249 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 250 
ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 251 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 252 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 253 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 254 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 255 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 256 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 257 }; 258 259 static struct workqueue_struct *bnxt_pf_wq; 260 261 static bool bnxt_vf_pciid(enum board_idx idx) 262 { 263 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 264 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); 265 } 266 267 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 268 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 269 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 270 271 #define BNXT_CP_DB_IRQ_DIS(db) \ 272 writel(DB_CP_IRQ_DIS_FLAGS, db) 273 274 #define BNXT_DB_CQ(db, idx) \ 275 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) 276 277 #define BNXT_DB_NQ_P5(db, idx) \ 278 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) 279 280 #define BNXT_DB_CQ_ARM(db, idx) \ 281 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) 282 283 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 284 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) 285 286 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 287 { 288 if (bp->flags & BNXT_FLAG_CHIP_P5) 289 BNXT_DB_NQ_P5(db, idx); 290 else 291 BNXT_DB_CQ(db, idx); 292 } 293 294 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 295 { 296 if (bp->flags & BNXT_FLAG_CHIP_P5) 297 BNXT_DB_NQ_ARM_P5(db, idx); 298 else 299 BNXT_DB_CQ_ARM(db, idx); 300 } 301 302 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 303 { 304 if (bp->flags & BNXT_FLAG_CHIP_P5) 305 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), 306 db->doorbell); 307 else 308 BNXT_DB_CQ(db, idx); 309 } 310 311 const u16 bnxt_lhint_arr[] = { 312 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 313 TX_BD_FLAGS_LHINT_512_TO_1023, 314 TX_BD_FLAGS_LHINT_1024_TO_2047, 315 TX_BD_FLAGS_LHINT_1024_TO_2047, 316 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 317 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 318 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 319 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 320 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 321 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 322 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 323 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 324 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 325 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 326 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 327 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 328 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 329 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 330 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 331 }; 332 333 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 334 { 335 struct metadata_dst *md_dst = skb_metadata_dst(skb); 336 337 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 338 return 0; 339 340 return md_dst->u.port_info.port_id; 341 } 342 343 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 344 { 345 struct bnxt *bp = netdev_priv(dev); 346 struct tx_bd *txbd; 347 struct tx_bd_ext *txbd1; 348 struct netdev_queue *txq; 349 int i; 350 dma_addr_t mapping; 351 unsigned int length, pad = 0; 352 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 353 u16 prod, last_frag; 354 struct pci_dev *pdev = bp->pdev; 355 struct bnxt_tx_ring_info *txr; 356 struct bnxt_sw_tx_bd *tx_buf; 357 358 i = skb_get_queue_mapping(skb); 359 if (unlikely(i >= bp->tx_nr_rings)) { 360 dev_kfree_skb_any(skb); 361 return 
NETDEV_TX_OK; 362 } 363 364 txq = netdev_get_tx_queue(dev, i); 365 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 366 prod = txr->tx_prod; 367 368 free_size = bnxt_tx_avail(bp, txr); 369 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 370 netif_tx_stop_queue(txq); 371 return NETDEV_TX_BUSY; 372 } 373 374 length = skb->len; 375 len = skb_headlen(skb); 376 last_frag = skb_shinfo(skb)->nr_frags; 377 378 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 379 380 txbd->tx_bd_opaque = prod; 381 382 tx_buf = &txr->tx_buf_ring[prod]; 383 tx_buf->skb = skb; 384 tx_buf->nr_frags = last_frag; 385 386 vlan_tag_flags = 0; 387 cfa_action = bnxt_xmit_get_cfa_action(skb); 388 if (skb_vlan_tag_present(skb)) { 389 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 390 skb_vlan_tag_get(skb); 391 /* Currently supports 8021Q, 8021AD vlan offloads 392 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 393 */ 394 if (skb->vlan_proto == htons(ETH_P_8021Q)) 395 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 396 } 397 398 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 399 struct tx_push_buffer *tx_push_buf = txr->tx_push; 400 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 401 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 402 void __iomem *db = txr->tx_db.doorbell; 403 void *pdata = tx_push_buf->data; 404 u64 *end; 405 int j, push_len; 406 407 /* Set COAL_NOW to be ready quickly for the next push */ 408 tx_push->tx_bd_len_flags_type = 409 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 410 TX_BD_TYPE_LONG_TX_BD | 411 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 412 TX_BD_FLAGS_COAL_NOW | 413 TX_BD_FLAGS_PACKET_END | 414 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 415 416 if (skb->ip_summed == CHECKSUM_PARTIAL) 417 tx_push1->tx_bd_hsize_lflags = 418 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 419 else 420 tx_push1->tx_bd_hsize_lflags = 0; 421 422 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 423 tx_push1->tx_bd_cfa_action = 424 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 425 426 end = pdata + length; 427 end = PTR_ALIGN(end, 8) - 1; 428 *end = 0; 429 430 skb_copy_from_linear_data(skb, pdata, len); 431 pdata += len; 432 for (j = 0; j < last_frag; j++) { 433 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 434 void *fptr; 435 436 fptr = skb_frag_address_safe(frag); 437 if (!fptr) 438 goto normal_tx; 439 440 memcpy(pdata, fptr, skb_frag_size(frag)); 441 pdata += skb_frag_size(frag); 442 } 443 444 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 445 txbd->tx_bd_haddr = txr->data_mapping; 446 prod = NEXT_TX(prod); 447 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 448 memcpy(txbd, tx_push1, sizeof(*txbd)); 449 prod = NEXT_TX(prod); 450 tx_push->doorbell = 451 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 452 txr->tx_prod = prod; 453 454 tx_buf->is_push = 1; 455 netdev_tx_sent_queue(txq, skb->len); 456 wmb(); /* Sync is_push and byte queue before pushing data */ 457 458 push_len = (length + sizeof(*tx_push) + 7) / 8; 459 if (push_len > 16) { 460 __iowrite64_copy(db, tx_push_buf, 16); 461 __iowrite32_copy(db + 4, tx_push_buf + 1, 462 (push_len - 16) << 1); 463 } else { 464 __iowrite64_copy(db, tx_push_buf, push_len); 465 } 466 467 goto tx_done; 468 } 469 470 normal_tx: 471 if (length < BNXT_MIN_PKT_SIZE) { 472 pad = BNXT_MIN_PKT_SIZE - length; 473 if (skb_pad(skb, pad)) { 474 /* SKB already freed. 
*/ 475 tx_buf->skb = NULL; 476 return NETDEV_TX_OK; 477 } 478 length = BNXT_MIN_PKT_SIZE; 479 } 480 481 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 482 483 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 484 dev_kfree_skb_any(skb); 485 tx_buf->skb = NULL; 486 return NETDEV_TX_OK; 487 } 488 489 dma_unmap_addr_set(tx_buf, mapping, mapping); 490 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 491 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 492 493 txbd->tx_bd_haddr = cpu_to_le64(mapping); 494 495 prod = NEXT_TX(prod); 496 txbd1 = (struct tx_bd_ext *) 497 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 498 499 txbd1->tx_bd_hsize_lflags = 0; 500 if (skb_is_gso(skb)) { 501 u32 hdr_len; 502 503 if (skb->encapsulation) 504 hdr_len = skb_inner_network_offset(skb) + 505 skb_inner_network_header_len(skb) + 506 inner_tcp_hdrlen(skb); 507 else 508 hdr_len = skb_transport_offset(skb) + 509 tcp_hdrlen(skb); 510 511 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 512 TX_BD_FLAGS_T_IPID | 513 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 514 length = skb_shinfo(skb)->gso_size; 515 txbd1->tx_bd_mss = cpu_to_le32(length); 516 length += hdr_len; 517 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 518 txbd1->tx_bd_hsize_lflags = 519 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 520 txbd1->tx_bd_mss = 0; 521 } 522 523 length >>= 9; 524 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 525 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 526 skb->len); 527 i = 0; 528 goto tx_dma_error; 529 } 530 flags |= bnxt_lhint_arr[length]; 531 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 532 533 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 534 txbd1->tx_bd_cfa_action = 535 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 536 for (i = 0; i < last_frag; i++) { 537 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 538 539 prod = NEXT_TX(prod); 540 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 541 542 len = skb_frag_size(frag); 543 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 544 DMA_TO_DEVICE); 545 546 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 547 goto tx_dma_error; 548 549 tx_buf = &txr->tx_buf_ring[prod]; 550 dma_unmap_addr_set(tx_buf, mapping, mapping); 551 552 txbd->tx_bd_haddr = cpu_to_le64(mapping); 553 554 flags = len << TX_BD_LEN_SHIFT; 555 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 556 } 557 558 flags &= ~TX_BD_LEN; 559 txbd->tx_bd_len_flags_type = 560 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 561 TX_BD_FLAGS_PACKET_END); 562 563 netdev_tx_sent_queue(txq, skb->len); 564 565 /* Sync BD data before updating doorbell */ 566 wmb(); 567 568 prod = NEXT_TX(prod); 569 txr->tx_prod = prod; 570 571 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 572 bnxt_db_write(bp, &txr->tx_db, prod); 573 574 tx_done: 575 576 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 577 if (netdev_xmit_more() && !tx_buf->is_push) 578 bnxt_db_write(bp, &txr->tx_db, prod); 579 580 netif_tx_stop_queue(txq); 581 582 /* netif_tx_stop_queue() must be done before checking 583 * tx index in bnxt_tx_avail() below, because in 584 * bnxt_tx_int(), we update tx index before checking for 585 * netif_tx_queue_stopped(). 
586 */ 587 smp_mb(); 588 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 589 netif_tx_wake_queue(txq); 590 } 591 return NETDEV_TX_OK; 592 593 tx_dma_error: 594 last_frag = i; 595 596 /* start back at beginning and unmap skb */ 597 prod = txr->tx_prod; 598 tx_buf = &txr->tx_buf_ring[prod]; 599 tx_buf->skb = NULL; 600 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 601 skb_headlen(skb), PCI_DMA_TODEVICE); 602 prod = NEXT_TX(prod); 603 604 /* unmap remaining mapped pages */ 605 for (i = 0; i < last_frag; i++) { 606 prod = NEXT_TX(prod); 607 tx_buf = &txr->tx_buf_ring[prod]; 608 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 609 skb_frag_size(&skb_shinfo(skb)->frags[i]), 610 PCI_DMA_TODEVICE); 611 } 612 613 dev_kfree_skb_any(skb); 614 return NETDEV_TX_OK; 615 } 616 617 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 618 { 619 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 620 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 621 u16 cons = txr->tx_cons; 622 struct pci_dev *pdev = bp->pdev; 623 int i; 624 unsigned int tx_bytes = 0; 625 626 for (i = 0; i < nr_pkts; i++) { 627 struct bnxt_sw_tx_bd *tx_buf; 628 struct sk_buff *skb; 629 int j, last; 630 631 tx_buf = &txr->tx_buf_ring[cons]; 632 cons = NEXT_TX(cons); 633 skb = tx_buf->skb; 634 tx_buf->skb = NULL; 635 636 if (tx_buf->is_push) { 637 tx_buf->is_push = 0; 638 goto next_tx_int; 639 } 640 641 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 642 skb_headlen(skb), PCI_DMA_TODEVICE); 643 last = tx_buf->nr_frags; 644 645 for (j = 0; j < last; j++) { 646 cons = NEXT_TX(cons); 647 tx_buf = &txr->tx_buf_ring[cons]; 648 dma_unmap_page( 649 &pdev->dev, 650 dma_unmap_addr(tx_buf, mapping), 651 skb_frag_size(&skb_shinfo(skb)->frags[j]), 652 PCI_DMA_TODEVICE); 653 } 654 655 next_tx_int: 656 cons = NEXT_TX(cons); 657 658 tx_bytes += skb->len; 659 dev_kfree_skb_any(skb); 660 } 661 662 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 663 txr->tx_cons = cons; 664 665 /* Need to make the tx_cons update visible to bnxt_start_xmit() 666 * before checking for netif_tx_queue_stopped(). Without the 667 * memory barrier, there is a small possibility that bnxt_start_xmit() 668 * will miss it and cause the queue to be stopped forever. 
669 */ 670 smp_mb(); 671 672 if (unlikely(netif_tx_queue_stopped(txq)) && 673 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 674 __netif_tx_lock(txq, smp_processor_id()); 675 if (netif_tx_queue_stopped(txq) && 676 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 677 txr->dev_state != BNXT_DEV_STATE_CLOSING) 678 netif_tx_wake_queue(txq); 679 __netif_tx_unlock(txq); 680 } 681 } 682 683 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 684 struct bnxt_rx_ring_info *rxr, 685 gfp_t gfp) 686 { 687 struct device *dev = &bp->pdev->dev; 688 struct page *page; 689 690 page = page_pool_dev_alloc_pages(rxr->page_pool); 691 if (!page) 692 return NULL; 693 694 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 695 DMA_ATTR_WEAK_ORDERING); 696 if (dma_mapping_error(dev, *mapping)) { 697 page_pool_recycle_direct(rxr->page_pool, page); 698 return NULL; 699 } 700 *mapping += bp->rx_dma_offset; 701 return page; 702 } 703 704 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 705 gfp_t gfp) 706 { 707 u8 *data; 708 struct pci_dev *pdev = bp->pdev; 709 710 data = kmalloc(bp->rx_buf_size, gfp); 711 if (!data) 712 return NULL; 713 714 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 715 bp->rx_buf_use_size, bp->rx_dir, 716 DMA_ATTR_WEAK_ORDERING); 717 718 if (dma_mapping_error(&pdev->dev, *mapping)) { 719 kfree(data); 720 data = NULL; 721 } 722 return data; 723 } 724 725 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 726 u16 prod, gfp_t gfp) 727 { 728 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 729 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 730 dma_addr_t mapping; 731 732 if (BNXT_RX_PAGE_MODE(bp)) { 733 struct page *page = 734 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); 735 736 if (!page) 737 return -ENOMEM; 738 739 rx_buf->data = page; 740 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 741 } else { 742 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 743 744 if (!data) 745 return -ENOMEM; 746 747 rx_buf->data = data; 748 rx_buf->data_ptr = data + bp->rx_offset; 749 } 750 rx_buf->mapping = mapping; 751 752 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 753 return 0; 754 } 755 756 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 757 { 758 u16 prod = rxr->rx_prod; 759 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 760 struct rx_bd *cons_bd, *prod_bd; 761 762 prod_rx_buf = &rxr->rx_buf_ring[prod]; 763 cons_rx_buf = &rxr->rx_buf_ring[cons]; 764 765 prod_rx_buf->data = data; 766 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 767 768 prod_rx_buf->mapping = cons_rx_buf->mapping; 769 770 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 771 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 772 773 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 774 } 775 776 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 777 { 778 u16 next, max = rxr->rx_agg_bmap_size; 779 780 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 781 if (next >= max) 782 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 783 return next; 784 } 785 786 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 787 struct bnxt_rx_ring_info *rxr, 788 u16 prod, gfp_t gfp) 789 { 790 struct rx_bd *rxbd = 791 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 792 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 793 struct pci_dev *pdev = bp->pdev; 794 struct page *page; 795 dma_addr_t mapping; 796 u16 sw_prod = rxr->rx_sw_agg_prod; 797 
unsigned int offset = 0; 798 799 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 800 page = rxr->rx_page; 801 if (!page) { 802 page = alloc_page(gfp); 803 if (!page) 804 return -ENOMEM; 805 rxr->rx_page = page; 806 rxr->rx_page_offset = 0; 807 } 808 offset = rxr->rx_page_offset; 809 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 810 if (rxr->rx_page_offset == PAGE_SIZE) 811 rxr->rx_page = NULL; 812 else 813 get_page(page); 814 } else { 815 page = alloc_page(gfp); 816 if (!page) 817 return -ENOMEM; 818 } 819 820 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 821 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 822 DMA_ATTR_WEAK_ORDERING); 823 if (dma_mapping_error(&pdev->dev, mapping)) { 824 __free_page(page); 825 return -EIO; 826 } 827 828 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 829 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 830 831 __set_bit(sw_prod, rxr->rx_agg_bmap); 832 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 833 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 834 835 rx_agg_buf->page = page; 836 rx_agg_buf->offset = offset; 837 rx_agg_buf->mapping = mapping; 838 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 839 rxbd->rx_bd_opaque = sw_prod; 840 return 0; 841 } 842 843 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 844 struct bnxt_cp_ring_info *cpr, 845 u16 cp_cons, u16 curr) 846 { 847 struct rx_agg_cmp *agg; 848 849 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 850 agg = (struct rx_agg_cmp *) 851 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 852 return agg; 853 } 854 855 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 856 struct bnxt_rx_ring_info *rxr, 857 u16 agg_id, u16 curr) 858 { 859 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 860 861 return &tpa_info->agg_arr[curr]; 862 } 863 864 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 865 u16 start, u32 agg_bufs, bool tpa) 866 { 867 struct bnxt_napi *bnapi = cpr->bnapi; 868 struct bnxt *bp = bnapi->bp; 869 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 870 u16 prod = rxr->rx_agg_prod; 871 u16 sw_prod = rxr->rx_sw_agg_prod; 872 bool p5_tpa = false; 873 u32 i; 874 875 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 876 p5_tpa = true; 877 878 for (i = 0; i < agg_bufs; i++) { 879 u16 cons; 880 struct rx_agg_cmp *agg; 881 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 882 struct rx_bd *prod_bd; 883 struct page *page; 884 885 if (p5_tpa) 886 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 887 else 888 agg = bnxt_get_agg(bp, cpr, idx, start + i); 889 cons = agg->rx_agg_cmp_opaque; 890 __clear_bit(cons, rxr->rx_agg_bmap); 891 892 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 893 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 894 895 __set_bit(sw_prod, rxr->rx_agg_bmap); 896 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 897 cons_rx_buf = &rxr->rx_agg_ring[cons]; 898 899 /* It is possible for sw_prod to be equal to cons, so 900 * set cons_rx_buf->page to NULL first. 
901 */ 902 page = cons_rx_buf->page; 903 cons_rx_buf->page = NULL; 904 prod_rx_buf->page = page; 905 prod_rx_buf->offset = cons_rx_buf->offset; 906 907 prod_rx_buf->mapping = cons_rx_buf->mapping; 908 909 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 910 911 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 912 prod_bd->rx_bd_opaque = sw_prod; 913 914 prod = NEXT_RX_AGG(prod); 915 sw_prod = NEXT_RX_AGG(sw_prod); 916 } 917 rxr->rx_agg_prod = prod; 918 rxr->rx_sw_agg_prod = sw_prod; 919 } 920 921 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 922 struct bnxt_rx_ring_info *rxr, 923 u16 cons, void *data, u8 *data_ptr, 924 dma_addr_t dma_addr, 925 unsigned int offset_and_len) 926 { 927 unsigned int payload = offset_and_len >> 16; 928 unsigned int len = offset_and_len & 0xffff; 929 skb_frag_t *frag; 930 struct page *page = data; 931 u16 prod = rxr->rx_prod; 932 struct sk_buff *skb; 933 int off, err; 934 935 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 936 if (unlikely(err)) { 937 bnxt_reuse_rx_data(rxr, cons, data); 938 return NULL; 939 } 940 dma_addr -= bp->rx_dma_offset; 941 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 942 DMA_ATTR_WEAK_ORDERING); 943 page_pool_release_page(rxr->page_pool, page); 944 945 if (unlikely(!payload)) 946 payload = eth_get_headlen(bp->dev, data_ptr, len); 947 948 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 949 if (!skb) { 950 __free_page(page); 951 return NULL; 952 } 953 954 off = (void *)data_ptr - page_address(page); 955 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 956 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 957 payload + NET_IP_ALIGN); 958 959 frag = &skb_shinfo(skb)->frags[0]; 960 skb_frag_size_sub(frag, payload); 961 skb_frag_off_add(frag, payload); 962 skb->data_len -= payload; 963 skb->tail += payload; 964 965 return skb; 966 } 967 968 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 969 struct bnxt_rx_ring_info *rxr, u16 cons, 970 void *data, u8 *data_ptr, 971 dma_addr_t dma_addr, 972 unsigned int offset_and_len) 973 { 974 u16 prod = rxr->rx_prod; 975 struct sk_buff *skb; 976 int err; 977 978 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 979 if (unlikely(err)) { 980 bnxt_reuse_rx_data(rxr, cons, data); 981 return NULL; 982 } 983 984 skb = build_skb(data, 0); 985 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 986 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 987 if (!skb) { 988 kfree(data); 989 return NULL; 990 } 991 992 skb_reserve(skb, bp->rx_offset); 993 skb_put(skb, offset_and_len & 0xffff); 994 return skb; 995 } 996 997 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, 998 struct bnxt_cp_ring_info *cpr, 999 struct sk_buff *skb, u16 idx, 1000 u32 agg_bufs, bool tpa) 1001 { 1002 struct bnxt_napi *bnapi = cpr->bnapi; 1003 struct pci_dev *pdev = bp->pdev; 1004 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1005 u16 prod = rxr->rx_agg_prod; 1006 bool p5_tpa = false; 1007 u32 i; 1008 1009 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 1010 p5_tpa = true; 1011 1012 for (i = 0; i < agg_bufs; i++) { 1013 u16 cons, frag_len; 1014 struct rx_agg_cmp *agg; 1015 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1016 struct page *page; 1017 dma_addr_t mapping; 1018 1019 if (p5_tpa) 1020 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1021 else 1022 agg = bnxt_get_agg(bp, cpr, idx, i); 1023 cons = agg->rx_agg_cmp_opaque; 1024 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1025 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1026 1027 cons_rx_buf = 
&rxr->rx_agg_ring[cons]; 1028 skb_fill_page_desc(skb, i, cons_rx_buf->page, 1029 cons_rx_buf->offset, frag_len); 1030 __clear_bit(cons, rxr->rx_agg_bmap); 1031 1032 /* It is possible for bnxt_alloc_rx_page() to allocate 1033 * a sw_prod index that equals the cons index, so we 1034 * need to clear the cons entry now. 1035 */ 1036 mapping = cons_rx_buf->mapping; 1037 page = cons_rx_buf->page; 1038 cons_rx_buf->page = NULL; 1039 1040 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1041 struct skb_shared_info *shinfo; 1042 unsigned int nr_frags; 1043 1044 shinfo = skb_shinfo(skb); 1045 nr_frags = --shinfo->nr_frags; 1046 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 1047 1048 dev_kfree_skb(skb); 1049 1050 cons_rx_buf->page = page; 1051 1052 /* Update prod since possibly some pages have been 1053 * allocated already. 1054 */ 1055 rxr->rx_agg_prod = prod; 1056 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1057 return NULL; 1058 } 1059 1060 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1061 PCI_DMA_FROMDEVICE, 1062 DMA_ATTR_WEAK_ORDERING); 1063 1064 skb->data_len += frag_len; 1065 skb->len += frag_len; 1066 skb->truesize += PAGE_SIZE; 1067 1068 prod = NEXT_RX_AGG(prod); 1069 } 1070 rxr->rx_agg_prod = prod; 1071 return skb; 1072 } 1073 1074 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1075 u8 agg_bufs, u32 *raw_cons) 1076 { 1077 u16 last; 1078 struct rx_agg_cmp *agg; 1079 1080 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1081 last = RING_CMP(*raw_cons); 1082 agg = (struct rx_agg_cmp *) 1083 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1084 return RX_AGG_CMP_VALID(agg, *raw_cons); 1085 } 1086 1087 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1088 unsigned int len, 1089 dma_addr_t mapping) 1090 { 1091 struct bnxt *bp = bnapi->bp; 1092 struct pci_dev *pdev = bp->pdev; 1093 struct sk_buff *skb; 1094 1095 skb = napi_alloc_skb(&bnapi->napi, len); 1096 if (!skb) 1097 return NULL; 1098 1099 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1100 bp->rx_dir); 1101 1102 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1103 len + NET_IP_ALIGN); 1104 1105 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1106 bp->rx_dir); 1107 1108 skb_put(skb, len); 1109 return skb; 1110 } 1111 1112 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1113 u32 *raw_cons, void *cmp) 1114 { 1115 struct rx_cmp *rxcmp = cmp; 1116 u32 tmp_raw_cons = *raw_cons; 1117 u8 cmp_type, agg_bufs = 0; 1118 1119 cmp_type = RX_CMP_TYPE(rxcmp); 1120 1121 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1122 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1123 RX_CMP_AGG_BUFS) >> 1124 RX_CMP_AGG_BUFS_SHIFT; 1125 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1126 struct rx_tpa_end_cmp *tpa_end = cmp; 1127 1128 if (bp->flags & BNXT_FLAG_CHIP_P5) 1129 return 0; 1130 1131 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1132 } 1133 1134 if (agg_bufs) { 1135 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1136 return -EBUSY; 1137 } 1138 *raw_cons = tmp_raw_cons; 1139 return 0; 1140 } 1141 1142 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 1143 { 1144 if (BNXT_PF(bp)) 1145 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 1146 else 1147 schedule_delayed_work(&bp->fw_reset_task, delay); 1148 } 1149 1150 static void bnxt_queue_sp_work(struct bnxt *bp) 1151 { 1152 if (BNXT_PF(bp)) 1153 queue_work(bnxt_pf_wq, &bp->sp_task); 1154 else 1155 
schedule_work(&bp->sp_task); 1156 } 1157 1158 static void bnxt_cancel_sp_work(struct bnxt *bp) 1159 { 1160 if (BNXT_PF(bp)) 1161 flush_workqueue(bnxt_pf_wq); 1162 else 1163 cancel_work_sync(&bp->sp_task); 1164 } 1165 1166 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1167 { 1168 if (!rxr->bnapi->in_reset) { 1169 rxr->bnapi->in_reset = true; 1170 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1171 bnxt_queue_sp_work(bp); 1172 } 1173 rxr->rx_next_cons = 0xffff; 1174 } 1175 1176 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1177 { 1178 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1179 u16 idx = agg_id & MAX_TPA_P5_MASK; 1180 1181 if (test_bit(idx, map->agg_idx_bmap)) 1182 idx = find_first_zero_bit(map->agg_idx_bmap, 1183 BNXT_AGG_IDX_BMAP_SIZE); 1184 __set_bit(idx, map->agg_idx_bmap); 1185 map->agg_id_tbl[agg_id] = idx; 1186 return idx; 1187 } 1188 1189 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1190 { 1191 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1192 1193 __clear_bit(idx, map->agg_idx_bmap); 1194 } 1195 1196 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1197 { 1198 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1199 1200 return map->agg_id_tbl[agg_id]; 1201 } 1202 1203 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1204 struct rx_tpa_start_cmp *tpa_start, 1205 struct rx_tpa_start_cmp_ext *tpa_start1) 1206 { 1207 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1208 struct bnxt_tpa_info *tpa_info; 1209 u16 cons, prod, agg_id; 1210 struct rx_bd *prod_bd; 1211 dma_addr_t mapping; 1212 1213 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1214 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1215 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1216 } else { 1217 agg_id = TPA_START_AGG_ID(tpa_start); 1218 } 1219 cons = tpa_start->rx_tpa_start_cmp_opaque; 1220 prod = rxr->rx_prod; 1221 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1222 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1223 tpa_info = &rxr->rx_tpa[agg_id]; 1224 1225 if (unlikely(cons != rxr->rx_next_cons || 1226 TPA_START_ERROR(tpa_start))) { 1227 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1228 cons, rxr->rx_next_cons, 1229 TPA_START_ERROR_CODE(tpa_start1)); 1230 bnxt_sched_reset(bp, rxr); 1231 return; 1232 } 1233 /* Store cfa_code in tpa_info to use in tpa_end 1234 * completion processing. 
1235 */ 1236 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1237 prod_rx_buf->data = tpa_info->data; 1238 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1239 1240 mapping = tpa_info->mapping; 1241 prod_rx_buf->mapping = mapping; 1242 1243 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 1244 1245 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1246 1247 tpa_info->data = cons_rx_buf->data; 1248 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1249 cons_rx_buf->data = NULL; 1250 tpa_info->mapping = cons_rx_buf->mapping; 1251 1252 tpa_info->len = 1253 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1254 RX_TPA_START_CMP_LEN_SHIFT; 1255 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1256 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1257 1258 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1259 tpa_info->gso_type = SKB_GSO_TCPV4; 1260 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1261 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1)) 1262 tpa_info->gso_type = SKB_GSO_TCPV6; 1263 tpa_info->rss_hash = 1264 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1265 } else { 1266 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1267 tpa_info->gso_type = 0; 1268 if (netif_msg_rx_err(bp)) 1269 netdev_warn(bp->dev, "TPA packet without valid hash\n"); 1270 } 1271 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1272 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1273 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1274 tpa_info->agg_count = 0; 1275 1276 rxr->rx_prod = NEXT_RX(prod); 1277 cons = NEXT_RX(cons); 1278 rxr->rx_next_cons = NEXT_RX(cons); 1279 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1280 1281 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1282 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1283 cons_rx_buf->data = NULL; 1284 } 1285 1286 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1287 { 1288 if (agg_bufs) 1289 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1290 } 1291 1292 #ifdef CONFIG_INET 1293 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1294 { 1295 struct udphdr *uh = NULL; 1296 1297 if (ip_proto == htons(ETH_P_IP)) { 1298 struct iphdr *iph = (struct iphdr *)skb->data; 1299 1300 if (iph->protocol == IPPROTO_UDP) 1301 uh = (struct udphdr *)(iph + 1); 1302 } else { 1303 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1304 1305 if (iph->nexthdr == IPPROTO_UDP) 1306 uh = (struct udphdr *)(iph + 1); 1307 } 1308 if (uh) { 1309 if (uh->check) 1310 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1311 else 1312 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1313 } 1314 } 1315 #endif 1316 1317 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1318 int payload_off, int tcp_ts, 1319 struct sk_buff *skb) 1320 { 1321 #ifdef CONFIG_INET 1322 struct tcphdr *th; 1323 int len, nw_off; 1324 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1325 u32 hdr_info = tpa_info->hdr_info; 1326 bool loopback = false; 1327 1328 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1329 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1330 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1331 1332 /* If the packet is an internal loopback packet, the offsets will 1333 * have an extra 4 bytes. 1334 */ 1335 if (inner_mac_off == 4) { 1336 loopback = true; 1337 } else if (inner_mac_off > 4) { 1338 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1339 ETH_HLEN - 2)); 1340 1341 /* We only support inner iPv4/ipv6. 
If we don't see the 1342 * correct protocol ID, it must be a loopback packet where 1343 * the offsets are off by 4. 1344 */ 1345 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1346 loopback = true; 1347 } 1348 if (loopback) { 1349 /* internal loopback packet, subtract all offsets by 4 */ 1350 inner_ip_off -= 4; 1351 inner_mac_off -= 4; 1352 outer_ip_off -= 4; 1353 } 1354 1355 nw_off = inner_ip_off - ETH_HLEN; 1356 skb_set_network_header(skb, nw_off); 1357 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1358 struct ipv6hdr *iph = ipv6_hdr(skb); 1359 1360 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1361 len = skb->len - skb_transport_offset(skb); 1362 th = tcp_hdr(skb); 1363 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1364 } else { 1365 struct iphdr *iph = ip_hdr(skb); 1366 1367 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1368 len = skb->len - skb_transport_offset(skb); 1369 th = tcp_hdr(skb); 1370 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1371 } 1372 1373 if (inner_mac_off) { /* tunnel */ 1374 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1375 ETH_HLEN - 2)); 1376 1377 bnxt_gro_tunnel(skb, proto); 1378 } 1379 #endif 1380 return skb; 1381 } 1382 1383 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1384 int payload_off, int tcp_ts, 1385 struct sk_buff *skb) 1386 { 1387 #ifdef CONFIG_INET 1388 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1389 u32 hdr_info = tpa_info->hdr_info; 1390 int iphdr_len, nw_off; 1391 1392 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1393 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1394 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1395 1396 nw_off = inner_ip_off - ETH_HLEN; 1397 skb_set_network_header(skb, nw_off); 1398 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
1399 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1400 skb_set_transport_header(skb, nw_off + iphdr_len); 1401 1402 if (inner_mac_off) { /* tunnel */ 1403 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1404 ETH_HLEN - 2)); 1405 1406 bnxt_gro_tunnel(skb, proto); 1407 } 1408 #endif 1409 return skb; 1410 } 1411 1412 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1413 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1414 1415 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1416 int payload_off, int tcp_ts, 1417 struct sk_buff *skb) 1418 { 1419 #ifdef CONFIG_INET 1420 struct tcphdr *th; 1421 int len, nw_off, tcp_opt_len = 0; 1422 1423 if (tcp_ts) 1424 tcp_opt_len = 12; 1425 1426 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1427 struct iphdr *iph; 1428 1429 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1430 ETH_HLEN; 1431 skb_set_network_header(skb, nw_off); 1432 iph = ip_hdr(skb); 1433 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1434 len = skb->len - skb_transport_offset(skb); 1435 th = tcp_hdr(skb); 1436 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1437 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1438 struct ipv6hdr *iph; 1439 1440 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1441 ETH_HLEN; 1442 skb_set_network_header(skb, nw_off); 1443 iph = ipv6_hdr(skb); 1444 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1445 len = skb->len - skb_transport_offset(skb); 1446 th = tcp_hdr(skb); 1447 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1448 } else { 1449 dev_kfree_skb_any(skb); 1450 return NULL; 1451 } 1452 1453 if (nw_off) /* tunnel */ 1454 bnxt_gro_tunnel(skb, skb->protocol); 1455 #endif 1456 return skb; 1457 } 1458 1459 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1460 struct bnxt_tpa_info *tpa_info, 1461 struct rx_tpa_end_cmp *tpa_end, 1462 struct rx_tpa_end_cmp_ext *tpa_end1, 1463 struct sk_buff *skb) 1464 { 1465 #ifdef CONFIG_INET 1466 int payload_off; 1467 u16 segs; 1468 1469 segs = TPA_END_TPA_SEGS(tpa_end); 1470 if (segs == 1) 1471 return skb; 1472 1473 NAPI_GRO_CB(skb)->count = segs; 1474 skb_shinfo(skb)->gso_size = 1475 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1476 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1477 if (bp->flags & BNXT_FLAG_CHIP_P5) 1478 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1479 else 1480 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1481 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1482 if (likely(skb)) 1483 tcp_gro_complete(skb); 1484 #endif 1485 return skb; 1486 } 1487 1488 /* Given the cfa_code of a received packet determine which 1489 * netdev (vf-rep or PF) the packet is destined to. 1490 */ 1491 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1492 { 1493 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1494 1495 /* if vf-rep dev is NULL, the must belongs to the PF */ 1496 return dev ? 
dev : bp->dev; 1497 } 1498 1499 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1500 struct bnxt_cp_ring_info *cpr, 1501 u32 *raw_cons, 1502 struct rx_tpa_end_cmp *tpa_end, 1503 struct rx_tpa_end_cmp_ext *tpa_end1, 1504 u8 *event) 1505 { 1506 struct bnxt_napi *bnapi = cpr->bnapi; 1507 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1508 u8 *data_ptr, agg_bufs; 1509 unsigned int len; 1510 struct bnxt_tpa_info *tpa_info; 1511 dma_addr_t mapping; 1512 struct sk_buff *skb; 1513 u16 idx = 0, agg_id; 1514 void *data; 1515 bool gro; 1516 1517 if (unlikely(bnapi->in_reset)) { 1518 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1519 1520 if (rc < 0) 1521 return ERR_PTR(-EBUSY); 1522 return NULL; 1523 } 1524 1525 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1526 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1527 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1528 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1529 tpa_info = &rxr->rx_tpa[agg_id]; 1530 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1531 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1532 agg_bufs, tpa_info->agg_count); 1533 agg_bufs = tpa_info->agg_count; 1534 } 1535 tpa_info->agg_count = 0; 1536 *event |= BNXT_AGG_EVENT; 1537 bnxt_free_agg_idx(rxr, agg_id); 1538 idx = agg_id; 1539 gro = !!(bp->flags & BNXT_FLAG_GRO); 1540 } else { 1541 agg_id = TPA_END_AGG_ID(tpa_end); 1542 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1543 tpa_info = &rxr->rx_tpa[agg_id]; 1544 idx = RING_CMP(*raw_cons); 1545 if (agg_bufs) { 1546 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1547 return ERR_PTR(-EBUSY); 1548 1549 *event |= BNXT_AGG_EVENT; 1550 idx = NEXT_CMP(idx); 1551 } 1552 gro = !!TPA_END_GRO(tpa_end); 1553 } 1554 data = tpa_info->data; 1555 data_ptr = tpa_info->data_ptr; 1556 prefetch(data_ptr); 1557 len = tpa_info->len; 1558 mapping = tpa_info->mapping; 1559 1560 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1561 bnxt_abort_tpa(cpr, idx, agg_bufs); 1562 if (agg_bufs > MAX_SKB_FRAGS) 1563 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1564 agg_bufs, (int)MAX_SKB_FRAGS); 1565 return NULL; 1566 } 1567 1568 if (len <= bp->rx_copy_thresh) { 1569 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1570 if (!skb) { 1571 bnxt_abort_tpa(cpr, idx, agg_bufs); 1572 return NULL; 1573 } 1574 } else { 1575 u8 *new_data; 1576 dma_addr_t new_mapping; 1577 1578 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1579 if (!new_data) { 1580 bnxt_abort_tpa(cpr, idx, agg_bufs); 1581 return NULL; 1582 } 1583 1584 tpa_info->data = new_data; 1585 tpa_info->data_ptr = new_data + bp->rx_offset; 1586 tpa_info->mapping = new_mapping; 1587 1588 skb = build_skb(data, 0); 1589 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1590 bp->rx_buf_use_size, bp->rx_dir, 1591 DMA_ATTR_WEAK_ORDERING); 1592 1593 if (!skb) { 1594 kfree(data); 1595 bnxt_abort_tpa(cpr, idx, agg_bufs); 1596 return NULL; 1597 } 1598 skb_reserve(skb, bp->rx_offset); 1599 skb_put(skb, len); 1600 } 1601 1602 if (agg_bufs) { 1603 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); 1604 if (!skb) { 1605 /* Page reuse already handled by bnxt_rx_pages(). 
*/ 1606 return NULL; 1607 } 1608 } 1609 1610 skb->protocol = 1611 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1612 1613 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1614 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1615 1616 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1617 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1618 u16 vlan_proto = tpa_info->metadata >> 1619 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1620 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1621 1622 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1623 } 1624 1625 skb_checksum_none_assert(skb); 1626 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1627 skb->ip_summed = CHECKSUM_UNNECESSARY; 1628 skb->csum_level = 1629 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1630 } 1631 1632 if (gro) 1633 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1634 1635 return skb; 1636 } 1637 1638 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1639 struct rx_agg_cmp *rx_agg) 1640 { 1641 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1642 struct bnxt_tpa_info *tpa_info; 1643 1644 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1645 tpa_info = &rxr->rx_tpa[agg_id]; 1646 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1647 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1648 } 1649 1650 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1651 struct sk_buff *skb) 1652 { 1653 if (skb->dev != bp->dev) { 1654 /* this packet belongs to a vf-rep */ 1655 bnxt_vf_rep_rx(bp, skb); 1656 return; 1657 } 1658 skb_record_rx_queue(skb, bnapi->index); 1659 napi_gro_receive(&bnapi->napi, skb); 1660 } 1661 1662 /* returns the following: 1663 * 1 - 1 packet successfully received 1664 * 0 - successful TPA_START, packet not completed yet 1665 * -EBUSY - completion ring does not have all the agg buffers yet 1666 * -ENOMEM - packet aborted due to out of memory 1667 * -EIO - packet aborted due to hw error indicated in BD 1668 */ 1669 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1670 u32 *raw_cons, u8 *event) 1671 { 1672 struct bnxt_napi *bnapi = cpr->bnapi; 1673 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1674 struct net_device *dev = bp->dev; 1675 struct rx_cmp *rxcmp; 1676 struct rx_cmp_ext *rxcmp1; 1677 u32 tmp_raw_cons = *raw_cons; 1678 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1679 struct bnxt_sw_rx_bd *rx_buf; 1680 unsigned int len; 1681 u8 *data_ptr, agg_bufs, cmp_type; 1682 dma_addr_t dma_addr; 1683 struct sk_buff *skb; 1684 void *data; 1685 int rc = 0; 1686 u32 misc; 1687 1688 rxcmp = (struct rx_cmp *) 1689 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1690 1691 cmp_type = RX_CMP_TYPE(rxcmp); 1692 1693 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1694 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1695 goto next_rx_no_prod_no_len; 1696 } 1697 1698 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1699 cp_cons = RING_CMP(tmp_raw_cons); 1700 rxcmp1 = (struct rx_cmp_ext *) 1701 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1702 1703 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1704 return -EBUSY; 1705 1706 prod = rxr->rx_prod; 1707 1708 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1709 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1710 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1711 1712 *event |= BNXT_RX_EVENT; 1713 goto next_rx_no_prod_no_len; 1714 1715 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1716 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 
1717 (struct rx_tpa_end_cmp *)rxcmp, 1718 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1719 1720 if (IS_ERR(skb)) 1721 return -EBUSY; 1722 1723 rc = -ENOMEM; 1724 if (likely(skb)) { 1725 bnxt_deliver_skb(bp, bnapi, skb); 1726 rc = 1; 1727 } 1728 *event |= BNXT_RX_EVENT; 1729 goto next_rx_no_prod_no_len; 1730 } 1731 1732 cons = rxcmp->rx_cmp_opaque; 1733 if (unlikely(cons != rxr->rx_next_cons)) { 1734 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); 1735 1736 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1737 cons, rxr->rx_next_cons); 1738 bnxt_sched_reset(bp, rxr); 1739 return rc1; 1740 } 1741 rx_buf = &rxr->rx_buf_ring[cons]; 1742 data = rx_buf->data; 1743 data_ptr = rx_buf->data_ptr; 1744 prefetch(data_ptr); 1745 1746 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1747 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1748 1749 if (agg_bufs) { 1750 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1751 return -EBUSY; 1752 1753 cp_cons = NEXT_CMP(cp_cons); 1754 *event |= BNXT_AGG_EVENT; 1755 } 1756 *event |= BNXT_RX_EVENT; 1757 1758 rx_buf->data = NULL; 1759 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1760 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1761 1762 bnxt_reuse_rx_data(rxr, cons, data); 1763 if (agg_bufs) 1764 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1765 false); 1766 1767 rc = -EIO; 1768 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1769 bnapi->cp_ring.rx_buf_errors++; 1770 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 1771 netdev_warn(bp->dev, "RX buffer error %x\n", 1772 rx_err); 1773 bnxt_sched_reset(bp, rxr); 1774 } 1775 } 1776 goto next_rx_no_len; 1777 } 1778 1779 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1780 dma_addr = rx_buf->mapping; 1781 1782 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1783 rc = 1; 1784 goto next_rx; 1785 } 1786 1787 if (len <= bp->rx_copy_thresh) { 1788 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1789 bnxt_reuse_rx_data(rxr, cons, data); 1790 if (!skb) { 1791 if (agg_bufs) 1792 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1793 agg_bufs, false); 1794 rc = -ENOMEM; 1795 goto next_rx; 1796 } 1797 } else { 1798 u32 payload; 1799 1800 if (rx_buf->data_ptr == data_ptr) 1801 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1802 else 1803 payload = 0; 1804 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1805 payload | len); 1806 if (!skb) { 1807 rc = -ENOMEM; 1808 goto next_rx; 1809 } 1810 } 1811 1812 if (agg_bufs) { 1813 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); 1814 if (!skb) { 1815 rc = -ENOMEM; 1816 goto next_rx; 1817 } 1818 } 1819 1820 if (RX_CMP_HASH_VALID(rxcmp)) { 1821 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1822 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1823 1824 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1825 if (hash_type != 1 && hash_type != 3) 1826 type = PKT_HASH_TYPE_L3; 1827 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1828 } 1829 1830 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1831 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1832 1833 if ((rxcmp1->rx_cmp_flags2 & 1834 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1835 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1836 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1837 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1838 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1839 1840 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1841 } 1842 1843 
skb_checksum_none_assert(skb); 1844 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1845 if (dev->features & NETIF_F_RXCSUM) { 1846 skb->ip_summed = CHECKSUM_UNNECESSARY; 1847 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1848 } 1849 } else { 1850 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1851 if (dev->features & NETIF_F_RXCSUM) 1852 bnapi->cp_ring.rx_l4_csum_errors++; 1853 } 1854 } 1855 1856 bnxt_deliver_skb(bp, bnapi, skb); 1857 rc = 1; 1858 1859 next_rx: 1860 cpr->rx_packets += 1; 1861 cpr->rx_bytes += len; 1862 1863 next_rx_no_len: 1864 rxr->rx_prod = NEXT_RX(prod); 1865 rxr->rx_next_cons = NEXT_RX(cons); 1866 1867 next_rx_no_prod_no_len: 1868 *raw_cons = tmp_raw_cons; 1869 1870 return rc; 1871 } 1872 1873 /* In netpoll mode, if we are using a combined completion ring, we need to 1874 * discard the rx packets and recycle the buffers. 1875 */ 1876 static int bnxt_force_rx_discard(struct bnxt *bp, 1877 struct bnxt_cp_ring_info *cpr, 1878 u32 *raw_cons, u8 *event) 1879 { 1880 u32 tmp_raw_cons = *raw_cons; 1881 struct rx_cmp_ext *rxcmp1; 1882 struct rx_cmp *rxcmp; 1883 u16 cp_cons; 1884 u8 cmp_type; 1885 1886 cp_cons = RING_CMP(tmp_raw_cons); 1887 rxcmp = (struct rx_cmp *) 1888 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1889 1890 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1891 cp_cons = RING_CMP(tmp_raw_cons); 1892 rxcmp1 = (struct rx_cmp_ext *) 1893 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1894 1895 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1896 return -EBUSY; 1897 1898 cmp_type = RX_CMP_TYPE(rxcmp); 1899 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1900 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1901 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1902 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1903 struct rx_tpa_end_cmp_ext *tpa_end1; 1904 1905 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1906 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1907 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1908 } 1909 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1910 } 1911 1912 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1913 { 1914 struct bnxt_fw_health *fw_health = bp->fw_health; 1915 u32 reg = fw_health->regs[reg_idx]; 1916 u32 reg_type, reg_off, val = 0; 1917 1918 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1919 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1920 switch (reg_type) { 1921 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1922 pci_read_config_dword(bp->pdev, reg_off, &val); 1923 break; 1924 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1925 reg_off = fw_health->mapped_regs[reg_idx]; 1926 /* fall through */ 1927 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1928 val = readl(bp->bar0 + reg_off); 1929 break; 1930 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1931 val = readl(bp->bar1 + reg_off); 1932 break; 1933 } 1934 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1935 val &= fw_health->fw_reset_inprog_reg_mask; 1936 return val; 1937 } 1938 1939 #define BNXT_GET_EVENT_PORT(data) \ 1940 ((data) & \ 1941 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1942 1943 static int bnxt_async_event_process(struct bnxt *bp, 1944 struct hwrm_async_event_cmpl *cmpl) 1945 { 1946 u16 event_id = le16_to_cpu(cmpl->event_id); 1947 1948 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1949 switch (event_id) { 1950 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1951 u32 data1 = le32_to_cpu(cmpl->event_data1); 1952 struct bnxt_link_info *link_info = &bp->link_info; 1953 1954 if (BNXT_VF(bp)) 1955 goto async_event_process_exit; 1956 1957 /* print unsupported speed warning in forced speed mode only */ 1958 if 
(!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1959 (data1 & 0x20000)) { 1960 u16 fw_speed = link_info->force_link_speed; 1961 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1962 1963 if (speed != SPEED_UNKNOWN) 1964 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1965 speed); 1966 } 1967 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1968 } 1969 /* fall through */ 1970 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 1971 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 1972 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 1973 /* fall through */ 1974 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1975 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1976 break; 1977 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1978 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1979 break; 1980 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1981 u32 data1 = le32_to_cpu(cmpl->event_data1); 1982 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1983 1984 if (BNXT_VF(bp)) 1985 break; 1986 1987 if (bp->pf.port_id != port_id) 1988 break; 1989 1990 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1991 break; 1992 } 1993 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1994 if (BNXT_PF(bp)) 1995 goto async_event_process_exit; 1996 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 1997 break; 1998 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 1999 u32 data1 = le32_to_cpu(cmpl->event_data1); 2000 2001 if (!bp->fw_health) 2002 goto async_event_process_exit; 2003 2004 bp->fw_reset_timestamp = jiffies; 2005 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2006 if (!bp->fw_reset_min_dsecs) 2007 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2008 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2009 if (!bp->fw_reset_max_dsecs) 2010 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2011 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2012 netdev_warn(bp->dev, "Firmware fatal reset event received\n"); 2013 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2014 } else { 2015 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", 2016 bp->fw_reset_max_dsecs * 100); 2017 } 2018 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2019 break; 2020 } 2021 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2022 struct bnxt_fw_health *fw_health = bp->fw_health; 2023 u32 data1 = le32_to_cpu(cmpl->event_data1); 2024 2025 if (!fw_health) 2026 goto async_event_process_exit; 2027 2028 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2029 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2030 if (!fw_health->enabled) 2031 break; 2032 2033 if (netif_msg_drv(bp)) 2034 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", 2035 fw_health->enabled, fw_health->master, 2036 bnxt_fw_health_readl(bp, 2037 BNXT_FW_RESET_CNT_REG), 2038 bnxt_fw_health_readl(bp, 2039 BNXT_FW_HEALTH_REG)); 2040 fw_health->tmr_multiplier = 2041 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2042 bp->current_interval * 10); 2043 fw_health->tmr_counter = fw_health->tmr_multiplier; 2044 fw_health->last_fw_heartbeat = 2045 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2046 fw_health->last_fw_reset_cnt = 2047 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2048 goto async_event_process_exit; 2049 } 2050 default: 2051 goto async_event_process_exit; 2052 } 2053 bnxt_queue_sp_work(bp); 2054 async_event_process_exit: 2055 bnxt_ulp_async_events(bp, cmpl); 2056 return 0; 2057 } 2058 2059 
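/* Descriptive note (added): dispatch HWRM completions seen on the
 * completion ring — sequence-matched DONE completions for commands
 * issued with interrupt notification, requests forwarded from VFs,
 * and asynchronous firmware events.
 */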
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2060 { 2061 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2062 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2063 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2064 (struct hwrm_fwd_req_cmpl *)txcmp; 2065 2066 switch (cmpl_type) { 2067 case CMPL_BASE_TYPE_HWRM_DONE: 2068 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2069 if (seq_id == bp->hwrm_intr_seq_id) 2070 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; 2071 else 2072 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 2073 break; 2074 2075 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2076 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2077 2078 if ((vf_id < bp->pf.first_vf_id) || 2079 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2080 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2081 vf_id); 2082 return -EINVAL; 2083 } 2084 2085 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2086 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 2087 bnxt_queue_sp_work(bp); 2088 break; 2089 2090 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2091 bnxt_async_event_process(bp, 2092 (struct hwrm_async_event_cmpl *)txcmp); 2093 2094 default: 2095 break; 2096 } 2097 2098 return 0; 2099 } 2100 2101 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2102 { 2103 struct bnxt_napi *bnapi = dev_instance; 2104 struct bnxt *bp = bnapi->bp; 2105 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2106 u32 cons = RING_CMP(cpr->cp_raw_cons); 2107 2108 cpr->event_ctr++; 2109 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2110 napi_schedule(&bnapi->napi); 2111 return IRQ_HANDLED; 2112 } 2113 2114 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2115 { 2116 u32 raw_cons = cpr->cp_raw_cons; 2117 u16 cons = RING_CMP(raw_cons); 2118 struct tx_cmp *txcmp; 2119 2120 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2121 2122 return TX_CMP_VALID(txcmp, raw_cons); 2123 } 2124 2125 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2126 { 2127 struct bnxt_napi *bnapi = dev_instance; 2128 struct bnxt *bp = bnapi->bp; 2129 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2130 u32 cons = RING_CMP(cpr->cp_raw_cons); 2131 u32 int_status; 2132 2133 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2134 2135 if (!bnxt_has_work(bp, cpr)) { 2136 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2137 /* return if erroneous interrupt */ 2138 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2139 return IRQ_NONE; 2140 } 2141 2142 /* disable ring IRQ */ 2143 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2144 2145 /* Return here if interrupt is shared and is disabled. */ 2146 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2147 return IRQ_HANDLED; 2148 2149 napi_schedule(&bnapi->napi); 2150 return IRQ_HANDLED; 2151 } 2152 2153 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2154 int budget) 2155 { 2156 struct bnxt_napi *bnapi = cpr->bnapi; 2157 u32 raw_cons = cpr->cp_raw_cons; 2158 u32 cons; 2159 int tx_pkts = 0; 2160 int rx_pkts = 0; 2161 u8 event = 0; 2162 struct tx_cmp *txcmp; 2163 2164 cpr->has_more_work = 0; 2165 cpr->had_work_done = 1; 2166 while (1) { 2167 int rc; 2168 2169 cons = RING_CMP(raw_cons); 2170 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2171 2172 if (!TX_CMP_VALID(txcmp, raw_cons)) 2173 break; 2174 2175 /* The valid test of the entry must be done first before 2176 * reading any further. 
2177 */ 2178 dma_rmb(); 2179 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2180 tx_pkts++; 2181 /* return full budget so NAPI will complete. */ 2182 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2183 rx_pkts = budget; 2184 raw_cons = NEXT_RAW_CMP(raw_cons); 2185 if (budget) 2186 cpr->has_more_work = 1; 2187 break; 2188 } 2189 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2190 if (likely(budget)) 2191 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2192 else 2193 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2194 &event); 2195 if (likely(rc >= 0)) 2196 rx_pkts += rc; 2197 /* Increment rx_pkts when rc is -ENOMEM to count towards 2198 * the NAPI budget. Otherwise, we may potentially loop 2199 * here forever if we consistently cannot allocate 2200 * buffers. 2201 */ 2202 else if (rc == -ENOMEM && budget) 2203 rx_pkts++; 2204 else if (rc == -EBUSY) /* partial completion */ 2205 break; 2206 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2207 CMPL_BASE_TYPE_HWRM_DONE) || 2208 (TX_CMP_TYPE(txcmp) == 2209 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2210 (TX_CMP_TYPE(txcmp) == 2211 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2212 bnxt_hwrm_handler(bp, txcmp); 2213 } 2214 raw_cons = NEXT_RAW_CMP(raw_cons); 2215 2216 if (rx_pkts && rx_pkts == budget) { 2217 cpr->has_more_work = 1; 2218 break; 2219 } 2220 } 2221 2222 if (event & BNXT_REDIRECT_EVENT) 2223 xdp_do_flush_map(); 2224 2225 if (event & BNXT_TX_EVENT) { 2226 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 2227 u16 prod = txr->tx_prod; 2228 2229 /* Sync BD data before updating doorbell */ 2230 wmb(); 2231 2232 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2233 } 2234 2235 cpr->cp_raw_cons = raw_cons; 2236 bnapi->tx_pkts += tx_pkts; 2237 bnapi->events |= event; 2238 return rx_pkts; 2239 } 2240 2241 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 2242 { 2243 if (bnapi->tx_pkts) { 2244 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 2245 bnapi->tx_pkts = 0; 2246 } 2247 2248 if (bnapi->events & BNXT_RX_EVENT) { 2249 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2250 2251 if (bnapi->events & BNXT_AGG_EVENT) 2252 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2253 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2254 } 2255 bnapi->events = 0; 2256 } 2257 2258 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2259 int budget) 2260 { 2261 struct bnxt_napi *bnapi = cpr->bnapi; 2262 int rx_pkts; 2263 2264 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2265 2266 /* ACK completion ring before freeing tx ring and producing new 2267 * buffers in rx/agg rings to prevent overflowing the completion 2268 * ring. 
2269 */ 2270 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2271 2272 __bnxt_poll_work_done(bp, bnapi); 2273 return rx_pkts; 2274 } 2275 2276 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2277 { 2278 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2279 struct bnxt *bp = bnapi->bp; 2280 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2281 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2282 struct tx_cmp *txcmp; 2283 struct rx_cmp_ext *rxcmp1; 2284 u32 cp_cons, tmp_raw_cons; 2285 u32 raw_cons = cpr->cp_raw_cons; 2286 u32 rx_pkts = 0; 2287 u8 event = 0; 2288 2289 while (1) { 2290 int rc; 2291 2292 cp_cons = RING_CMP(raw_cons); 2293 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2294 2295 if (!TX_CMP_VALID(txcmp, raw_cons)) 2296 break; 2297 2298 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2299 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2300 cp_cons = RING_CMP(tmp_raw_cons); 2301 rxcmp1 = (struct rx_cmp_ext *) 2302 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2303 2304 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2305 break; 2306 2307 /* force an error to recycle the buffer */ 2308 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2309 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2310 2311 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2312 if (likely(rc == -EIO) && budget) 2313 rx_pkts++; 2314 else if (rc == -EBUSY) /* partial completion */ 2315 break; 2316 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2317 CMPL_BASE_TYPE_HWRM_DONE)) { 2318 bnxt_hwrm_handler(bp, txcmp); 2319 } else { 2320 netdev_err(bp->dev, 2321 "Invalid completion received on special ring\n"); 2322 } 2323 raw_cons = NEXT_RAW_CMP(raw_cons); 2324 2325 if (rx_pkts == budget) 2326 break; 2327 } 2328 2329 cpr->cp_raw_cons = raw_cons; 2330 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2331 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2332 2333 if (event & BNXT_AGG_EVENT) 2334 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2335 2336 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2337 napi_complete_done(napi, rx_pkts); 2338 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2339 } 2340 return rx_pkts; 2341 } 2342 2343 static int bnxt_poll(struct napi_struct *napi, int budget) 2344 { 2345 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2346 struct bnxt *bp = bnapi->bp; 2347 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2348 int work_done = 0; 2349 2350 while (1) { 2351 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2352 2353 if (work_done >= budget) { 2354 if (!budget) 2355 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2356 break; 2357 } 2358 2359 if (!bnxt_has_work(bp, cpr)) { 2360 if (napi_complete_done(napi, work_done)) 2361 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2362 break; 2363 } 2364 } 2365 if (bp->flags & BNXT_FLAG_DIM) { 2366 struct dim_sample dim_sample = {}; 2367 2368 dim_update_sample(cpr->event_ctr, 2369 cpr->rx_packets, 2370 cpr->rx_bytes, 2371 &dim_sample); 2372 net_dim(&cpr->dim, dim_sample); 2373 } 2374 return work_done; 2375 } 2376 2377 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2378 { 2379 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2380 int i, work_done = 0; 2381 2382 for (i = 0; i < 2; i++) { 2383 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2384 2385 if (cpr2) { 2386 work_done += __bnxt_poll_work(bp, cpr2, 2387 budget - work_done); 2388 cpr->has_more_work |= cpr2->has_more_work; 2389 } 2390 } 2391 return work_done; 2392 } 2393 2394 static void __bnxt_poll_cqs_done(struct bnxt 
*bp, struct bnxt_napi *bnapi, 2395 u64 dbr_type) 2396 { 2397 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2398 int i; 2399 2400 for (i = 0; i < 2; i++) { 2401 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2402 struct bnxt_db_info *db; 2403 2404 if (cpr2 && cpr2->had_work_done) { 2405 db = &cpr2->cp_db; 2406 writeq(db->db_key64 | dbr_type | 2407 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2408 cpr2->had_work_done = 0; 2409 } 2410 } 2411 __bnxt_poll_work_done(bp, bnapi); 2412 } 2413 2414 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2415 { 2416 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2417 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2418 u32 raw_cons = cpr->cp_raw_cons; 2419 struct bnxt *bp = bnapi->bp; 2420 struct nqe_cn *nqcmp; 2421 int work_done = 0; 2422 u32 cons; 2423 2424 if (cpr->has_more_work) { 2425 cpr->has_more_work = 0; 2426 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2427 } 2428 while (1) { 2429 cons = RING_CMP(raw_cons); 2430 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2431 2432 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2433 if (cpr->has_more_work) 2434 break; 2435 2436 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); 2437 cpr->cp_raw_cons = raw_cons; 2438 if (napi_complete_done(napi, work_done)) 2439 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2440 cpr->cp_raw_cons); 2441 return work_done; 2442 } 2443 2444 /* The valid test of the entry must be done first before 2445 * reading any further. 2446 */ 2447 dma_rmb(); 2448 2449 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2450 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2451 struct bnxt_cp_ring_info *cpr2; 2452 2453 cpr2 = cpr->cp_ring_arr[idx]; 2454 work_done += __bnxt_poll_work(bp, cpr2, 2455 budget - work_done); 2456 cpr->has_more_work |= cpr2->has_more_work; 2457 } else { 2458 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2459 } 2460 raw_cons = NEXT_RAW_CMP(raw_cons); 2461 } 2462 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); 2463 if (raw_cons != cpr->cp_raw_cons) { 2464 cpr->cp_raw_cons = raw_cons; 2465 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 2466 } 2467 return work_done; 2468 } 2469 2470 static void bnxt_free_tx_skbs(struct bnxt *bp) 2471 { 2472 int i, max_idx; 2473 struct pci_dev *pdev = bp->pdev; 2474 2475 if (!bp->tx_ring) 2476 return; 2477 2478 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2479 for (i = 0; i < bp->tx_nr_rings; i++) { 2480 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2481 int j; 2482 2483 for (j = 0; j < max_idx;) { 2484 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2485 struct sk_buff *skb; 2486 int k, last; 2487 2488 if (i < bp->tx_nr_rings_xdp && 2489 tx_buf->action == XDP_REDIRECT) { 2490 dma_unmap_single(&pdev->dev, 2491 dma_unmap_addr(tx_buf, mapping), 2492 dma_unmap_len(tx_buf, len), 2493 PCI_DMA_TODEVICE); 2494 xdp_return_frame(tx_buf->xdpf); 2495 tx_buf->action = 0; 2496 tx_buf->xdpf = NULL; 2497 j++; 2498 continue; 2499 } 2500 2501 skb = tx_buf->skb; 2502 if (!skb) { 2503 j++; 2504 continue; 2505 } 2506 2507 tx_buf->skb = NULL; 2508 2509 if (tx_buf->is_push) { 2510 dev_kfree_skb(skb); 2511 j += 2; 2512 continue; 2513 } 2514 2515 dma_unmap_single(&pdev->dev, 2516 dma_unmap_addr(tx_buf, mapping), 2517 skb_headlen(skb), 2518 PCI_DMA_TODEVICE); 2519 2520 last = tx_buf->nr_frags; 2521 j += 2; 2522 for (k = 0; k < last; k++, j++) { 2523 int ring_idx = j & bp->tx_ring_mask; 2524 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2525 2526 tx_buf = &txr->tx_buf_ring[ring_idx]; 2527 dma_unmap_page( 2528 &pdev->dev, 2529 
dma_unmap_addr(tx_buf, mapping), 2530 skb_frag_size(frag), PCI_DMA_TODEVICE); 2531 } 2532 dev_kfree_skb(skb); 2533 } 2534 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2535 } 2536 } 2537 2538 static void bnxt_free_rx_skbs(struct bnxt *bp) 2539 { 2540 int i, max_idx, max_agg_idx; 2541 struct pci_dev *pdev = bp->pdev; 2542 2543 if (!bp->rx_ring) 2544 return; 2545 2546 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2547 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2548 for (i = 0; i < bp->rx_nr_rings; i++) { 2549 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2550 struct bnxt_tpa_idx_map *map; 2551 int j; 2552 2553 if (rxr->rx_tpa) { 2554 for (j = 0; j < bp->max_tpa; j++) { 2555 struct bnxt_tpa_info *tpa_info = 2556 &rxr->rx_tpa[j]; 2557 u8 *data = tpa_info->data; 2558 2559 if (!data) 2560 continue; 2561 2562 dma_unmap_single_attrs(&pdev->dev, 2563 tpa_info->mapping, 2564 bp->rx_buf_use_size, 2565 bp->rx_dir, 2566 DMA_ATTR_WEAK_ORDERING); 2567 2568 tpa_info->data = NULL; 2569 2570 kfree(data); 2571 } 2572 } 2573 2574 for (j = 0; j < max_idx; j++) { 2575 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2576 dma_addr_t mapping = rx_buf->mapping; 2577 void *data = rx_buf->data; 2578 2579 if (!data) 2580 continue; 2581 2582 rx_buf->data = NULL; 2583 2584 if (BNXT_RX_PAGE_MODE(bp)) { 2585 mapping -= bp->rx_dma_offset; 2586 dma_unmap_page_attrs(&pdev->dev, mapping, 2587 PAGE_SIZE, bp->rx_dir, 2588 DMA_ATTR_WEAK_ORDERING); 2589 page_pool_recycle_direct(rxr->page_pool, data); 2590 } else { 2591 dma_unmap_single_attrs(&pdev->dev, mapping, 2592 bp->rx_buf_use_size, 2593 bp->rx_dir, 2594 DMA_ATTR_WEAK_ORDERING); 2595 kfree(data); 2596 } 2597 } 2598 2599 for (j = 0; j < max_agg_idx; j++) { 2600 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2601 &rxr->rx_agg_ring[j]; 2602 struct page *page = rx_agg_buf->page; 2603 2604 if (!page) 2605 continue; 2606 2607 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2608 BNXT_RX_PAGE_SIZE, 2609 PCI_DMA_FROMDEVICE, 2610 DMA_ATTR_WEAK_ORDERING); 2611 2612 rx_agg_buf->page = NULL; 2613 __clear_bit(j, rxr->rx_agg_bmap); 2614 2615 __free_page(page); 2616 } 2617 if (rxr->rx_page) { 2618 __free_page(rxr->rx_page); 2619 rxr->rx_page = NULL; 2620 } 2621 map = rxr->rx_tpa_idx_map; 2622 if (map) 2623 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2624 } 2625 } 2626 2627 static void bnxt_free_skbs(struct bnxt *bp) 2628 { 2629 bnxt_free_tx_skbs(bp); 2630 bnxt_free_rx_skbs(bp); 2631 } 2632 2633 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2634 { 2635 struct pci_dev *pdev = bp->pdev; 2636 int i; 2637 2638 for (i = 0; i < rmem->nr_pages; i++) { 2639 if (!rmem->pg_arr[i]) 2640 continue; 2641 2642 dma_free_coherent(&pdev->dev, rmem->page_size, 2643 rmem->pg_arr[i], rmem->dma_arr[i]); 2644 2645 rmem->pg_arr[i] = NULL; 2646 } 2647 if (rmem->pg_tbl) { 2648 size_t pg_tbl_size = rmem->nr_pages * 8; 2649 2650 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2651 pg_tbl_size = rmem->page_size; 2652 dma_free_coherent(&pdev->dev, pg_tbl_size, 2653 rmem->pg_tbl, rmem->pg_tbl_map); 2654 rmem->pg_tbl = NULL; 2655 } 2656 if (rmem->vmem_size && *rmem->vmem) { 2657 vfree(*rmem->vmem); 2658 *rmem->vmem = NULL; 2659 } 2660 } 2661 2662 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2663 { 2664 struct pci_dev *pdev = bp->pdev; 2665 u64 valid_bit = 0; 2666 int i; 2667 2668 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2669 valid_bit = PTU_PTE_VALID; 2670 if ((rmem->nr_pages > 1 || rmem->depth > 0) 
&& !rmem->pg_tbl) { 2671 size_t pg_tbl_size = rmem->nr_pages * 8; 2672 2673 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2674 pg_tbl_size = rmem->page_size; 2675 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2676 &rmem->pg_tbl_map, 2677 GFP_KERNEL); 2678 if (!rmem->pg_tbl) 2679 return -ENOMEM; 2680 } 2681 2682 for (i = 0; i < rmem->nr_pages; i++) { 2683 u64 extra_bits = valid_bit; 2684 2685 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2686 rmem->page_size, 2687 &rmem->dma_arr[i], 2688 GFP_KERNEL); 2689 if (!rmem->pg_arr[i]) 2690 return -ENOMEM; 2691 2692 if (rmem->init_val) 2693 memset(rmem->pg_arr[i], rmem->init_val, 2694 rmem->page_size); 2695 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2696 if (i == rmem->nr_pages - 2 && 2697 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2698 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2699 else if (i == rmem->nr_pages - 1 && 2700 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2701 extra_bits |= PTU_PTE_LAST; 2702 rmem->pg_tbl[i] = 2703 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2704 } 2705 } 2706 2707 if (rmem->vmem_size) { 2708 *rmem->vmem = vzalloc(rmem->vmem_size); 2709 if (!(*rmem->vmem)) 2710 return -ENOMEM; 2711 } 2712 return 0; 2713 } 2714 2715 static void bnxt_free_tpa_info(struct bnxt *bp) 2716 { 2717 int i; 2718 2719 for (i = 0; i < bp->rx_nr_rings; i++) { 2720 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2721 2722 kfree(rxr->rx_tpa_idx_map); 2723 rxr->rx_tpa_idx_map = NULL; 2724 if (rxr->rx_tpa) { 2725 kfree(rxr->rx_tpa[0].agg_arr); 2726 rxr->rx_tpa[0].agg_arr = NULL; 2727 } 2728 kfree(rxr->rx_tpa); 2729 rxr->rx_tpa = NULL; 2730 } 2731 } 2732 2733 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2734 { 2735 int i, j, total_aggs = 0; 2736 2737 bp->max_tpa = MAX_TPA; 2738 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2739 if (!bp->max_tpa_v2) 2740 return 0; 2741 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2742 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2743 } 2744 2745 for (i = 0; i < bp->rx_nr_rings; i++) { 2746 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2747 struct rx_agg_cmp *agg; 2748 2749 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2750 GFP_KERNEL); 2751 if (!rxr->rx_tpa) 2752 return -ENOMEM; 2753 2754 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2755 continue; 2756 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2757 rxr->rx_tpa[0].agg_arr = agg; 2758 if (!agg) 2759 return -ENOMEM; 2760 for (j = 1; j < bp->max_tpa; j++) 2761 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2762 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2763 GFP_KERNEL); 2764 if (!rxr->rx_tpa_idx_map) 2765 return -ENOMEM; 2766 } 2767 return 0; 2768 } 2769 2770 static void bnxt_free_rx_rings(struct bnxt *bp) 2771 { 2772 int i; 2773 2774 if (!bp->rx_ring) 2775 return; 2776 2777 bnxt_free_tpa_info(bp); 2778 for (i = 0; i < bp->rx_nr_rings; i++) { 2779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2780 struct bnxt_ring_struct *ring; 2781 2782 if (rxr->xdp_prog) 2783 bpf_prog_put(rxr->xdp_prog); 2784 2785 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2786 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2787 2788 page_pool_destroy(rxr->page_pool); 2789 rxr->page_pool = NULL; 2790 2791 kfree(rxr->rx_agg_bmap); 2792 rxr->rx_agg_bmap = NULL; 2793 2794 ring = &rxr->rx_ring_struct; 2795 bnxt_free_ring(bp, &ring->ring_mem); 2796 2797 ring = &rxr->rx_agg_ring_struct; 2798 bnxt_free_ring(bp, &ring->ring_mem); 2799 } 2800 } 2801 2802 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2803 struct bnxt_rx_ring_info *rxr) 2804 { 2805 struct 
page_pool_params pp = { 0 }; 2806 2807 pp.pool_size = bp->rx_ring_size; 2808 pp.nid = dev_to_node(&bp->pdev->dev); 2809 pp.dev = &bp->pdev->dev; 2810 pp.dma_dir = DMA_BIDIRECTIONAL; 2811 2812 rxr->page_pool = page_pool_create(&pp); 2813 if (IS_ERR(rxr->page_pool)) { 2814 int err = PTR_ERR(rxr->page_pool); 2815 2816 rxr->page_pool = NULL; 2817 return err; 2818 } 2819 return 0; 2820 } 2821 2822 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2823 { 2824 int i, rc = 0, agg_rings = 0; 2825 2826 if (!bp->rx_ring) 2827 return -ENOMEM; 2828 2829 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2830 agg_rings = 1; 2831 2832 for (i = 0; i < bp->rx_nr_rings; i++) { 2833 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2834 struct bnxt_ring_struct *ring; 2835 2836 ring = &rxr->rx_ring_struct; 2837 2838 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2839 if (rc) 2840 return rc; 2841 2842 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2843 if (rc < 0) 2844 return rc; 2845 2846 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2847 MEM_TYPE_PAGE_POOL, 2848 rxr->page_pool); 2849 if (rc) { 2850 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2851 return rc; 2852 } 2853 2854 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2855 if (rc) 2856 return rc; 2857 2858 ring->grp_idx = i; 2859 if (agg_rings) { 2860 u16 mem_size; 2861 2862 ring = &rxr->rx_agg_ring_struct; 2863 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2864 if (rc) 2865 return rc; 2866 2867 ring->grp_idx = i; 2868 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2869 mem_size = rxr->rx_agg_bmap_size / 8; 2870 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2871 if (!rxr->rx_agg_bmap) 2872 return -ENOMEM; 2873 } 2874 } 2875 if (bp->flags & BNXT_FLAG_TPA) 2876 rc = bnxt_alloc_tpa_info(bp); 2877 return rc; 2878 } 2879 2880 static void bnxt_free_tx_rings(struct bnxt *bp) 2881 { 2882 int i; 2883 struct pci_dev *pdev = bp->pdev; 2884 2885 if (!bp->tx_ring) 2886 return; 2887 2888 for (i = 0; i < bp->tx_nr_rings; i++) { 2889 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2890 struct bnxt_ring_struct *ring; 2891 2892 if (txr->tx_push) { 2893 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2894 txr->tx_push, txr->tx_push_mapping); 2895 txr->tx_push = NULL; 2896 } 2897 2898 ring = &txr->tx_ring_struct; 2899 2900 bnxt_free_ring(bp, &ring->ring_mem); 2901 } 2902 } 2903 2904 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2905 { 2906 int i, j, rc; 2907 struct pci_dev *pdev = bp->pdev; 2908 2909 bp->tx_push_size = 0; 2910 if (bp->tx_push_thresh) { 2911 int push_size; 2912 2913 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2914 bp->tx_push_thresh); 2915 2916 if (push_size > 256) { 2917 push_size = 0; 2918 bp->tx_push_thresh = 0; 2919 } 2920 2921 bp->tx_push_size = push_size; 2922 } 2923 2924 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2925 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2926 struct bnxt_ring_struct *ring; 2927 u8 qidx; 2928 2929 ring = &txr->tx_ring_struct; 2930 2931 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2932 if (rc) 2933 return rc; 2934 2935 ring->grp_idx = txr->bnapi->index; 2936 if (bp->tx_push_size) { 2937 dma_addr_t mapping; 2938 2939 /* One pre-allocated DMA buffer to backup 2940 * TX push operation 2941 */ 2942 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2943 bp->tx_push_size, 2944 &txr->tx_push_mapping, 2945 GFP_KERNEL); 2946 2947 if (!txr->tx_push) 2948 return -ENOMEM; 2949 2950 mapping = txr->tx_push_mapping + 2951 sizeof(struct tx_push_bd); 2952 txr->data_mapping = cpu_to_le64(mapping); 2953 } 2954 qidx = bp->tc_to_qidx[j]; 2955 ring->queue_id = 
bp->q_info[qidx].queue_id; 2956 if (i < bp->tx_nr_rings_xdp) 2957 continue; 2958 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2959 j++; 2960 } 2961 return 0; 2962 } 2963 2964 static void bnxt_free_cp_rings(struct bnxt *bp) 2965 { 2966 int i; 2967 2968 if (!bp->bnapi) 2969 return; 2970 2971 for (i = 0; i < bp->cp_nr_rings; i++) { 2972 struct bnxt_napi *bnapi = bp->bnapi[i]; 2973 struct bnxt_cp_ring_info *cpr; 2974 struct bnxt_ring_struct *ring; 2975 int j; 2976 2977 if (!bnapi) 2978 continue; 2979 2980 cpr = &bnapi->cp_ring; 2981 ring = &cpr->cp_ring_struct; 2982 2983 bnxt_free_ring(bp, &ring->ring_mem); 2984 2985 for (j = 0; j < 2; j++) { 2986 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2987 2988 if (cpr2) { 2989 ring = &cpr2->cp_ring_struct; 2990 bnxt_free_ring(bp, &ring->ring_mem); 2991 kfree(cpr2); 2992 cpr->cp_ring_arr[j] = NULL; 2993 } 2994 } 2995 } 2996 } 2997 2998 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 2999 { 3000 struct bnxt_ring_mem_info *rmem; 3001 struct bnxt_ring_struct *ring; 3002 struct bnxt_cp_ring_info *cpr; 3003 int rc; 3004 3005 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3006 if (!cpr) 3007 return NULL; 3008 3009 ring = &cpr->cp_ring_struct; 3010 rmem = &ring->ring_mem; 3011 rmem->nr_pages = bp->cp_nr_pages; 3012 rmem->page_size = HW_CMPD_RING_SIZE; 3013 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3014 rmem->dma_arr = cpr->cp_desc_mapping; 3015 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3016 rc = bnxt_alloc_ring(bp, rmem); 3017 if (rc) { 3018 bnxt_free_ring(bp, rmem); 3019 kfree(cpr); 3020 cpr = NULL; 3021 } 3022 return cpr; 3023 } 3024 3025 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3026 { 3027 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3028 int i, rc, ulp_base_vec, ulp_msix; 3029 3030 ulp_msix = bnxt_get_ulp_msix_num(bp); 3031 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3032 for (i = 0; i < bp->cp_nr_rings; i++) { 3033 struct bnxt_napi *bnapi = bp->bnapi[i]; 3034 struct bnxt_cp_ring_info *cpr; 3035 struct bnxt_ring_struct *ring; 3036 3037 if (!bnapi) 3038 continue; 3039 3040 cpr = &bnapi->cp_ring; 3041 cpr->bnapi = bnapi; 3042 ring = &cpr->cp_ring_struct; 3043 3044 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3045 if (rc) 3046 return rc; 3047 3048 if (ulp_msix && i >= ulp_base_vec) 3049 ring->map_idx = i + ulp_msix; 3050 else 3051 ring->map_idx = i; 3052 3053 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3054 continue; 3055 3056 if (i < bp->rx_nr_rings) { 3057 struct bnxt_cp_ring_info *cpr2 = 3058 bnxt_alloc_cp_sub_ring(bp); 3059 3060 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3061 if (!cpr2) 3062 return -ENOMEM; 3063 cpr2->bnapi = bnapi; 3064 } 3065 if ((sh && i < bp->tx_nr_rings) || 3066 (!sh && i >= bp->rx_nr_rings)) { 3067 struct bnxt_cp_ring_info *cpr2 = 3068 bnxt_alloc_cp_sub_ring(bp); 3069 3070 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3071 if (!cpr2) 3072 return -ENOMEM; 3073 cpr2->bnapi = bnapi; 3074 } 3075 } 3076 return 0; 3077 } 3078 3079 static void bnxt_init_ring_struct(struct bnxt *bp) 3080 { 3081 int i; 3082 3083 for (i = 0; i < bp->cp_nr_rings; i++) { 3084 struct bnxt_napi *bnapi = bp->bnapi[i]; 3085 struct bnxt_ring_mem_info *rmem; 3086 struct bnxt_cp_ring_info *cpr; 3087 struct bnxt_rx_ring_info *rxr; 3088 struct bnxt_tx_ring_info *txr; 3089 struct bnxt_ring_struct *ring; 3090 3091 if (!bnapi) 3092 continue; 3093 3094 cpr = &bnapi->cp_ring; 3095 ring = &cpr->cp_ring_struct; 3096 rmem = &ring->ring_mem; 3097 rmem->nr_pages = bp->cp_nr_pages; 3098 rmem->page_size = HW_CMPD_RING_SIZE; 3099 rmem->pg_arr = 
(void **)cpr->cp_desc_ring; 3100 rmem->dma_arr = cpr->cp_desc_mapping; 3101 rmem->vmem_size = 0; 3102 3103 rxr = bnapi->rx_ring; 3104 if (!rxr) 3105 goto skip_rx; 3106 3107 ring = &rxr->rx_ring_struct; 3108 rmem = &ring->ring_mem; 3109 rmem->nr_pages = bp->rx_nr_pages; 3110 rmem->page_size = HW_RXBD_RING_SIZE; 3111 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3112 rmem->dma_arr = rxr->rx_desc_mapping; 3113 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3114 rmem->vmem = (void **)&rxr->rx_buf_ring; 3115 3116 ring = &rxr->rx_agg_ring_struct; 3117 rmem = &ring->ring_mem; 3118 rmem->nr_pages = bp->rx_agg_nr_pages; 3119 rmem->page_size = HW_RXBD_RING_SIZE; 3120 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3121 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3122 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3123 rmem->vmem = (void **)&rxr->rx_agg_ring; 3124 3125 skip_rx: 3126 txr = bnapi->tx_ring; 3127 if (!txr) 3128 continue; 3129 3130 ring = &txr->tx_ring_struct; 3131 rmem = &ring->ring_mem; 3132 rmem->nr_pages = bp->tx_nr_pages; 3133 rmem->page_size = HW_RXBD_RING_SIZE; 3134 rmem->pg_arr = (void **)txr->tx_desc_ring; 3135 rmem->dma_arr = txr->tx_desc_mapping; 3136 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3137 rmem->vmem = (void **)&txr->tx_buf_ring; 3138 } 3139 } 3140 3141 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3142 { 3143 int i; 3144 u32 prod; 3145 struct rx_bd **rx_buf_ring; 3146 3147 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3148 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3149 int j; 3150 struct rx_bd *rxbd; 3151 3152 rxbd = rx_buf_ring[i]; 3153 if (!rxbd) 3154 continue; 3155 3156 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3157 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3158 rxbd->rx_bd_opaque = prod; 3159 } 3160 } 3161 } 3162 3163 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3164 { 3165 struct net_device *dev = bp->dev; 3166 struct bnxt_rx_ring_info *rxr; 3167 struct bnxt_ring_struct *ring; 3168 u32 prod, type; 3169 int i; 3170 3171 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3172 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3173 3174 if (NET_IP_ALIGN == 2) 3175 type |= RX_BD_FLAGS_SOP; 3176 3177 rxr = &bp->rx_ring[ring_nr]; 3178 ring = &rxr->rx_ring_struct; 3179 bnxt_init_rxbd_pages(ring, type); 3180 3181 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3182 bpf_prog_add(bp->xdp_prog, 1); 3183 rxr->xdp_prog = bp->xdp_prog; 3184 } 3185 prod = rxr->rx_prod; 3186 for (i = 0; i < bp->rx_ring_size; i++) { 3187 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 3188 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3189 ring_nr, i, bp->rx_ring_size); 3190 break; 3191 } 3192 prod = NEXT_RX(prod); 3193 } 3194 rxr->rx_prod = prod; 3195 ring->fw_ring_id = INVALID_HW_RING_ID; 3196 3197 ring = &rxr->rx_agg_ring_struct; 3198 ring->fw_ring_id = INVALID_HW_RING_ID; 3199 3200 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3201 return 0; 3202 3203 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3204 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3205 3206 bnxt_init_rxbd_pages(ring, type); 3207 3208 prod = rxr->rx_agg_prod; 3209 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3210 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 3211 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3212 ring_nr, i, bp->rx_ring_size); 3213 break; 3214 } 3215 prod = NEXT_RX_AGG(prod); 3216 } 3217 rxr->rx_agg_prod = prod; 3218 3219 if (bp->flags & BNXT_FLAG_TPA) { 3220 
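/* Descriptive note (added): TPA (LRO/GRO_HW) is enabled, so pre-fill
 * every aggregation slot with a data buffer and its DMA mapping so
 * that hardware aggregation completions always have a buffer to use.
 */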
if (rxr->rx_tpa) { 3221 u8 *data; 3222 dma_addr_t mapping; 3223 3224 for (i = 0; i < bp->max_tpa; i++) { 3225 data = __bnxt_alloc_rx_data(bp, &mapping, 3226 GFP_KERNEL); 3227 if (!data) 3228 return -ENOMEM; 3229 3230 rxr->rx_tpa[i].data = data; 3231 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3232 rxr->rx_tpa[i].mapping = mapping; 3233 } 3234 } else { 3235 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 3236 return -ENOMEM; 3237 } 3238 } 3239 3240 return 0; 3241 } 3242 3243 static void bnxt_init_cp_rings(struct bnxt *bp) 3244 { 3245 int i, j; 3246 3247 for (i = 0; i < bp->cp_nr_rings; i++) { 3248 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3249 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3250 3251 ring->fw_ring_id = INVALID_HW_RING_ID; 3252 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3253 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3254 for (j = 0; j < 2; j++) { 3255 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3256 3257 if (!cpr2) 3258 continue; 3259 3260 ring = &cpr2->cp_ring_struct; 3261 ring->fw_ring_id = INVALID_HW_RING_ID; 3262 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3263 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3264 } 3265 } 3266 } 3267 3268 static int bnxt_init_rx_rings(struct bnxt *bp) 3269 { 3270 int i, rc = 0; 3271 3272 if (BNXT_RX_PAGE_MODE(bp)) { 3273 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3274 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3275 } else { 3276 bp->rx_offset = BNXT_RX_OFFSET; 3277 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3278 } 3279 3280 for (i = 0; i < bp->rx_nr_rings; i++) { 3281 rc = bnxt_init_one_rx_ring(bp, i); 3282 if (rc) 3283 break; 3284 } 3285 3286 return rc; 3287 } 3288 3289 static int bnxt_init_tx_rings(struct bnxt *bp) 3290 { 3291 u16 i; 3292 3293 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3294 MAX_SKB_FRAGS + 1); 3295 3296 for (i = 0; i < bp->tx_nr_rings; i++) { 3297 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3298 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3299 3300 ring->fw_ring_id = INVALID_HW_RING_ID; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static void bnxt_free_ring_grps(struct bnxt *bp) 3307 { 3308 kfree(bp->grp_info); 3309 bp->grp_info = NULL; 3310 } 3311 3312 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3313 { 3314 int i; 3315 3316 if (irq_re_init) { 3317 bp->grp_info = kcalloc(bp->cp_nr_rings, 3318 sizeof(struct bnxt_ring_grp_info), 3319 GFP_KERNEL); 3320 if (!bp->grp_info) 3321 return -ENOMEM; 3322 } 3323 for (i = 0; i < bp->cp_nr_rings; i++) { 3324 if (irq_re_init) 3325 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3326 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3327 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3328 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3329 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3330 } 3331 return 0; 3332 } 3333 3334 static void bnxt_free_vnics(struct bnxt *bp) 3335 { 3336 kfree(bp->vnic_info); 3337 bp->vnic_info = NULL; 3338 bp->nr_vnics = 0; 3339 } 3340 3341 static int bnxt_alloc_vnics(struct bnxt *bp) 3342 { 3343 int num_vnics = 1; 3344 3345 #ifdef CONFIG_RFS_ACCEL 3346 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 3347 num_vnics += bp->rx_nr_rings; 3348 #endif 3349 3350 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3351 num_vnics++; 3352 3353 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3354 GFP_KERNEL); 3355 if (!bp->vnic_info) 3356 return -ENOMEM; 3357 3358 bp->nr_vnics = num_vnics; 
3359 return 0; 3360 } 3361 3362 static void bnxt_init_vnics(struct bnxt *bp) 3363 { 3364 int i; 3365 3366 for (i = 0; i < bp->nr_vnics; i++) { 3367 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3368 int j; 3369 3370 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3371 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3372 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3373 3374 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3375 3376 if (bp->vnic_info[i].rss_hash_key) { 3377 if (i == 0) 3378 prandom_bytes(vnic->rss_hash_key, 3379 HW_HASH_KEY_SIZE); 3380 else 3381 memcpy(vnic->rss_hash_key, 3382 bp->vnic_info[0].rss_hash_key, 3383 HW_HASH_KEY_SIZE); 3384 } 3385 } 3386 } 3387 3388 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3389 { 3390 int pages; 3391 3392 pages = ring_size / desc_per_pg; 3393 3394 if (!pages) 3395 return 1; 3396 3397 pages++; 3398 3399 while (pages & (pages - 1)) 3400 pages++; 3401 3402 return pages; 3403 } 3404 3405 void bnxt_set_tpa_flags(struct bnxt *bp) 3406 { 3407 bp->flags &= ~BNXT_FLAG_TPA; 3408 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3409 return; 3410 if (bp->dev->features & NETIF_F_LRO) 3411 bp->flags |= BNXT_FLAG_LRO; 3412 else if (bp->dev->features & NETIF_F_GRO_HW) 3413 bp->flags |= BNXT_FLAG_GRO; 3414 } 3415 3416 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3417 * be set on entry. 3418 */ 3419 void bnxt_set_ring_params(struct bnxt *bp) 3420 { 3421 u32 ring_size, rx_size, rx_space; 3422 u32 agg_factor = 0, agg_ring_size = 0; 3423 3424 /* 8 for CRC and VLAN */ 3425 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3426 3427 rx_space = rx_size + NET_SKB_PAD + 3428 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3429 3430 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3431 ring_size = bp->rx_ring_size; 3432 bp->rx_agg_ring_size = 0; 3433 bp->rx_agg_nr_pages = 0; 3434 3435 if (bp->flags & BNXT_FLAG_TPA) 3436 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3437 3438 bp->flags &= ~BNXT_FLAG_JUMBO; 3439 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3440 u32 jumbo_factor; 3441 3442 bp->flags |= BNXT_FLAG_JUMBO; 3443 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3444 if (jumbo_factor > agg_factor) 3445 agg_factor = jumbo_factor; 3446 } 3447 agg_ring_size = ring_size * agg_factor; 3448 3449 if (agg_ring_size) { 3450 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3451 RX_DESC_CNT); 3452 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3453 u32 tmp = agg_ring_size; 3454 3455 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3456 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3457 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3458 tmp, agg_ring_size); 3459 } 3460 bp->rx_agg_ring_size = agg_ring_size; 3461 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3462 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3463 rx_space = rx_size + NET_SKB_PAD + 3464 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3465 } 3466 3467 bp->rx_buf_use_size = rx_size; 3468 bp->rx_buf_size = rx_space; 3469 3470 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3471 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3472 3473 ring_size = bp->tx_ring_size; 3474 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3475 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3476 3477 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3478 bp->cp_ring_size = ring_size; 3479 3480 bp->cp_nr_pages = 
bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3481 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3482 bp->cp_nr_pages = MAX_CP_PAGES; 3483 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3484 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3485 ring_size, bp->cp_ring_size); 3486 } 3487 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3488 bp->cp_ring_mask = bp->cp_bit - 1; 3489 } 3490 3491 /* Changing allocation mode of RX rings. 3492 * TODO: Update when extending xdp_rxq_info to support allocation modes. 3493 */ 3494 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3495 { 3496 if (page_mode) { 3497 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3498 return -EOPNOTSUPP; 3499 bp->dev->max_mtu = 3500 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3501 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3502 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3503 bp->rx_dir = DMA_BIDIRECTIONAL; 3504 bp->rx_skb_func = bnxt_rx_page_skb; 3505 /* Disable LRO or GRO_HW */ 3506 netdev_update_features(bp->dev); 3507 } else { 3508 bp->dev->max_mtu = bp->max_mtu; 3509 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3510 bp->rx_dir = DMA_FROM_DEVICE; 3511 bp->rx_skb_func = bnxt_rx_skb; 3512 } 3513 return 0; 3514 } 3515 3516 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3517 { 3518 int i; 3519 struct bnxt_vnic_info *vnic; 3520 struct pci_dev *pdev = bp->pdev; 3521 3522 if (!bp->vnic_info) 3523 return; 3524 3525 for (i = 0; i < bp->nr_vnics; i++) { 3526 vnic = &bp->vnic_info[i]; 3527 3528 kfree(vnic->fw_grp_ids); 3529 vnic->fw_grp_ids = NULL; 3530 3531 kfree(vnic->uc_list); 3532 vnic->uc_list = NULL; 3533 3534 if (vnic->mc_list) { 3535 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3536 vnic->mc_list, vnic->mc_list_mapping); 3537 vnic->mc_list = NULL; 3538 } 3539 3540 if (vnic->rss_table) { 3541 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3542 vnic->rss_table, 3543 vnic->rss_table_dma_addr); 3544 vnic->rss_table = NULL; 3545 } 3546 3547 vnic->rss_hash_key = NULL; 3548 vnic->flags = 0; 3549 } 3550 } 3551 3552 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3553 { 3554 int i, rc = 0, size; 3555 struct bnxt_vnic_info *vnic; 3556 struct pci_dev *pdev = bp->pdev; 3557 int max_rings; 3558 3559 for (i = 0; i < bp->nr_vnics; i++) { 3560 vnic = &bp->vnic_info[i]; 3561 3562 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3563 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3564 3565 if (mem_size > 0) { 3566 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3567 if (!vnic->uc_list) { 3568 rc = -ENOMEM; 3569 goto out; 3570 } 3571 } 3572 } 3573 3574 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3575 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3576 vnic->mc_list = 3577 dma_alloc_coherent(&pdev->dev, 3578 vnic->mc_list_size, 3579 &vnic->mc_list_mapping, 3580 GFP_KERNEL); 3581 if (!vnic->mc_list) { 3582 rc = -ENOMEM; 3583 goto out; 3584 } 3585 } 3586 3587 if (bp->flags & BNXT_FLAG_CHIP_P5) 3588 goto vnic_skip_grps; 3589 3590 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3591 max_rings = bp->rx_nr_rings; 3592 else 3593 max_rings = 1; 3594 3595 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3596 if (!vnic->fw_grp_ids) { 3597 rc = -ENOMEM; 3598 goto out; 3599 } 3600 vnic_skip_grps: 3601 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3602 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3603 continue; 3604 3605 /* Allocate rss table and hash key */ 3606 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3607 &vnic->rss_table_dma_addr, 3608 GFP_KERNEL); 3609 if (!vnic->rss_table) { 3610 rc = -ENOMEM; 3611 
goto out; 3612 } 3613 3614 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3615 3616 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3617 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3618 } 3619 return 0; 3620 3621 out: 3622 return rc; 3623 } 3624 3625 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3626 { 3627 struct pci_dev *pdev = bp->pdev; 3628 3629 if (bp->hwrm_cmd_resp_addr) { 3630 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3631 bp->hwrm_cmd_resp_dma_addr); 3632 bp->hwrm_cmd_resp_addr = NULL; 3633 } 3634 3635 if (bp->hwrm_cmd_kong_resp_addr) { 3636 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3637 bp->hwrm_cmd_kong_resp_addr, 3638 bp->hwrm_cmd_kong_resp_dma_addr); 3639 bp->hwrm_cmd_kong_resp_addr = NULL; 3640 } 3641 } 3642 3643 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3644 { 3645 struct pci_dev *pdev = bp->pdev; 3646 3647 if (bp->hwrm_cmd_kong_resp_addr) 3648 return 0; 3649 3650 bp->hwrm_cmd_kong_resp_addr = 3651 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3652 &bp->hwrm_cmd_kong_resp_dma_addr, 3653 GFP_KERNEL); 3654 if (!bp->hwrm_cmd_kong_resp_addr) 3655 return -ENOMEM; 3656 3657 return 0; 3658 } 3659 3660 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3661 { 3662 struct pci_dev *pdev = bp->pdev; 3663 3664 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3665 &bp->hwrm_cmd_resp_dma_addr, 3666 GFP_KERNEL); 3667 if (!bp->hwrm_cmd_resp_addr) 3668 return -ENOMEM; 3669 3670 return 0; 3671 } 3672 3673 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3674 { 3675 if (bp->hwrm_short_cmd_req_addr) { 3676 struct pci_dev *pdev = bp->pdev; 3677 3678 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3679 bp->hwrm_short_cmd_req_addr, 3680 bp->hwrm_short_cmd_req_dma_addr); 3681 bp->hwrm_short_cmd_req_addr = NULL; 3682 } 3683 } 3684 3685 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3686 { 3687 struct pci_dev *pdev = bp->pdev; 3688 3689 if (bp->hwrm_short_cmd_req_addr) 3690 return 0; 3691 3692 bp->hwrm_short_cmd_req_addr = 3693 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3694 &bp->hwrm_short_cmd_req_dma_addr, 3695 GFP_KERNEL); 3696 if (!bp->hwrm_short_cmd_req_addr) 3697 return -ENOMEM; 3698 3699 return 0; 3700 } 3701 3702 static void bnxt_free_port_stats(struct bnxt *bp) 3703 { 3704 struct pci_dev *pdev = bp->pdev; 3705 3706 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3707 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3708 3709 if (bp->hw_rx_port_stats) { 3710 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3711 bp->hw_rx_port_stats, 3712 bp->hw_rx_port_stats_map); 3713 bp->hw_rx_port_stats = NULL; 3714 } 3715 3716 if (bp->hw_tx_port_stats_ext) { 3717 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3718 bp->hw_tx_port_stats_ext, 3719 bp->hw_tx_port_stats_ext_map); 3720 bp->hw_tx_port_stats_ext = NULL; 3721 } 3722 3723 if (bp->hw_rx_port_stats_ext) { 3724 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3725 bp->hw_rx_port_stats_ext, 3726 bp->hw_rx_port_stats_ext_map); 3727 bp->hw_rx_port_stats_ext = NULL; 3728 } 3729 3730 if (bp->hw_pcie_stats) { 3731 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3732 bp->hw_pcie_stats, bp->hw_pcie_stats_map); 3733 bp->hw_pcie_stats = NULL; 3734 } 3735 } 3736 3737 static void bnxt_free_ring_stats(struct bnxt *bp) 3738 { 3739 struct pci_dev *pdev = bp->pdev; 3740 int size, i; 3741 3742 if (!bp->bnapi) 3743 return; 3744 3745 size = bp->hw_ring_stats_size; 3746 3747 for (i = 0; i < 
bp->cp_nr_rings; i++) { 3748 struct bnxt_napi *bnapi = bp->bnapi[i]; 3749 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3750 3751 if (cpr->hw_stats) { 3752 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3753 cpr->hw_stats_map); 3754 cpr->hw_stats = NULL; 3755 } 3756 } 3757 } 3758 3759 static int bnxt_alloc_stats(struct bnxt *bp) 3760 { 3761 u32 size, i; 3762 struct pci_dev *pdev = bp->pdev; 3763 3764 size = bp->hw_ring_stats_size; 3765 3766 for (i = 0; i < bp->cp_nr_rings; i++) { 3767 struct bnxt_napi *bnapi = bp->bnapi[i]; 3768 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3769 3770 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3771 &cpr->hw_stats_map, 3772 GFP_KERNEL); 3773 if (!cpr->hw_stats) 3774 return -ENOMEM; 3775 3776 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3777 } 3778 3779 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3780 return 0; 3781 3782 if (bp->hw_rx_port_stats) 3783 goto alloc_ext_stats; 3784 3785 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3786 sizeof(struct tx_port_stats) + 1024; 3787 3788 bp->hw_rx_port_stats = 3789 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3790 &bp->hw_rx_port_stats_map, 3791 GFP_KERNEL); 3792 if (!bp->hw_rx_port_stats) 3793 return -ENOMEM; 3794 3795 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; 3796 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3797 sizeof(struct rx_port_stats) + 512; 3798 bp->flags |= BNXT_FLAG_PORT_STATS; 3799 3800 alloc_ext_stats: 3801 /* Display extended statistics only if FW supports it */ 3802 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 3803 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3804 return 0; 3805 3806 if (bp->hw_rx_port_stats_ext) 3807 goto alloc_tx_ext_stats; 3808 3809 bp->hw_rx_port_stats_ext = 3810 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3811 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); 3812 if (!bp->hw_rx_port_stats_ext) 3813 return 0; 3814 3815 alloc_tx_ext_stats: 3816 if (bp->hw_tx_port_stats_ext) 3817 goto alloc_pcie_stats; 3818 3819 if (bp->hwrm_spec_code >= 0x10902 || 3820 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3821 bp->hw_tx_port_stats_ext = 3822 dma_alloc_coherent(&pdev->dev, 3823 sizeof(struct tx_port_stats_ext), 3824 &bp->hw_tx_port_stats_ext_map, 3825 GFP_KERNEL); 3826 } 3827 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3828 3829 alloc_pcie_stats: 3830 if (bp->hw_pcie_stats || 3831 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 3832 return 0; 3833 3834 bp->hw_pcie_stats = 3835 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3836 &bp->hw_pcie_stats_map, GFP_KERNEL); 3837 if (!bp->hw_pcie_stats) 3838 return 0; 3839 3840 bp->flags |= BNXT_FLAG_PCIE_STATS; 3841 return 0; 3842 } 3843 3844 static void bnxt_clear_ring_indices(struct bnxt *bp) 3845 { 3846 int i; 3847 3848 if (!bp->bnapi) 3849 return; 3850 3851 for (i = 0; i < bp->cp_nr_rings; i++) { 3852 struct bnxt_napi *bnapi = bp->bnapi[i]; 3853 struct bnxt_cp_ring_info *cpr; 3854 struct bnxt_rx_ring_info *rxr; 3855 struct bnxt_tx_ring_info *txr; 3856 3857 if (!bnapi) 3858 continue; 3859 3860 cpr = &bnapi->cp_ring; 3861 cpr->cp_raw_cons = 0; 3862 3863 txr = bnapi->tx_ring; 3864 if (txr) { 3865 txr->tx_prod = 0; 3866 txr->tx_cons = 0; 3867 } 3868 3869 rxr = bnapi->rx_ring; 3870 if (rxr) { 3871 rxr->rx_prod = 0; 3872 rxr->rx_agg_prod = 0; 3873 rxr->rx_sw_agg_prod = 0; 3874 rxr->rx_next_cons = 0; 3875 } 3876 } 3877 } 3878 3879 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3880 { 3881 
#ifdef CONFIG_RFS_ACCEL 3882 int i; 3883 3884 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3885 * safe to delete the hash table. 3886 */ 3887 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3888 struct hlist_head *head; 3889 struct hlist_node *tmp; 3890 struct bnxt_ntuple_filter *fltr; 3891 3892 head = &bp->ntp_fltr_hash_tbl[i]; 3893 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3894 hlist_del(&fltr->hash); 3895 kfree(fltr); 3896 } 3897 } 3898 if (irq_reinit) { 3899 kfree(bp->ntp_fltr_bmap); 3900 bp->ntp_fltr_bmap = NULL; 3901 } 3902 bp->ntp_fltr_count = 0; 3903 #endif 3904 } 3905 3906 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3907 { 3908 #ifdef CONFIG_RFS_ACCEL 3909 int i, rc = 0; 3910 3911 if (!(bp->flags & BNXT_FLAG_RFS)) 3912 return 0; 3913 3914 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3915 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3916 3917 bp->ntp_fltr_count = 0; 3918 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3919 sizeof(long), 3920 GFP_KERNEL); 3921 3922 if (!bp->ntp_fltr_bmap) 3923 rc = -ENOMEM; 3924 3925 return rc; 3926 #else 3927 return 0; 3928 #endif 3929 } 3930 3931 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3932 { 3933 bnxt_free_vnic_attributes(bp); 3934 bnxt_free_tx_rings(bp); 3935 bnxt_free_rx_rings(bp); 3936 bnxt_free_cp_rings(bp); 3937 bnxt_free_ntp_fltrs(bp, irq_re_init); 3938 if (irq_re_init) { 3939 bnxt_free_ring_stats(bp); 3940 bnxt_free_ring_grps(bp); 3941 bnxt_free_vnics(bp); 3942 kfree(bp->tx_ring_map); 3943 bp->tx_ring_map = NULL; 3944 kfree(bp->tx_ring); 3945 bp->tx_ring = NULL; 3946 kfree(bp->rx_ring); 3947 bp->rx_ring = NULL; 3948 kfree(bp->bnapi); 3949 bp->bnapi = NULL; 3950 } else { 3951 bnxt_clear_ring_indices(bp); 3952 } 3953 } 3954 3955 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3956 { 3957 int i, j, rc, size, arr_size; 3958 void *bnapi; 3959 3960 if (irq_re_init) { 3961 /* Allocate bnapi mem pointer array and mem block for 3962 * all queues 3963 */ 3964 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3965 bp->cp_nr_rings); 3966 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3967 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3968 if (!bnapi) 3969 return -ENOMEM; 3970 3971 bp->bnapi = bnapi; 3972 bnapi += arr_size; 3973 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3974 bp->bnapi[i] = bnapi; 3975 bp->bnapi[i]->index = i; 3976 bp->bnapi[i]->bp = bp; 3977 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3978 struct bnxt_cp_ring_info *cpr = 3979 &bp->bnapi[i]->cp_ring; 3980 3981 cpr->cp_ring_struct.ring_mem.flags = 3982 BNXT_RMEM_RING_PTE_FLAG; 3983 } 3984 } 3985 3986 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3987 sizeof(struct bnxt_rx_ring_info), 3988 GFP_KERNEL); 3989 if (!bp->rx_ring) 3990 return -ENOMEM; 3991 3992 for (i = 0; i < bp->rx_nr_rings; i++) { 3993 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3994 3995 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3996 rxr->rx_ring_struct.ring_mem.flags = 3997 BNXT_RMEM_RING_PTE_FLAG; 3998 rxr->rx_agg_ring_struct.ring_mem.flags = 3999 BNXT_RMEM_RING_PTE_FLAG; 4000 } 4001 rxr->bnapi = bp->bnapi[i]; 4002 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4003 } 4004 4005 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4006 sizeof(struct bnxt_tx_ring_info), 4007 GFP_KERNEL); 4008 if (!bp->tx_ring) 4009 return -ENOMEM; 4010 4011 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4012 GFP_KERNEL); 4013 4014 if (!bp->tx_ring_map) 4015 return -ENOMEM; 4016 4017 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4018 j = 0; 4019 else 
4020 j = bp->rx_nr_rings; 4021 4022 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4023 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4024 4025 if (bp->flags & BNXT_FLAG_CHIP_P5) 4026 txr->tx_ring_struct.ring_mem.flags = 4027 BNXT_RMEM_RING_PTE_FLAG; 4028 txr->bnapi = bp->bnapi[j]; 4029 bp->bnapi[j]->tx_ring = txr; 4030 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4031 if (i >= bp->tx_nr_rings_xdp) { 4032 txr->txq_index = i - bp->tx_nr_rings_xdp; 4033 bp->bnapi[j]->tx_int = bnxt_tx_int; 4034 } else { 4035 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4036 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4037 } 4038 } 4039 4040 rc = bnxt_alloc_stats(bp); 4041 if (rc) 4042 goto alloc_mem_err; 4043 4044 rc = bnxt_alloc_ntp_fltrs(bp); 4045 if (rc) 4046 goto alloc_mem_err; 4047 4048 rc = bnxt_alloc_vnics(bp); 4049 if (rc) 4050 goto alloc_mem_err; 4051 } 4052 4053 bnxt_init_ring_struct(bp); 4054 4055 rc = bnxt_alloc_rx_rings(bp); 4056 if (rc) 4057 goto alloc_mem_err; 4058 4059 rc = bnxt_alloc_tx_rings(bp); 4060 if (rc) 4061 goto alloc_mem_err; 4062 4063 rc = bnxt_alloc_cp_rings(bp); 4064 if (rc) 4065 goto alloc_mem_err; 4066 4067 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4068 BNXT_VNIC_UCAST_FLAG; 4069 rc = bnxt_alloc_vnic_attributes(bp); 4070 if (rc) 4071 goto alloc_mem_err; 4072 return 0; 4073 4074 alloc_mem_err: 4075 bnxt_free_mem(bp, true); 4076 return rc; 4077 } 4078 4079 static void bnxt_disable_int(struct bnxt *bp) 4080 { 4081 int i; 4082 4083 if (!bp->bnapi) 4084 return; 4085 4086 for (i = 0; i < bp->cp_nr_rings; i++) { 4087 struct bnxt_napi *bnapi = bp->bnapi[i]; 4088 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4089 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4090 4091 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4092 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4093 } 4094 } 4095 4096 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4097 { 4098 struct bnxt_napi *bnapi = bp->bnapi[n]; 4099 struct bnxt_cp_ring_info *cpr; 4100 4101 cpr = &bnapi->cp_ring; 4102 return cpr->cp_ring_struct.map_idx; 4103 } 4104 4105 static void bnxt_disable_int_sync(struct bnxt *bp) 4106 { 4107 int i; 4108 4109 atomic_inc(&bp->intr_sem); 4110 4111 bnxt_disable_int(bp); 4112 for (i = 0; i < bp->cp_nr_rings; i++) { 4113 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4114 4115 synchronize_irq(bp->irq_tbl[map_idx].vector); 4116 } 4117 } 4118 4119 static void bnxt_enable_int(struct bnxt *bp) 4120 { 4121 int i; 4122 4123 atomic_set(&bp->intr_sem, 0); 4124 for (i = 0; i < bp->cp_nr_rings; i++) { 4125 struct bnxt_napi *bnapi = bp->bnapi[i]; 4126 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4127 4128 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4129 } 4130 } 4131 4132 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4133 u16 cmpl_ring, u16 target_id) 4134 { 4135 struct input *req = request; 4136 4137 req->req_type = cpu_to_le16(req_type); 4138 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4139 req->target_id = cpu_to_le16(target_id); 4140 if (bnxt_kong_hwrm_message(bp, req)) 4141 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4142 else 4143 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4144 } 4145 4146 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4147 { 4148 switch (hwrm_err) { 4149 case HWRM_ERR_CODE_SUCCESS: 4150 return 0; 4151 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4152 return -EACCES; 4153 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4154 return -ENOSPC; 4155 case HWRM_ERR_CODE_INVALID_PARAMS: 4156 case 
HWRM_ERR_CODE_INVALID_FLAGS: 4157 case HWRM_ERR_CODE_INVALID_ENABLES: 4158 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4159 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 4160 return -EINVAL; 4161 case HWRM_ERR_CODE_NO_BUFFER: 4162 return -ENOMEM; 4163 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4164 case HWRM_ERR_CODE_BUSY: 4165 return -EAGAIN; 4166 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4167 return -EOPNOTSUPP; 4168 default: 4169 return -EIO; 4170 } 4171 } 4172 4173 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4174 int timeout, bool silent) 4175 { 4176 int i, intr_process, rc, tmo_count; 4177 struct input *req = msg; 4178 u32 *data = msg; 4179 __le32 *resp_len; 4180 u8 *valid; 4181 u16 cp_ring_id, len = 0; 4182 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4183 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4184 struct hwrm_short_input short_input = {0}; 4185 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4186 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4187 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4188 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4189 4190 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4191 return -EBUSY; 4192 4193 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4194 if (msg_len > bp->hwrm_max_ext_req_len || 4195 !bp->hwrm_short_cmd_req_addr) 4196 return -EINVAL; 4197 } 4198 4199 if (bnxt_hwrm_kong_chnl(bp, req)) { 4200 dst = BNXT_HWRM_CHNL_KONG; 4201 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4202 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4203 resp = bp->hwrm_cmd_kong_resp_addr; 4204 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4205 } 4206 4207 memset(resp, 0, PAGE_SIZE); 4208 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4209 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4210 4211 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4212 /* currently supports only one outstanding message */ 4213 if (intr_process) 4214 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4215 4216 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4217 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4218 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4219 u16 max_msg_len; 4220 4221 /* Set boundary for maximum extended request length for short 4222 * cmd format. If passed up from device use the max supported 4223 * internal req length. 
4224 */ 4225 max_msg_len = bp->hwrm_max_ext_req_len; 4226 4227 memcpy(short_cmd_req, req, msg_len); 4228 if (msg_len < max_msg_len) 4229 memset(short_cmd_req + msg_len, 0, 4230 max_msg_len - msg_len); 4231 4232 short_input.req_type = req->req_type; 4233 short_input.signature = 4234 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4235 short_input.size = cpu_to_le16(msg_len); 4236 short_input.req_addr = 4237 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4238 4239 data = (u32 *)&short_input; 4240 msg_len = sizeof(short_input); 4241 4242 /* Sync memory write before updating doorbell */ 4243 wmb(); 4244 4245 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4246 } 4247 4248 /* Write request msg to hwrm channel */ 4249 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4250 4251 for (i = msg_len; i < max_req_len; i += 4) 4252 writel(0, bp->bar0 + bar_offset + i); 4253 4254 /* Ring channel doorbell */ 4255 writel(1, bp->bar0 + doorbell_offset); 4256 4257 if (!pci_is_enabled(bp->pdev)) 4258 return 0; 4259 4260 if (!timeout) 4261 timeout = DFLT_HWRM_CMD_TIMEOUT; 4262 /* convert timeout to usec */ 4263 timeout *= 1000; 4264 4265 i = 0; 4266 /* Short timeout for the first few iterations: 4267 * number of loops = number of loops for short timeout + 4268 * number of loops for standard timeout. 4269 */ 4270 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4271 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4272 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4273 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4274 4275 if (intr_process) { 4276 u16 seq_id = bp->hwrm_intr_seq_id; 4277 4278 /* Wait until hwrm response cmpl interrupt is processed */ 4279 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4280 i++ < tmo_count) { 4281 /* Abort the wait for completion if the FW health 4282 * check has failed. 4283 */ 4284 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4285 return -EBUSY; 4286 /* on first few passes, just barely sleep */ 4287 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4288 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4289 HWRM_SHORT_MAX_TIMEOUT); 4290 else 4291 usleep_range(HWRM_MIN_TIMEOUT, 4292 HWRM_MAX_TIMEOUT); 4293 } 4294 4295 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4296 if (!silent) 4297 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4298 le16_to_cpu(req->req_type)); 4299 return -EBUSY; 4300 } 4301 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4302 HWRM_RESP_LEN_SFT; 4303 valid = resp_addr + len - 1; 4304 } else { 4305 int j; 4306 4307 /* Check if response len is updated */ 4308 for (i = 0; i < tmo_count; i++) { 4309 /* Abort the wait for completion if the FW health 4310 * check has failed. 
4311 */ 4312 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4313 return -EBUSY; 4314 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4315 HWRM_RESP_LEN_SFT; 4316 if (len) 4317 break; 4318 /* on first few passes, just barely sleep */ 4319 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4320 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4321 HWRM_SHORT_MAX_TIMEOUT); 4322 else 4323 usleep_range(HWRM_MIN_TIMEOUT, 4324 HWRM_MAX_TIMEOUT); 4325 } 4326 4327 if (i >= tmo_count) { 4328 if (!silent) 4329 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4330 HWRM_TOTAL_TIMEOUT(i), 4331 le16_to_cpu(req->req_type), 4332 le16_to_cpu(req->seq_id), len); 4333 return -EBUSY; 4334 } 4335 4336 /* Last byte of resp contains valid bit */ 4337 valid = resp_addr + len - 1; 4338 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4339 /* make sure we read from updated DMA memory */ 4340 dma_rmb(); 4341 if (*valid) 4342 break; 4343 usleep_range(1, 5); 4344 } 4345 4346 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4347 if (!silent) 4348 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4349 HWRM_TOTAL_TIMEOUT(i), 4350 le16_to_cpu(req->req_type), 4351 le16_to_cpu(req->seq_id), len, 4352 *valid); 4353 return -EBUSY; 4354 } 4355 } 4356 4357 /* Zero valid bit for compatibility. Valid bit in an older spec 4358 * may become a new field in a newer spec. We must make sure that 4359 * a new field not implemented by old spec will read zero. 4360 */ 4361 *valid = 0; 4362 rc = le16_to_cpu(resp->error_code); 4363 if (rc && !silent) 4364 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4365 le16_to_cpu(resp->req_type), 4366 le16_to_cpu(resp->seq_id), rc); 4367 return bnxt_hwrm_to_stderr(rc); 4368 } 4369 4370 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4371 { 4372 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4373 } 4374 4375 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4376 int timeout) 4377 { 4378 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4379 } 4380 4381 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4382 { 4383 int rc; 4384 4385 mutex_lock(&bp->hwrm_cmd_lock); 4386 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4387 mutex_unlock(&bp->hwrm_cmd_lock); 4388 return rc; 4389 } 4390 4391 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4392 int timeout) 4393 { 4394 int rc; 4395 4396 mutex_lock(&bp->hwrm_cmd_lock); 4397 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4398 mutex_unlock(&bp->hwrm_cmd_lock); 4399 return rc; 4400 } 4401 4402 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4403 bool async_only) 4404 { 4405 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4406 struct hwrm_func_drv_rgtr_input req = {0}; 4407 DECLARE_BITMAP(async_events_bmap, 256); 4408 u32 *events = (u32 *)async_events_bmap; 4409 u32 flags; 4410 int rc, i; 4411 4412 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4413 4414 req.enables = 4415 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4416 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4417 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4418 4419 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4420 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4421 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4422 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4423 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4424 flags |= 
FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4425 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4426 req.flags = cpu_to_le32(flags); 4427 req.ver_maj_8b = DRV_VER_MAJ; 4428 req.ver_min_8b = DRV_VER_MIN; 4429 req.ver_upd_8b = DRV_VER_UPD; 4430 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4431 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4432 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4433 4434 if (BNXT_PF(bp)) { 4435 u32 data[8]; 4436 int i; 4437 4438 memset(data, 0, sizeof(data)); 4439 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4440 u16 cmd = bnxt_vf_req_snif[i]; 4441 unsigned int bit, idx; 4442 4443 idx = cmd / 32; 4444 bit = cmd % 32; 4445 data[idx] |= 1 << bit; 4446 } 4447 4448 for (i = 0; i < 8; i++) 4449 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4450 4451 req.enables |= 4452 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4453 } 4454 4455 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4456 req.flags |= cpu_to_le32( 4457 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4458 4459 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4460 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4461 u16 event_id = bnxt_async_events_arr[i]; 4462 4463 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4464 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4465 continue; 4466 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4467 } 4468 if (bmap && bmap_size) { 4469 for (i = 0; i < bmap_size; i++) { 4470 if (test_bit(i, bmap)) 4471 __set_bit(i, async_events_bmap); 4472 } 4473 } 4474 for (i = 0; i < 8; i++) 4475 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4476 4477 if (async_only) 4478 req.enables = 4479 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4480 4481 mutex_lock(&bp->hwrm_cmd_lock); 4482 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4483 if (!rc) { 4484 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4485 if (resp->flags & 4486 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4487 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4488 } 4489 mutex_unlock(&bp->hwrm_cmd_lock); 4490 return rc; 4491 } 4492 4493 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4494 { 4495 struct hwrm_func_drv_unrgtr_input req = {0}; 4496 4497 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4498 return 0; 4499 4500 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4501 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4502 } 4503 4504 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4505 { 4506 u32 rc = 0; 4507 struct hwrm_tunnel_dst_port_free_input req = {0}; 4508 4509 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4510 req.tunnel_type = tunnel_type; 4511 4512 switch (tunnel_type) { 4513 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4514 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 4515 break; 4516 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4517 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 4518 break; 4519 default: 4520 break; 4521 } 4522 4523 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4524 if (rc) 4525 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 4526 rc); 4527 return rc; 4528 } 4529 4530 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4531 u8 tunnel_type) 4532 { 4533 u32 rc = 0; 4534 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4535 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4536 4537 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4538 4539 req.tunnel_type = tunnel_type; 4540 req.tunnel_dst_port_val = port; 4541 4542 mutex_lock(&bp->hwrm_cmd_lock); 4543 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4544 if (rc) { 4545 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 4546 rc); 4547 goto err_out; 4548 } 4549 4550 switch (tunnel_type) { 4551 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4552 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 4553 break; 4554 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4555 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 4556 break; 4557 default: 4558 break; 4559 } 4560 4561 err_out: 4562 mutex_unlock(&bp->hwrm_cmd_lock); 4563 return rc; 4564 } 4565 4566 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4567 { 4568 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4569 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4570 4571 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4572 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4573 4574 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4575 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4576 req.mask = cpu_to_le32(vnic->rx_mask); 4577 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4578 } 4579 4580 #ifdef CONFIG_RFS_ACCEL 4581 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4582 struct bnxt_ntuple_filter *fltr) 4583 { 4584 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4585 4586 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4587 req.ntuple_filter_id = fltr->filter_id; 4588 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4589 } 4590 4591 #define BNXT_NTP_FLTR_FLAGS \ 4592 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4593 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4594 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4595 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4596 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4597 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4598 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4599 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4600 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4601 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4604 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4605 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4606 4607 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4608 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4609 4610 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4611 struct bnxt_ntuple_filter *fltr) 4612 { 4613 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4614 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4615 struct flow_keys *keys = &fltr->fkeys; 4616 struct bnxt_vnic_info *vnic; 4617 u32 flags = 0; 4618 int rc = 0; 4619 4620 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4621 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4622 4623 if (bp->fw_cap & 
BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4624 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4625 req.dst_id = cpu_to_le16(fltr->rxq); 4626 } else { 4627 vnic = &bp->vnic_info[fltr->rxq + 1]; 4628 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4629 } 4630 req.flags = cpu_to_le32(flags); 4631 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4632 4633 req.ethertype = htons(ETH_P_IP); 4634 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4635 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4636 req.ip_protocol = keys->basic.ip_proto; 4637 4638 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4639 int i; 4640 4641 req.ethertype = htons(ETH_P_IPV6); 4642 req.ip_addr_type = 4643 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4644 *(struct in6_addr *)&req.src_ipaddr[0] = 4645 keys->addrs.v6addrs.src; 4646 *(struct in6_addr *)&req.dst_ipaddr[0] = 4647 keys->addrs.v6addrs.dst; 4648 for (i = 0; i < 4; i++) { 4649 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4650 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4651 } 4652 } else { 4653 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4654 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4655 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4656 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4657 } 4658 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4659 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4660 req.tunnel_type = 4661 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4662 } 4663 4664 req.src_port = keys->ports.src; 4665 req.src_port_mask = cpu_to_be16(0xffff); 4666 req.dst_port = keys->ports.dst; 4667 req.dst_port_mask = cpu_to_be16(0xffff); 4668 4669 mutex_lock(&bp->hwrm_cmd_lock); 4670 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4671 if (!rc) { 4672 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4673 fltr->filter_id = resp->ntuple_filter_id; 4674 } 4675 mutex_unlock(&bp->hwrm_cmd_lock); 4676 return rc; 4677 } 4678 #endif 4679 4680 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4681 u8 *mac_addr) 4682 { 4683 u32 rc = 0; 4684 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4685 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4686 4687 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4688 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4689 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4690 req.flags |= 4691 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4692 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4693 req.enables = 4694 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4695 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4696 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4697 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4698 req.l2_addr_mask[0] = 0xff; 4699 req.l2_addr_mask[1] = 0xff; 4700 req.l2_addr_mask[2] = 0xff; 4701 req.l2_addr_mask[3] = 0xff; 4702 req.l2_addr_mask[4] = 0xff; 4703 req.l2_addr_mask[5] = 0xff; 4704 4705 mutex_lock(&bp->hwrm_cmd_lock); 4706 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4707 if (!rc) 4708 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4709 resp->l2_filter_id; 4710 mutex_unlock(&bp->hwrm_cmd_lock); 4711 return rc; 4712 } 4713 4714 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4715 { 4716 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4717 int rc = 0; 4718 4719 /* Any associated ntuple filters will also be cleared by firmware. 
 */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
	struct hwrm_vnic_tpa_cfg_input req = {0};

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The maximum number of aggregation segments is expressed in
		 * log2 units, and the first packet is not counted as part of
		 * these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			segs = MAX_TPA_SEGS_P5;
			max_aggs = bp->max_tpa;
		} else {
			segs = ilog2(nsegs);
		}
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(max_aggs);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct bnxt_ring_grp_info *grp_info;

	grp_info = &bp->grp_info[ring->grp_idx];
	return grp_info->cp_fw_ring_id;
}

static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = rxr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
	}
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = txr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
	}
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct
bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct hwrm_vnic_rss_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss) {
		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		return 0;
	}
	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
	for (i = 0, k = 0; i < nr_ctxs; i++) {
		__le16 *ring_tbl = vnic->rss_table;
		int rc;

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		for (j = 0; j < 64; j++) {
			u16 ring_id;

			ring_id = rxr->rx_ring_struct.fw_ring_id;
			*ring_tbl++ = cpu_to_le16(ring_id);
			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
			*ring_tbl++ = cpu_to_le16(ring_id);
			rxr++;
			k++;
			if (k == max_rings) {
				k = 0;
				rxr = &bp->rx_ring[0];
			}
		}
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
	}
	return 0;
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
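/* A worked example of the RSS table layout programmed by
 * bnxt_hwrm_vnic_set_rss_p5() above, based only on the loop structure
 * visible in that function (the firmware-side interpretation is not
 * described here): each HWRM_VNIC_RSS_CFG message carries one context of
 * 64 ring-table entries, and each entry is written as a pair of
 *
 *	{ RX ring FW id, RX completion ring FW id }
 *
 * with the RX ring index wrapping at bp->rx_nr_rings.  So with
 * rx_nr_rings = 4, nr_ctxs = DIV_ROUND_UP(4, 64) = 1 and the single
 * context holds the pairs for rings 0, 1, 2, 3 repeated sixteen times.
 */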
4936 4937 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 4938 u16 ctx_idx) 4939 { 4940 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 4941 4942 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 4943 req.rss_cos_lb_ctx_id = 4944 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 4945 4946 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4947 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 4948 } 4949 4950 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 4951 { 4952 int i, j; 4953 4954 for (i = 0; i < bp->nr_vnics; i++) { 4955 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4956 4957 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 4958 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 4959 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 4960 } 4961 } 4962 bp->rsscos_nr_ctxs = 0; 4963 } 4964 4965 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 4966 { 4967 int rc; 4968 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 4969 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 4970 bp->hwrm_cmd_resp_addr; 4971 4972 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 4973 -1); 4974 4975 mutex_lock(&bp->hwrm_cmd_lock); 4976 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4977 if (!rc) 4978 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 4979 le16_to_cpu(resp->rss_cos_lb_ctx_id); 4980 mutex_unlock(&bp->hwrm_cmd_lock); 4981 4982 return rc; 4983 } 4984 4985 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 4986 { 4987 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 4988 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 4989 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 4990 } 4991 4992 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 4993 { 4994 unsigned int ring = 0, grp_idx; 4995 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4996 struct hwrm_vnic_cfg_input req = {0}; 4997 u16 def_vlan = 0; 4998 4999 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 5000 5001 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5002 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5003 5004 req.default_rx_ring_id = 5005 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5006 req.default_cmpl_ring_id = 5007 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5008 req.enables = 5009 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5010 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5011 goto vnic_mru; 5012 } 5013 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5014 /* Only RSS support for now TBD: COS & LB */ 5015 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5016 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5017 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5018 VNIC_CFG_REQ_ENABLES_MRU); 5019 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5020 req.rss_rule = 5021 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5022 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5023 VNIC_CFG_REQ_ENABLES_MRU); 5024 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5025 } else { 5026 req.rss_rule = cpu_to_le16(0xffff); 5027 } 5028 5029 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5030 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5031 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5032 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5033 } else { 5034 req.cos_rule = cpu_to_le16(0xffff); 5035 } 5036 5037 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5038 ring = 0; 5039 
else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5040 ring = vnic_id - 1; 5041 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5042 ring = bp->rx_nr_rings - 1; 5043 5044 grp_idx = bp->rx_ring[ring].bnapi->index; 5045 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5046 req.lb_rule = cpu_to_le16(0xffff); 5047 vnic_mru: 5048 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 5049 VLAN_HLEN); 5050 5051 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5052 #ifdef CONFIG_BNXT_SRIOV 5053 if (BNXT_VF(bp)) 5054 def_vlan = bp->vf.vlan; 5055 #endif 5056 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5057 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5058 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5059 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5060 5061 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5062 } 5063 5064 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5065 { 5066 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5067 struct hwrm_vnic_free_input req = {0}; 5068 5069 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5070 req.vnic_id = 5071 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5072 5073 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5074 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5075 } 5076 } 5077 5078 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5079 { 5080 u16 i; 5081 5082 for (i = 0; i < bp->nr_vnics; i++) 5083 bnxt_hwrm_vnic_free_one(bp, i); 5084 } 5085 5086 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5087 unsigned int start_rx_ring_idx, 5088 unsigned int nr_rings) 5089 { 5090 int rc = 0; 5091 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5092 struct hwrm_vnic_alloc_input req = {0}; 5093 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5094 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5095 5096 if (bp->flags & BNXT_FLAG_CHIP_P5) 5097 goto vnic_no_ring_grps; 5098 5099 /* map ring groups to this vnic */ 5100 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5101 grp_idx = bp->rx_ring[i].bnapi->index; 5102 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5103 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5104 j, nr_rings); 5105 break; 5106 } 5107 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5108 } 5109 5110 vnic_no_ring_grps: 5111 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5112 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5113 if (vnic_id == 0) 5114 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5115 5116 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5117 5118 mutex_lock(&bp->hwrm_cmd_lock); 5119 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5120 if (!rc) 5121 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5122 mutex_unlock(&bp->hwrm_cmd_lock); 5123 return rc; 5124 } 5125 5126 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5127 { 5128 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5129 struct hwrm_vnic_qcaps_input req = {0}; 5130 int rc; 5131 5132 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5133 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5134 if (bp->hwrm_spec_code < 0x10600) 5135 return 0; 5136 5137 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5138 mutex_lock(&bp->hwrm_cmd_lock); 5139 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5140 if (!rc) { 5141 u32 flags = 
le32_to_cpu(resp->flags); 5142 5143 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5144 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5145 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5146 if (flags & 5147 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5148 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5149 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5150 if (bp->max_tpa_v2) 5151 bp->hw_ring_stats_size = 5152 sizeof(struct ctx_hw_stats_ext); 5153 } 5154 mutex_unlock(&bp->hwrm_cmd_lock); 5155 return rc; 5156 } 5157 5158 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5159 { 5160 u16 i; 5161 u32 rc = 0; 5162 5163 if (bp->flags & BNXT_FLAG_CHIP_P5) 5164 return 0; 5165 5166 mutex_lock(&bp->hwrm_cmd_lock); 5167 for (i = 0; i < bp->rx_nr_rings; i++) { 5168 struct hwrm_ring_grp_alloc_input req = {0}; 5169 struct hwrm_ring_grp_alloc_output *resp = 5170 bp->hwrm_cmd_resp_addr; 5171 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5172 5173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5174 5175 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5176 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5177 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5178 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5179 5180 rc = _hwrm_send_message(bp, &req, sizeof(req), 5181 HWRM_CMD_TIMEOUT); 5182 if (rc) 5183 break; 5184 5185 bp->grp_info[grp_idx].fw_grp_id = 5186 le32_to_cpu(resp->ring_group_id); 5187 } 5188 mutex_unlock(&bp->hwrm_cmd_lock); 5189 return rc; 5190 } 5191 5192 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5193 { 5194 u16 i; 5195 struct hwrm_ring_grp_free_input req = {0}; 5196 5197 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5198 return; 5199 5200 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5201 5202 mutex_lock(&bp->hwrm_cmd_lock); 5203 for (i = 0; i < bp->cp_nr_rings; i++) { 5204 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5205 continue; 5206 req.ring_group_id = 5207 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5208 5209 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5210 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5211 } 5212 mutex_unlock(&bp->hwrm_cmd_lock); 5213 } 5214 5215 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5216 struct bnxt_ring_struct *ring, 5217 u32 ring_type, u32 map_index) 5218 { 5219 int rc = 0, err = 0; 5220 struct hwrm_ring_alloc_input req = {0}; 5221 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5222 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5223 struct bnxt_ring_grp_info *grp_info; 5224 u16 ring_id; 5225 5226 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5227 5228 req.enables = 0; 5229 if (rmem->nr_pages > 1) { 5230 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5231 /* Page size is in log2 units */ 5232 req.page_size = BNXT_PAGE_SHIFT; 5233 req.page_tbl_depth = 1; 5234 } else { 5235 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5236 } 5237 req.fbo = 0; 5238 /* Association of ring index with doorbell index and MSIX number */ 5239 req.logical_id = cpu_to_le16(map_index); 5240 5241 switch (ring_type) { 5242 case HWRM_RING_ALLOC_TX: { 5243 struct bnxt_tx_ring_info *txr; 5244 5245 txr = container_of(ring, struct bnxt_tx_ring_info, 5246 tx_ring_struct); 5247 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5248 /* Association of transmit ring with completion ring */ 5249 grp_info = &bp->grp_info[ring->grp_idx]; 5250 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5251 req.length = 
cpu_to_le32(bp->tx_ring_mask + 1); 5252 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5253 req.queue_id = cpu_to_le16(ring->queue_id); 5254 break; 5255 } 5256 case HWRM_RING_ALLOC_RX: 5257 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5258 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5259 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5260 u16 flags = 0; 5261 5262 /* Association of rx ring with stats context */ 5263 grp_info = &bp->grp_info[ring->grp_idx]; 5264 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5265 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5266 req.enables |= cpu_to_le32( 5267 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5268 if (NET_IP_ALIGN == 2) 5269 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5270 req.flags = cpu_to_le16(flags); 5271 } 5272 break; 5273 case HWRM_RING_ALLOC_AGG: 5274 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5275 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5276 /* Association of agg ring with rx ring */ 5277 grp_info = &bp->grp_info[ring->grp_idx]; 5278 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5279 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5280 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5281 req.enables |= cpu_to_le32( 5282 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5283 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5284 } else { 5285 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5286 } 5287 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5288 break; 5289 case HWRM_RING_ALLOC_CMPL: 5290 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5291 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5292 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5293 /* Association of cp ring with nq */ 5294 grp_info = &bp->grp_info[map_index]; 5295 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5296 req.cq_handle = cpu_to_le64(ring->handle); 5297 req.enables |= cpu_to_le32( 5298 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5299 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5300 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5301 } 5302 break; 5303 case HWRM_RING_ALLOC_NQ: 5304 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5305 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5306 if (bp->flags & BNXT_FLAG_USING_MSIX) 5307 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5308 break; 5309 default: 5310 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5311 ring_type); 5312 return -1; 5313 } 5314 5315 mutex_lock(&bp->hwrm_cmd_lock); 5316 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5317 err = le16_to_cpu(resp->error_code); 5318 ring_id = le16_to_cpu(resp->ring_id); 5319 mutex_unlock(&bp->hwrm_cmd_lock); 5320 5321 if (rc || err) { 5322 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5323 ring_type, rc, err); 5324 return -EIO; 5325 } 5326 ring->fw_ring_id = ring_id; 5327 return rc; 5328 } 5329 5330 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5331 { 5332 int rc; 5333 5334 if (BNXT_PF(bp)) { 5335 struct hwrm_func_cfg_input req = {0}; 5336 5337 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5338 req.fid = cpu_to_le16(0xffff); 5339 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5340 req.async_event_cr = cpu_to_le16(idx); 5341 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5342 } else { 5343 struct hwrm_func_vf_cfg_input req = {0}; 5344 5345 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5346 req.enables = 5347 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5348 req.async_event_cr = cpu_to_le16(idx); 5349 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5350 } 5351 return rc; 5352 } 5353 5354 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5355 u32 map_idx, u32 xid) 5356 { 5357 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5358 if (BNXT_PF(bp)) 5359 db->doorbell = bp->bar1 + 0x10000; 5360 else 5361 db->doorbell = bp->bar1 + 0x4000; 5362 switch (ring_type) { 5363 case HWRM_RING_ALLOC_TX: 5364 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5365 break; 5366 case HWRM_RING_ALLOC_RX: 5367 case HWRM_RING_ALLOC_AGG: 5368 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5369 break; 5370 case HWRM_RING_ALLOC_CMPL: 5371 db->db_key64 = DBR_PATH_L2; 5372 break; 5373 case HWRM_RING_ALLOC_NQ: 5374 db->db_key64 = DBR_PATH_L2; 5375 break; 5376 } 5377 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5378 } else { 5379 db->doorbell = bp->bar1 + map_idx * 0x80; 5380 switch (ring_type) { 5381 case HWRM_RING_ALLOC_TX: 5382 db->db_key32 = DB_KEY_TX; 5383 break; 5384 case HWRM_RING_ALLOC_RX: 5385 case HWRM_RING_ALLOC_AGG: 5386 db->db_key32 = DB_KEY_RX; 5387 break; 5388 case HWRM_RING_ALLOC_CMPL: 5389 db->db_key32 = DB_KEY_CP; 5390 break; 5391 } 5392 } 5393 } 5394 5395 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5396 { 5397 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5398 int i, rc = 0; 5399 u32 type; 5400 5401 if (bp->flags & BNXT_FLAG_CHIP_P5) 5402 type = HWRM_RING_ALLOC_NQ; 5403 else 5404 type = HWRM_RING_ALLOC_CMPL; 5405 for (i = 0; i < bp->cp_nr_rings; i++) { 5406 struct bnxt_napi *bnapi = bp->bnapi[i]; 5407 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5408 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5409 u32 map_idx = ring->map_idx; 5410 unsigned int vector; 5411 5412 vector = bp->irq_tbl[map_idx].vector; 5413 disable_irq_nosync(vector); 5414 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5415 if (rc) { 5416 enable_irq(vector); 5417 goto err_out; 5418 } 5419 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5420 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5421 enable_irq(vector); 5422 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5423 5424 if (!i) { 5425 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5426 if (rc) 5427 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5428 } 5429 } 5430 5431 type = HWRM_RING_ALLOC_TX; 5432 for (i = 0; i < bp->tx_nr_rings; i++) { 5433 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5434 struct bnxt_ring_struct *ring; 5435 u32 map_idx; 5436 5437 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5438 struct bnxt_napi *bnapi = txr->bnapi; 5439 struct bnxt_cp_ring_info *cpr, *cpr2; 5440 u32 type2 = HWRM_RING_ALLOC_CMPL; 5441 5442 cpr = &bnapi->cp_ring; 5443 cpr2 = 
cpr->cp_ring_arr[BNXT_TX_HDL]; 5444 ring = &cpr2->cp_ring_struct; 5445 ring->handle = BNXT_TX_HDL; 5446 map_idx = bnapi->index; 5447 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5448 if (rc) 5449 goto err_out; 5450 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5451 ring->fw_ring_id); 5452 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5453 } 5454 ring = &txr->tx_ring_struct; 5455 map_idx = i; 5456 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5457 if (rc) 5458 goto err_out; 5459 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5460 } 5461 5462 type = HWRM_RING_ALLOC_RX; 5463 for (i = 0; i < bp->rx_nr_rings; i++) { 5464 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5465 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5466 struct bnxt_napi *bnapi = rxr->bnapi; 5467 u32 map_idx = bnapi->index; 5468 5469 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5470 if (rc) 5471 goto err_out; 5472 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5473 /* If we have agg rings, post agg buffers first. */ 5474 if (!agg_rings) 5475 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5476 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5477 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5478 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5479 u32 type2 = HWRM_RING_ALLOC_CMPL; 5480 struct bnxt_cp_ring_info *cpr2; 5481 5482 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5483 ring = &cpr2->cp_ring_struct; 5484 ring->handle = BNXT_RX_HDL; 5485 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5486 if (rc) 5487 goto err_out; 5488 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5489 ring->fw_ring_id); 5490 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5491 } 5492 } 5493 5494 if (agg_rings) { 5495 type = HWRM_RING_ALLOC_AGG; 5496 for (i = 0; i < bp->rx_nr_rings; i++) { 5497 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5498 struct bnxt_ring_struct *ring = 5499 &rxr->rx_agg_ring_struct; 5500 u32 grp_idx = ring->grp_idx; 5501 u32 map_idx = grp_idx + bp->rx_nr_rings; 5502 5503 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5504 if (rc) 5505 goto err_out; 5506 5507 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5508 ring->fw_ring_id); 5509 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5510 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5511 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5512 } 5513 } 5514 err_out: 5515 return rc; 5516 } 5517 5518 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5519 struct bnxt_ring_struct *ring, 5520 u32 ring_type, int cmpl_ring_id) 5521 { 5522 int rc; 5523 struct hwrm_ring_free_input req = {0}; 5524 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5525 u16 error_code; 5526 5527 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 5528 return 0; 5529 5530 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5531 req.ring_type = ring_type; 5532 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5533 5534 mutex_lock(&bp->hwrm_cmd_lock); 5535 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5536 error_code = le16_to_cpu(resp->error_code); 5537 mutex_unlock(&bp->hwrm_cmd_lock); 5538 5539 if (rc || error_code) { 5540 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5541 ring_type, rc, error_code); 5542 return -EIO; 5543 } 5544 return 0; 5545 } 5546 5547 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5548 { 5549 u32 type; 5550 int i; 5551 5552 if (!bp->bnapi) 5553 return; 5554 5555 for (i = 0; i < bp->tx_nr_rings; i++) { 5556 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5557 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5558 5559 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5560 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5561 5562 hwrm_ring_free_send_msg(bp, ring, 5563 RING_FREE_REQ_RING_TYPE_TX, 5564 close_path ? cmpl_ring_id : 5565 INVALID_HW_RING_ID); 5566 ring->fw_ring_id = INVALID_HW_RING_ID; 5567 } 5568 } 5569 5570 for (i = 0; i < bp->rx_nr_rings; i++) { 5571 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5572 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5573 u32 grp_idx = rxr->bnapi->index; 5574 5575 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5576 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5577 5578 hwrm_ring_free_send_msg(bp, ring, 5579 RING_FREE_REQ_RING_TYPE_RX, 5580 close_path ? cmpl_ring_id : 5581 INVALID_HW_RING_ID); 5582 ring->fw_ring_id = INVALID_HW_RING_ID; 5583 bp->grp_info[grp_idx].rx_fw_ring_id = 5584 INVALID_HW_RING_ID; 5585 } 5586 } 5587 5588 if (bp->flags & BNXT_FLAG_CHIP_P5) 5589 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5590 else 5591 type = RING_FREE_REQ_RING_TYPE_RX; 5592 for (i = 0; i < bp->rx_nr_rings; i++) { 5593 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5594 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5595 u32 grp_idx = rxr->bnapi->index; 5596 5597 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5598 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5599 5600 hwrm_ring_free_send_msg(bp, ring, type, 5601 close_path ? cmpl_ring_id : 5602 INVALID_HW_RING_ID); 5603 ring->fw_ring_id = INVALID_HW_RING_ID; 5604 bp->grp_info[grp_idx].agg_fw_ring_id = 5605 INVALID_HW_RING_ID; 5606 } 5607 } 5608 5609 /* The completion rings are about to be freed. After that the 5610 * IRQ doorbell will not work anymore. So we need to disable 5611 * IRQ here. 
5612 */ 5613 bnxt_disable_int_sync(bp); 5614 5615 if (bp->flags & BNXT_FLAG_CHIP_P5) 5616 type = RING_FREE_REQ_RING_TYPE_NQ; 5617 else 5618 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5619 for (i = 0; i < bp->cp_nr_rings; i++) { 5620 struct bnxt_napi *bnapi = bp->bnapi[i]; 5621 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5622 struct bnxt_ring_struct *ring; 5623 int j; 5624 5625 for (j = 0; j < 2; j++) { 5626 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5627 5628 if (cpr2) { 5629 ring = &cpr2->cp_ring_struct; 5630 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5631 continue; 5632 hwrm_ring_free_send_msg(bp, ring, 5633 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5634 INVALID_HW_RING_ID); 5635 ring->fw_ring_id = INVALID_HW_RING_ID; 5636 } 5637 } 5638 ring = &cpr->cp_ring_struct; 5639 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5640 hwrm_ring_free_send_msg(bp, ring, type, 5641 INVALID_HW_RING_ID); 5642 ring->fw_ring_id = INVALID_HW_RING_ID; 5643 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5644 } 5645 } 5646 } 5647 5648 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5649 bool shared); 5650 5651 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5652 { 5653 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5654 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5655 struct hwrm_func_qcfg_input req = {0}; 5656 int rc; 5657 5658 if (bp->hwrm_spec_code < 0x10601) 5659 return 0; 5660 5661 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5662 req.fid = cpu_to_le16(0xffff); 5663 mutex_lock(&bp->hwrm_cmd_lock); 5664 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5665 if (rc) { 5666 mutex_unlock(&bp->hwrm_cmd_lock); 5667 return rc; 5668 } 5669 5670 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5671 if (BNXT_NEW_RM(bp)) { 5672 u16 cp, stats; 5673 5674 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5675 hw_resc->resv_hw_ring_grps = 5676 le32_to_cpu(resp->alloc_hw_ring_grps); 5677 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 5678 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5679 stats = le16_to_cpu(resp->alloc_stat_ctx); 5680 hw_resc->resv_irqs = cp; 5681 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5682 int rx = hw_resc->resv_rx_rings; 5683 int tx = hw_resc->resv_tx_rings; 5684 5685 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5686 rx >>= 1; 5687 if (cp < (rx + tx)) { 5688 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5689 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5690 rx <<= 1; 5691 hw_resc->resv_rx_rings = rx; 5692 hw_resc->resv_tx_rings = tx; 5693 } 5694 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 5695 hw_resc->resv_hw_ring_grps = rx; 5696 } 5697 hw_resc->resv_cp_rings = cp; 5698 hw_resc->resv_stat_ctxs = stats; 5699 } 5700 mutex_unlock(&bp->hwrm_cmd_lock); 5701 return 0; 5702 } 5703 5704 /* Caller must hold bp->hwrm_cmd_lock */ 5705 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5706 { 5707 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5708 struct hwrm_func_qcfg_input req = {0}; 5709 int rc; 5710 5711 if (bp->hwrm_spec_code < 0x10601) 5712 return 0; 5713 5714 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5715 req.fid = cpu_to_le16(fid); 5716 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5717 if (!rc) 5718 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5719 5720 return rc; 5721 } 5722 5723 static bool bnxt_rfs_supported(struct bnxt *bp); 5724 5725 static void 5726 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5727 int 
tx_rings, int rx_rings, int ring_grps, 5728 int cp_rings, int stats, int vnics) 5729 { 5730 u32 enables = 0; 5731 5732 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5733 req->fid = cpu_to_le16(0xffff); 5734 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5735 req->num_tx_rings = cpu_to_le16(tx_rings); 5736 if (BNXT_NEW_RM(bp)) { 5737 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5738 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5739 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5740 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5741 enables |= tx_rings + ring_grps ? 5742 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5743 enables |= rx_rings ? 5744 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5745 } else { 5746 enables |= cp_rings ? 5747 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5748 enables |= ring_grps ? 5749 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5750 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5751 } 5752 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 5753 5754 req->num_rx_rings = cpu_to_le16(rx_rings); 5755 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5756 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5757 req->num_msix = cpu_to_le16(cp_rings); 5758 req->num_rsscos_ctxs = 5759 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5760 } else { 5761 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5762 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5763 req->num_rsscos_ctxs = cpu_to_le16(1); 5764 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 5765 bnxt_rfs_supported(bp)) 5766 req->num_rsscos_ctxs = 5767 cpu_to_le16(ring_grps + 1); 5768 } 5769 req->num_stat_ctxs = cpu_to_le16(stats); 5770 req->num_vnics = cpu_to_le16(vnics); 5771 } 5772 req->enables = cpu_to_le32(enables); 5773 } 5774 5775 static void 5776 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 5777 struct hwrm_func_vf_cfg_input *req, int tx_rings, 5778 int rx_rings, int ring_grps, int cp_rings, 5779 int stats, int vnics) 5780 { 5781 u32 enables = 0; 5782 5783 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 5784 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5785 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5786 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5787 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5788 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5789 enables |= tx_rings + ring_grps ? 5790 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5791 } else { 5792 enables |= cp_rings ? 5793 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5794 enables |= ring_grps ? 5795 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5796 } 5797 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 5798 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 5799 5800 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5801 req->num_tx_rings = cpu_to_le16(tx_rings); 5802 req->num_rx_rings = cpu_to_le16(rx_rings); 5803 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5804 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5805 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5806 } else { 5807 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5808 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5809 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 5810 } 5811 req->num_stat_ctxs = cpu_to_le16(stats); 5812 req->num_vnics = cpu_to_le16(vnics); 5813 5814 req->enables = cpu_to_le32(enables); 5815 } 5816 5817 static int 5818 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5819 int ring_grps, int cp_rings, int stats, int vnics) 5820 { 5821 struct hwrm_func_cfg_input req = {0}; 5822 int rc; 5823 5824 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5825 cp_rings, stats, vnics); 5826 if (!req.enables) 5827 return 0; 5828 5829 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5830 if (rc) 5831 return rc; 5832 5833 if (bp->hwrm_spec_code < 0x10601) 5834 bp->hw_resc.resv_tx_rings = tx_rings; 5835 5836 return bnxt_hwrm_get_rings(bp); 5837 } 5838 5839 static int 5840 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5841 int ring_grps, int cp_rings, int stats, int vnics) 5842 { 5843 struct hwrm_func_vf_cfg_input req = {0}; 5844 int rc; 5845 5846 if (!BNXT_NEW_RM(bp)) { 5847 bp->hw_resc.resv_tx_rings = tx_rings; 5848 return 0; 5849 } 5850 5851 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5852 cp_rings, stats, vnics); 5853 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5854 if (rc) 5855 return rc; 5856 5857 return bnxt_hwrm_get_rings(bp); 5858 } 5859 5860 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 5861 int cp, int stat, int vnic) 5862 { 5863 if (BNXT_PF(bp)) 5864 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 5865 vnic); 5866 else 5867 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 5868 vnic); 5869 } 5870 5871 int bnxt_nq_rings_in_use(struct bnxt *bp) 5872 { 5873 int cp = bp->cp_nr_rings; 5874 int ulp_msix, ulp_base; 5875 5876 ulp_msix = bnxt_get_ulp_msix_num(bp); 5877 if (ulp_msix) { 5878 ulp_base = bnxt_get_ulp_msix_base(bp); 5879 cp += ulp_msix; 5880 if ((ulp_base + ulp_msix) > cp) 5881 cp = ulp_base + ulp_msix; 5882 } 5883 return cp; 5884 } 5885 5886 static int bnxt_cp_rings_in_use(struct bnxt *bp) 5887 { 5888 int cp; 5889 5890 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5891 return bnxt_nq_rings_in_use(bp); 5892 5893 cp = bp->tx_nr_rings + bp->rx_nr_rings; 5894 return cp; 5895 } 5896 5897 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 5898 { 5899 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 5900 int cp = bp->cp_nr_rings; 5901 5902 if (!ulp_stat) 5903 return cp; 5904 5905 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 5906 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 5907 5908 return cp + ulp_stat; 5909 } 5910 5911 static bool bnxt_need_reserve_rings(struct bnxt *bp) 5912 { 5913 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5914 int cp = bnxt_cp_rings_in_use(bp); 5915 int nq = bnxt_nq_rings_in_use(bp); 5916 int rx = bp->rx_nr_rings, stat; 5917 int vnic = 1, grp = rx; 5918 5919 if (bp->hwrm_spec_code < 0x10601) 5920 return false; 5921 5922 if 
(hw_resc->resv_tx_rings != bp->tx_nr_rings) 5923 return true; 5924 5925 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5926 vnic = rx + 1; 5927 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5928 rx <<= 1; 5929 stat = bnxt_get_func_stat_ctxs(bp); 5930 if (BNXT_NEW_RM(bp) && 5931 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 5932 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 5933 (hw_resc->resv_hw_ring_grps != grp && 5934 !(bp->flags & BNXT_FLAG_CHIP_P5)))) 5935 return true; 5936 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 5937 hw_resc->resv_irqs != nq) 5938 return true; 5939 return false; 5940 } 5941 5942 static int __bnxt_reserve_rings(struct bnxt *bp) 5943 { 5944 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5945 int cp = bnxt_nq_rings_in_use(bp); 5946 int tx = bp->tx_nr_rings; 5947 int rx = bp->rx_nr_rings; 5948 int grp, rx_rings, rc; 5949 int vnic = 1, stat; 5950 bool sh = false; 5951 5952 if (!bnxt_need_reserve_rings(bp)) 5953 return 0; 5954 5955 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5956 sh = true; 5957 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5958 vnic = rx + 1; 5959 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5960 rx <<= 1; 5961 grp = bp->rx_nr_rings; 5962 stat = bnxt_get_func_stat_ctxs(bp); 5963 5964 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 5965 if (rc) 5966 return rc; 5967 5968 tx = hw_resc->resv_tx_rings; 5969 if (BNXT_NEW_RM(bp)) { 5970 rx = hw_resc->resv_rx_rings; 5971 cp = hw_resc->resv_irqs; 5972 grp = hw_resc->resv_hw_ring_grps; 5973 vnic = hw_resc->resv_vnics; 5974 stat = hw_resc->resv_stat_ctxs; 5975 } 5976 5977 rx_rings = rx; 5978 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5979 if (rx >= 2) { 5980 rx_rings = rx >> 1; 5981 } else { 5982 if (netif_running(bp->dev)) 5983 return -ENOMEM; 5984 5985 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 5986 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 5987 bp->dev->hw_features &= ~NETIF_F_LRO; 5988 bp->dev->features &= ~NETIF_F_LRO; 5989 bnxt_set_ring_params(bp); 5990 } 5991 } 5992 rx_rings = min_t(int, rx_rings, grp); 5993 cp = min_t(int, cp, bp->cp_nr_rings); 5994 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 5995 stat -= bnxt_get_ulp_stat_ctxs(bp); 5996 cp = min_t(int, cp, stat); 5997 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 5998 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5999 rx = rx_rings << 1; 6000 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 6001 bp->tx_nr_rings = tx; 6002 bp->rx_nr_rings = rx_rings; 6003 bp->cp_nr_rings = cp; 6004 6005 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6006 return -ENOMEM; 6007 6008 return rc; 6009 } 6010 6011 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6012 int ring_grps, int cp_rings, int stats, 6013 int vnics) 6014 { 6015 struct hwrm_func_vf_cfg_input req = {0}; 6016 u32 flags; 6017 6018 if (!BNXT_NEW_RM(bp)) 6019 return 0; 6020 6021 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6022 cp_rings, stats, vnics); 6023 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6024 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6025 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6026 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6027 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6028 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6029 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6030 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6031 6032 req.flags = cpu_to_le32(flags); 6033 return hwrm_send_message_silent(bp, &req, sizeof(req), 6034 HWRM_CMD_TIMEOUT); 6035 } 6036 6037 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6038 int ring_grps, int cp_rings, int stats, 6039 int vnics) 6040 { 6041 struct hwrm_func_cfg_input req = {0}; 6042 u32 flags; 6043 6044 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6045 cp_rings, stats, vnics); 6046 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6047 if (BNXT_NEW_RM(bp)) { 6048 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6049 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6050 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6051 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6052 if (bp->flags & BNXT_FLAG_CHIP_P5) 6053 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6054 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6055 else 6056 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6057 } 6058 6059 req.flags = cpu_to_le32(flags); 6060 return hwrm_send_message_silent(bp, &req, sizeof(req), 6061 HWRM_CMD_TIMEOUT); 6062 } 6063 6064 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6065 int ring_grps, int cp_rings, int stats, 6066 int vnics) 6067 { 6068 if (bp->hwrm_spec_code < 0x10801) 6069 return 0; 6070 6071 if (BNXT_PF(bp)) 6072 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6073 ring_grps, cp_rings, stats, 6074 vnics); 6075 6076 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6077 cp_rings, stats, vnics); 6078 } 6079 6080 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6081 { 6082 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6083 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6084 struct hwrm_ring_aggint_qcaps_input req = {0}; 6085 int rc; 6086 6087 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6088 coal_cap->num_cmpl_dma_aggr_max = 63; 6089 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6090 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6091 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6092 coal_cap->int_lat_tmr_min_max = 65535; 6093 coal_cap->int_lat_tmr_max_max = 65535; 6094 coal_cap->num_cmpl_aggr_int_max = 65535; 6095 coal_cap->timer_units = 80; 6096 6097 if (bp->hwrm_spec_code < 0x10902) 6098 return; 6099 6100 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6101 mutex_lock(&bp->hwrm_cmd_lock); 6102 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6103 if (!rc) { 6104 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6105 
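/* nq_params describes which coalescing parameters the firmware
 * supports on the notification queues used by the P5 (57500) chips.
 * If the NQ min-timer bit is not advertised here,
 * __bnxt_hwrm_set_coal_nq() below simply returns without sending a
 * command.
 */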
coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6106 coal_cap->num_cmpl_dma_aggr_max = 6107 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6108 coal_cap->num_cmpl_dma_aggr_during_int_max = 6109 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6110 coal_cap->cmpl_aggr_dma_tmr_max = 6111 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6112 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6113 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6114 coal_cap->int_lat_tmr_min_max = 6115 le16_to_cpu(resp->int_lat_tmr_min_max); 6116 coal_cap->int_lat_tmr_max_max = 6117 le16_to_cpu(resp->int_lat_tmr_max_max); 6118 coal_cap->num_cmpl_aggr_int_max = 6119 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6120 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6121 } 6122 mutex_unlock(&bp->hwrm_cmd_lock); 6123 } 6124 6125 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6126 { 6127 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6128 6129 return usec * 1000 / coal_cap->timer_units; 6130 } 6131 6132 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6133 struct bnxt_coal *hw_coal, 6134 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6135 { 6136 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6137 u32 cmpl_params = coal_cap->cmpl_params; 6138 u16 val, tmr, max, flags = 0; 6139 6140 max = hw_coal->bufs_per_record * 128; 6141 if (hw_coal->budget) 6142 max = hw_coal->bufs_per_record * hw_coal->budget; 6143 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6144 6145 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6146 req->num_cmpl_aggr_int = cpu_to_le16(val); 6147 6148 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6149 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6150 6151 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6152 coal_cap->num_cmpl_dma_aggr_during_int_max); 6153 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6154 6155 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6156 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6157 req->int_lat_tmr_max = cpu_to_le16(tmr); 6158 6159 /* min timer set to 1/2 of interrupt timer */ 6160 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6161 val = tmr / 2; 6162 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6163 req->int_lat_tmr_min = cpu_to_le16(val); 6164 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6165 } 6166 6167 /* buf timer set to 1/4 of interrupt timer */ 6168 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6169 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6170 6171 if (cmpl_params & 6172 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6173 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6174 val = clamp_t(u16, tmr, 1, 6175 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6176 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6177 req->enables |= 6178 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 6179 } 6180 6181 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 6182 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 6183 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 6184 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 6185 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 6186 req->flags = cpu_to_le16(flags); 6187 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 6188 } 6189 6190 /* Caller holds bp->hwrm_cmd_lock */ 6191 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6192 struct 
bnxt_coal *hw_coal) 6193 { 6194 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 6195 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6196 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6197 u32 nq_params = coal_cap->nq_params; 6198 u16 tmr; 6199 6200 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6201 return 0; 6202 6203 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 6204 -1, -1); 6205 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6206 req.flags = 6207 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6208 6209 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6210 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6211 req.int_lat_tmr_min = cpu_to_le16(tmr); 6212 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6213 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6214 } 6215 6216 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6217 { 6218 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 6219 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6220 struct bnxt_coal coal; 6221 6222 /* Tick values in micro seconds. 6223 * 1 coal_buf x bufs_per_record = 1 completion record. 6224 */ 6225 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6226 6227 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6228 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6229 6230 if (!bnapi->rx_ring) 6231 return -ENODEV; 6232 6233 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6234 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6235 6236 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 6237 6238 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6239 6240 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 6241 HWRM_CMD_TIMEOUT); 6242 } 6243 6244 int bnxt_hwrm_set_coal(struct bnxt *bp) 6245 { 6246 int i, rc = 0; 6247 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 6248 req_tx = {0}, *req; 6249 6250 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6251 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6252 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 6253 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6254 6255 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 6256 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 6257 6258 mutex_lock(&bp->hwrm_cmd_lock); 6259 for (i = 0; i < bp->cp_nr_rings; i++) { 6260 struct bnxt_napi *bnapi = bp->bnapi[i]; 6261 struct bnxt_coal *hw_coal; 6262 u16 ring_id; 6263 6264 req = &req_rx; 6265 if (!bnapi->rx_ring) { 6266 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6267 req = &req_tx; 6268 } else { 6269 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6270 } 6271 req->ring_id = cpu_to_le16(ring_id); 6272 6273 rc = _hwrm_send_message(bp, req, sizeof(*req), 6274 HWRM_CMD_TIMEOUT); 6275 if (rc) 6276 break; 6277 6278 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6279 continue; 6280 6281 if (bnapi->rx_ring && bnapi->tx_ring) { 6282 req = &req_tx; 6283 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6284 req->ring_id = cpu_to_le16(ring_id); 6285 rc = _hwrm_send_message(bp, req, sizeof(*req), 6286 HWRM_CMD_TIMEOUT); 6287 if (rc) 6288 break; 6289 } 6290 if (bnapi->rx_ring) 6291 hw_coal = &bp->rx_coal; 6292 else 6293 hw_coal = &bp->tx_coal; 6294 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 6295 } 6296 mutex_unlock(&bp->hwrm_cmd_lock); 6297 return rc; 6298 } 6299 6300 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 6301 { 6302 struct hwrm_stat_ctx_free_input req = {0}; 6303 int i; 6304 6305 if (!bp->bnapi) 6306 
return; 6307 6308 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6309 return; 6310 6311 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 6312 6313 mutex_lock(&bp->hwrm_cmd_lock); 6314 for (i = 0; i < bp->cp_nr_rings; i++) { 6315 struct bnxt_napi *bnapi = bp->bnapi[i]; 6316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6317 6318 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 6319 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 6320 6321 _hwrm_send_message(bp, &req, sizeof(req), 6322 HWRM_CMD_TIMEOUT); 6323 6324 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 6325 } 6326 } 6327 mutex_unlock(&bp->hwrm_cmd_lock); 6328 } 6329 6330 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 6331 { 6332 int rc = 0, i; 6333 struct hwrm_stat_ctx_alloc_input req = {0}; 6334 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6335 6336 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6337 return 0; 6338 6339 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 6340 6341 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 6342 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 6343 6344 mutex_lock(&bp->hwrm_cmd_lock); 6345 for (i = 0; i < bp->cp_nr_rings; i++) { 6346 struct bnxt_napi *bnapi = bp->bnapi[i]; 6347 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6348 6349 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 6350 6351 rc = _hwrm_send_message(bp, &req, sizeof(req), 6352 HWRM_CMD_TIMEOUT); 6353 if (rc) 6354 break; 6355 6356 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 6357 6358 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 6359 } 6360 mutex_unlock(&bp->hwrm_cmd_lock); 6361 return rc; 6362 } 6363 6364 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 6365 { 6366 struct hwrm_func_qcfg_input req = {0}; 6367 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6368 u16 flags; 6369 int rc; 6370 6371 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6372 req.fid = cpu_to_le16(0xffff); 6373 mutex_lock(&bp->hwrm_cmd_lock); 6374 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6375 if (rc) 6376 goto func_qcfg_exit; 6377 6378 #ifdef CONFIG_BNXT_SRIOV 6379 if (BNXT_VF(bp)) { 6380 struct bnxt_vf_info *vf = &bp->vf; 6381 6382 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 6383 } else { 6384 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 6385 } 6386 #endif 6387 flags = le16_to_cpu(resp->flags); 6388 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 6389 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 6390 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 6391 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 6392 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 6393 } 6394 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 6395 bp->flags |= BNXT_FLAG_MULTI_HOST; 6396 6397 switch (resp->port_partition_type) { 6398 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 6399 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 6400 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 6401 bp->port_partition_type = resp->port_partition_type; 6402 break; 6403 } 6404 if (bp->hwrm_spec_code < 0x10707 || 6405 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 6406 bp->br_mode = BRIDGE_MODE_VEB; 6407 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 6408 bp->br_mode = BRIDGE_MODE_VEPA; 6409 else 6410 bp->br_mode = BRIDGE_MODE_UNDEF; 6411 6412 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 6413 if (!bp->max_mtu) 6414 bp->max_mtu = BNXT_MAX_MTU; 6415 6416 func_qcfg_exit: 6417 mutex_unlock(&bp->hwrm_cmd_lock); 6418 return 
rc; 6419 } 6420 6421 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 6422 { 6423 struct hwrm_func_backing_store_qcaps_input req = {0}; 6424 struct hwrm_func_backing_store_qcaps_output *resp = 6425 bp->hwrm_cmd_resp_addr; 6426 int rc; 6427 6428 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 6429 return 0; 6430 6431 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 6432 mutex_lock(&bp->hwrm_cmd_lock); 6433 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6434 if (!rc) { 6435 struct bnxt_ctx_pg_info *ctx_pg; 6436 struct bnxt_ctx_mem_info *ctx; 6437 int i; 6438 6439 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 6440 if (!ctx) { 6441 rc = -ENOMEM; 6442 goto ctx_err; 6443 } 6444 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); 6445 if (!ctx_pg) { 6446 kfree(ctx); 6447 rc = -ENOMEM; 6448 goto ctx_err; 6449 } 6450 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) 6451 ctx->tqm_mem[i] = ctx_pg; 6452 6453 bp->ctx = ctx; 6454 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 6455 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 6456 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 6457 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 6458 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 6459 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 6460 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 6461 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 6462 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 6463 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 6464 ctx->vnic_max_vnic_entries = 6465 le16_to_cpu(resp->vnic_max_vnic_entries); 6466 ctx->vnic_max_ring_table_entries = 6467 le16_to_cpu(resp->vnic_max_ring_table_entries); 6468 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 6469 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 6470 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 6471 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 6472 ctx->tqm_min_entries_per_ring = 6473 le32_to_cpu(resp->tqm_min_entries_per_ring); 6474 ctx->tqm_max_entries_per_ring = 6475 le32_to_cpu(resp->tqm_max_entries_per_ring); 6476 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 6477 if (!ctx->tqm_entries_multiple) 6478 ctx->tqm_entries_multiple = 1; 6479 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 6480 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 6481 ctx->mrav_num_entries_units = 6482 le16_to_cpu(resp->mrav_num_entries_units); 6483 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 6484 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 6485 ctx->ctx_kind_initializer = resp->ctx_kind_initializer; 6486 } else { 6487 rc = 0; 6488 } 6489 ctx_err: 6490 mutex_unlock(&bp->hwrm_cmd_lock); 6491 return rc; 6492 } 6493 6494 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6495 __le64 *pg_dir) 6496 { 6497 u8 pg_size = 0; 6498 6499 if (BNXT_PAGE_SHIFT == 13) 6500 pg_size = 1 << 4; 6501 else if (BNXT_PAGE_SIZE == 16) 6502 pg_size = 2 << 4; 6503 6504 *pg_attr = pg_size; 6505 if (rmem->depth >= 1) { 6506 if (rmem->depth == 2) 6507 *pg_attr |= 2; 6508 else 6509 *pg_attr |= 1; 6510 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6511 } else { 6512 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6513 } 6514 } 6515 6516 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6517 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6518 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6519 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 6520 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6521 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6522 6523 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6524 { 6525 struct hwrm_func_backing_store_cfg_input req = {0}; 6526 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6527 struct bnxt_ctx_pg_info *ctx_pg; 6528 __le32 *num_entries; 6529 __le64 *pg_dir; 6530 u32 flags = 0; 6531 u8 *pg_attr; 6532 u32 ena; 6533 int i; 6534 6535 if (!ctx) 6536 return 0; 6537 6538 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6539 req.enables = cpu_to_le32(enables); 6540 6541 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6542 ctx_pg = &ctx->qp_mem; 6543 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6544 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6545 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6546 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6547 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6548 &req.qpc_pg_size_qpc_lvl, 6549 &req.qpc_page_dir); 6550 } 6551 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6552 ctx_pg = &ctx->srq_mem; 6553 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6554 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6555 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6556 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6557 &req.srq_pg_size_srq_lvl, 6558 &req.srq_page_dir); 6559 } 6560 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6561 ctx_pg = &ctx->cq_mem; 6562 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6563 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6564 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6565 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6566 &req.cq_page_dir); 6567 } 6568 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6569 ctx_pg = &ctx->vnic_mem; 6570 req.vnic_num_vnic_entries = 6571 cpu_to_le16(ctx->vnic_max_vnic_entries); 6572 req.vnic_num_ring_table_entries = 6573 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6574 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6575 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6576 &req.vnic_pg_size_vnic_lvl, 6577 &req.vnic_page_dir); 6578 } 6579 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6580 ctx_pg = &ctx->stat_mem; 6581 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6582 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6583 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6584 &req.stat_pg_size_stat_lvl, 6585 &req.stat_page_dir); 6586 } 6587 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 6588 ctx_pg = &ctx->mrav_mem; 6589 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 6590 if (ctx->mrav_num_entries_units) 6591 flags |= 6592 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 6593 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 6594 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6595 &req.mrav_pg_size_mrav_lvl, 6596 &req.mrav_page_dir); 6597 } 6598 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 6599 ctx_pg = &ctx->tim_mem; 6600 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 6601 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 6602 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6603 &req.tim_pg_size_tim_lvl, 6604 &req.tim_page_dir); 6605 } 6606 for (i = 0, num_entries = &req.tqm_sp_num_entries, 6607 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6608 pg_dir = &req.tqm_sp_page_dir, 6609 
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6610 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6611 if (!(enables & ena)) 6612 continue; 6613 6614 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6615 ctx_pg = ctx->tqm_mem[i]; 6616 *num_entries = cpu_to_le32(ctx_pg->entries); 6617 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6618 } 6619 req.flags = cpu_to_le32(flags); 6620 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6621 } 6622 6623 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6624 struct bnxt_ctx_pg_info *ctx_pg) 6625 { 6626 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6627 6628 rmem->page_size = BNXT_PAGE_SIZE; 6629 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6630 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6631 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6632 if (rmem->depth >= 1) 6633 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 6634 return bnxt_alloc_ring(bp, rmem); 6635 } 6636 6637 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 6638 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 6639 u8 depth, bool use_init_val) 6640 { 6641 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6642 int rc; 6643 6644 if (!mem_size) 6645 return 0; 6646 6647 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6648 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 6649 ctx_pg->nr_pages = 0; 6650 return -EINVAL; 6651 } 6652 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 6653 int nr_tbls, i; 6654 6655 rmem->depth = 2; 6656 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 6657 GFP_KERNEL); 6658 if (!ctx_pg->ctx_pg_tbl) 6659 return -ENOMEM; 6660 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 6661 rmem->nr_pages = nr_tbls; 6662 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6663 if (rc) 6664 return rc; 6665 for (i = 0; i < nr_tbls; i++) { 6666 struct bnxt_ctx_pg_info *pg_tbl; 6667 6668 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 6669 if (!pg_tbl) 6670 return -ENOMEM; 6671 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 6672 rmem = &pg_tbl->ring_mem; 6673 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 6674 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6675 rmem->depth = 1; 6676 rmem->nr_pages = MAX_CTX_PAGES; 6677 if (use_init_val) 6678 rmem->init_val = bp->ctx->ctx_kind_initializer; 6679 if (i == (nr_tbls - 1)) { 6680 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 6681 6682 if (rem) 6683 rmem->nr_pages = rem; 6684 } 6685 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6686 if (rc) 6687 break; 6688 } 6689 } else { 6690 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6691 if (rmem->nr_pages > 1 || depth) 6692 rmem->depth = 1; 6693 if (use_init_val) 6694 rmem->init_val = bp->ctx->ctx_kind_initializer; 6695 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6696 } 6697 return rc; 6698 } 6699 6700 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 6701 struct bnxt_ctx_pg_info *ctx_pg) 6702 { 6703 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6704 6705 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 6706 ctx_pg->ctx_pg_tbl) { 6707 int i, nr_tbls = rmem->nr_pages; 6708 6709 for (i = 0; i < nr_tbls; i++) { 6710 struct bnxt_ctx_pg_info *pg_tbl; 6711 struct bnxt_ring_mem_info *rmem2; 6712 6713 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 6714 if (!pg_tbl) 6715 continue; 6716 rmem2 = &pg_tbl->ring_mem; 6717 bnxt_free_ring(bp, rmem2); 6718 ctx_pg->ctx_pg_arr[i] = NULL; 6719 kfree(pg_tbl); 6720 ctx_pg->ctx_pg_tbl[i] = NULL; 6721 } 6722 kfree(ctx_pg->ctx_pg_tbl); 6723 ctx_pg->ctx_pg_tbl = NULL; 6724 } 6725 bnxt_free_ring(bp, rmem); 6726 ctx_pg->nr_pages = 0; 6727 } 6728 
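/* Tear down all backing store context memory set up by
 * bnxt_alloc_ctx_mem().  The per-queue TQM rings are freed first
 * (tqm_mem[0] also owns the array holding all TQM page info
 * structures), then the fixed regions are freed in the reverse order
 * of their allocation.  Clearing BNXT_CTX_FLAG_INITED lets a later
 * bnxt_alloc_ctx_mem() call reconfigure the firmware from scratch.
 */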
6729 static void bnxt_free_ctx_mem(struct bnxt *bp) 6730 { 6731 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6732 int i; 6733 6734 if (!ctx) 6735 return; 6736 6737 if (ctx->tqm_mem[0]) { 6738 for (i = 0; i < bp->max_q + 1; i++) 6739 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 6740 kfree(ctx->tqm_mem[0]); 6741 ctx->tqm_mem[0] = NULL; 6742 } 6743 6744 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 6745 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 6746 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 6747 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 6748 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 6749 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 6750 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 6751 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 6752 } 6753 6754 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 6755 { 6756 struct bnxt_ctx_pg_info *ctx_pg; 6757 struct bnxt_ctx_mem_info *ctx; 6758 u32 mem_size, ena, entries; 6759 u32 num_mr, num_ah; 6760 u32 extra_srqs = 0; 6761 u32 extra_qps = 0; 6762 u8 pg_lvl = 1; 6763 int i, rc; 6764 6765 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 6766 if (rc) { 6767 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 6768 rc); 6769 return rc; 6770 } 6771 ctx = bp->ctx; 6772 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 6773 return 0; 6774 6775 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 6776 pg_lvl = 2; 6777 extra_qps = 65536; 6778 extra_srqs = 8192; 6779 } 6780 6781 ctx_pg = &ctx->qp_mem; 6782 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 6783 extra_qps; 6784 mem_size = ctx->qp_entry_size * ctx_pg->entries; 6785 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6786 if (rc) 6787 return rc; 6788 6789 ctx_pg = &ctx->srq_mem; 6790 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 6791 mem_size = ctx->srq_entry_size * ctx_pg->entries; 6792 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6793 if (rc) 6794 return rc; 6795 6796 ctx_pg = &ctx->cq_mem; 6797 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 6798 mem_size = ctx->cq_entry_size * ctx_pg->entries; 6799 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6800 if (rc) 6801 return rc; 6802 6803 ctx_pg = &ctx->vnic_mem; 6804 ctx_pg->entries = ctx->vnic_max_vnic_entries + 6805 ctx->vnic_max_ring_table_entries; 6806 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 6807 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6808 if (rc) 6809 return rc; 6810 6811 ctx_pg = &ctx->stat_mem; 6812 ctx_pg->entries = ctx->stat_max_entries; 6813 mem_size = ctx->stat_entry_size * ctx_pg->entries; 6814 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6815 if (rc) 6816 return rc; 6817 6818 ena = 0; 6819 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 6820 goto skip_rdma; 6821 6822 ctx_pg = &ctx->mrav_mem; 6823 /* 128K extra is needed to accommodate static AH context 6824 * allocation by f/w. 
6825 */ 6826 num_mr = 1024 * 256; 6827 num_ah = 1024 * 128; 6828 ctx_pg->entries = num_mr + num_ah; 6829 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 6830 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); 6831 if (rc) 6832 return rc; 6833 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 6834 if (ctx->mrav_num_entries_units) 6835 ctx_pg->entries = 6836 ((num_mr / ctx->mrav_num_entries_units) << 16) | 6837 (num_ah / ctx->mrav_num_entries_units); 6838 6839 ctx_pg = &ctx->tim_mem; 6840 ctx_pg->entries = ctx->qp_mem.entries; 6841 mem_size = ctx->tim_entry_size * ctx_pg->entries; 6842 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6843 if (rc) 6844 return rc; 6845 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 6846 6847 skip_rdma: 6848 entries = ctx->qp_max_l2_entries + extra_qps; 6849 entries = roundup(entries, ctx->tqm_entries_multiple); 6850 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, 6851 ctx->tqm_max_entries_per_ring); 6852 for (i = 0; i < bp->max_q + 1; i++) { 6853 ctx_pg = ctx->tqm_mem[i]; 6854 ctx_pg->entries = entries; 6855 mem_size = ctx->tqm_entry_size * entries; 6856 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6857 if (rc) 6858 return rc; 6859 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 6860 } 6861 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 6862 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 6863 if (rc) { 6864 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 6865 rc); 6866 return rc; 6867 } 6868 ctx->flags |= BNXT_CTX_FLAG_INITED; 6869 return 0; 6870 } 6871 6872 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 6873 { 6874 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6875 struct hwrm_func_resource_qcaps_input req = {0}; 6876 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6877 int rc; 6878 6879 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 6880 req.fid = cpu_to_le16(0xffff); 6881 6882 mutex_lock(&bp->hwrm_cmd_lock); 6883 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 6884 HWRM_CMD_TIMEOUT); 6885 if (rc) 6886 goto hwrm_func_resc_qcaps_exit; 6887 6888 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 6889 if (!all) 6890 goto hwrm_func_resc_qcaps_exit; 6891 6892 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 6893 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6894 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 6895 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6896 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 6897 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6898 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 6899 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6900 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 6901 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 6902 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 6903 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6904 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 6905 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6906 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 6907 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6908 6909 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6910 u16 max_msix = le16_to_cpu(resp->max_msix); 6911 6912 hw_resc->max_nqs = max_msix; 6913 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 6914 } 6915 6916 if (BNXT_PF(bp)) { 6917 struct bnxt_pf_info *pf = &bp->pf; 
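/* Record how the firmware reserves resources for VFs.  Any strategy
 * value the driver does not recognize is treated as the maximal
 * strategy; the value is consulted later when VF resources are
 * configured for SR-IOV.
 */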
6918 6919 pf->vf_resv_strategy = 6920 le16_to_cpu(resp->vf_reservation_strategy); 6921 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 6922 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 6923 } 6924 hwrm_func_resc_qcaps_exit: 6925 mutex_unlock(&bp->hwrm_cmd_lock); 6926 return rc; 6927 } 6928 6929 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 6930 { 6931 int rc = 0; 6932 struct hwrm_func_qcaps_input req = {0}; 6933 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6934 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6935 u32 flags; 6936 6937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 6938 req.fid = cpu_to_le16(0xffff); 6939 6940 mutex_lock(&bp->hwrm_cmd_lock); 6941 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6942 if (rc) 6943 goto hwrm_func_qcaps_exit; 6944 6945 flags = le32_to_cpu(resp->flags); 6946 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 6947 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 6948 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 6949 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 6950 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 6951 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 6952 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 6953 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 6954 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 6955 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 6956 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 6957 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 6958 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 6959 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 6960 6961 bp->tx_push_thresh = 0; 6962 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) 6963 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 6964 6965 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6966 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6967 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6968 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6969 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 6970 if (!hw_resc->max_hw_ring_grps) 6971 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 6972 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6973 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6974 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6975 6976 if (BNXT_PF(bp)) { 6977 struct bnxt_pf_info *pf = &bp->pf; 6978 6979 pf->fw_fid = le16_to_cpu(resp->fid); 6980 pf->port_id = le16_to_cpu(resp->port_id); 6981 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 6982 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 6983 pf->max_vfs = le16_to_cpu(resp->max_vfs); 6984 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 6985 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 6986 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 6987 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 6988 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 6989 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 6990 bp->flags &= ~BNXT_FLAG_WOL_CAP; 6991 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 6992 bp->flags |= BNXT_FLAG_WOL_CAP; 6993 } else { 6994 #ifdef CONFIG_BNXT_SRIOV 6995 struct bnxt_vf_info *vf = &bp->vf; 6996 6997 vf->fw_fid = le16_to_cpu(resp->fid); 6998 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 6999 #endif 7000 } 7001 7002 hwrm_func_qcaps_exit: 7003 mutex_unlock(&bp->hwrm_cmd_lock); 7004 return rc; 7005 } 7006 7007 static int 
bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7008 7009 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7010 { 7011 int rc; 7012 7013 rc = __bnxt_hwrm_func_qcaps(bp); 7014 if (rc) 7015 return rc; 7016 rc = bnxt_hwrm_queue_qportcfg(bp); 7017 if (rc) { 7018 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7019 return rc; 7020 } 7021 if (bp->hwrm_spec_code >= 0x10803) { 7022 rc = bnxt_alloc_ctx_mem(bp); 7023 if (rc) 7024 return rc; 7025 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7026 if (!rc) 7027 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7028 } 7029 return 0; 7030 } 7031 7032 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7033 { 7034 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7035 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7036 int rc = 0; 7037 u32 flags; 7038 7039 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7040 return 0; 7041 7042 resp = bp->hwrm_cmd_resp_addr; 7043 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7044 7045 mutex_lock(&bp->hwrm_cmd_lock); 7046 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7047 if (rc) 7048 goto hwrm_cfa_adv_qcaps_exit; 7049 7050 flags = le32_to_cpu(resp->flags); 7051 if (flags & 7052 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7053 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7054 7055 hwrm_cfa_adv_qcaps_exit: 7056 mutex_unlock(&bp->hwrm_cmd_lock); 7057 return rc; 7058 } 7059 7060 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7061 { 7062 struct bnxt_fw_health *fw_health = bp->fw_health; 7063 u32 reg_base = 0xffffffff; 7064 int i; 7065 7066 /* Only pre-map the monitoring GRC registers using window 3 */ 7067 for (i = 0; i < 4; i++) { 7068 u32 reg = fw_health->regs[i]; 7069 7070 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7071 continue; 7072 if (reg_base == 0xffffffff) 7073 reg_base = reg & BNXT_GRC_BASE_MASK; 7074 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7075 return -ERANGE; 7076 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + 7077 (reg & BNXT_GRC_OFFSET_MASK); 7078 } 7079 if (reg_base == 0xffffffff) 7080 return 0; 7081 7082 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7083 BNXT_FW_HEALTH_WIN_MAP_OFF); 7084 return 0; 7085 } 7086 7087 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7088 { 7089 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7090 struct bnxt_fw_health *fw_health = bp->fw_health; 7091 struct hwrm_error_recovery_qcfg_input req = {0}; 7092 int rc, i; 7093 7094 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7095 return 0; 7096 7097 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7098 mutex_lock(&bp->hwrm_cmd_lock); 7099 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7100 if (rc) 7101 goto err_recovery_out; 7102 fw_health->flags = le32_to_cpu(resp->flags); 7103 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7104 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7105 rc = -EINVAL; 7106 goto err_recovery_out; 7107 } 7108 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7109 fw_health->master_func_wait_dsecs = 7110 le32_to_cpu(resp->master_func_wait_period); 7111 fw_health->normal_func_wait_dsecs = 7112 le32_to_cpu(resp->normal_func_wait_period); 7113 fw_health->post_reset_wait_dsecs = 7114 le32_to_cpu(resp->master_func_wait_period_after_reset); 7115 fw_health->post_reset_max_wait_dsecs = 7116 le32_to_cpu(resp->max_bailout_time_after_reset); 7117 fw_health->regs[BNXT_FW_HEALTH_REG] = 7118 
le32_to_cpu(resp->fw_health_status_reg); 7119 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7120 le32_to_cpu(resp->fw_heartbeat_reg); 7121 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7122 le32_to_cpu(resp->fw_reset_cnt_reg); 7123 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7124 le32_to_cpu(resp->reset_inprogress_reg); 7125 fw_health->fw_reset_inprog_reg_mask = 7126 le32_to_cpu(resp->reset_inprogress_reg_mask); 7127 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7128 if (fw_health->fw_reset_seq_cnt >= 16) { 7129 rc = -EINVAL; 7130 goto err_recovery_out; 7131 } 7132 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7133 fw_health->fw_reset_seq_regs[i] = 7134 le32_to_cpu(resp->reset_reg[i]); 7135 fw_health->fw_reset_seq_vals[i] = 7136 le32_to_cpu(resp->reset_reg_val[i]); 7137 fw_health->fw_reset_seq_delay_msec[i] = 7138 resp->delay_after_reset[i]; 7139 } 7140 err_recovery_out: 7141 mutex_unlock(&bp->hwrm_cmd_lock); 7142 if (!rc) 7143 rc = bnxt_map_fw_health_regs(bp); 7144 if (rc) 7145 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7146 return rc; 7147 } 7148 7149 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7150 { 7151 struct hwrm_func_reset_input req = {0}; 7152 7153 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 7154 req.enables = 0; 7155 7156 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7157 } 7158 7159 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7160 { 7161 int rc = 0; 7162 struct hwrm_queue_qportcfg_input req = {0}; 7163 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7164 u8 i, j, *qptr; 7165 bool no_rdma; 7166 7167 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7168 7169 mutex_lock(&bp->hwrm_cmd_lock); 7170 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7171 if (rc) 7172 goto qportcfg_exit; 7173 7174 if (!resp->max_configurable_queues) { 7175 rc = -EINVAL; 7176 goto qportcfg_exit; 7177 } 7178 bp->max_tc = resp->max_configurable_queues; 7179 bp->max_lltc = resp->max_configurable_lossless_queues; 7180 if (bp->max_tc > BNXT_MAX_QUEUE) 7181 bp->max_tc = BNXT_MAX_QUEUE; 7182 7183 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7184 qptr = &resp->queue_id0; 7185 for (i = 0, j = 0; i < bp->max_tc; i++) { 7186 bp->q_info[j].queue_id = *qptr; 7187 bp->q_ids[i] = *qptr++; 7188 bp->q_info[j].queue_profile = *qptr++; 7189 bp->tc_to_qidx[j] = j; 7190 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7191 (no_rdma && BNXT_PF(bp))) 7192 j++; 7193 } 7194 bp->max_q = bp->max_tc; 7195 bp->max_tc = max_t(u8, j, 1); 7196 7197 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7198 bp->max_tc = 1; 7199 7200 if (bp->max_lltc > bp->max_tc) 7201 bp->max_lltc = bp->max_tc; 7202 7203 qportcfg_exit: 7204 mutex_unlock(&bp->hwrm_cmd_lock); 7205 return rc; 7206 } 7207 7208 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7209 { 7210 struct hwrm_ver_get_input req = {0}; 7211 int rc; 7212 7213 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7214 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7215 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7216 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7217 7218 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7219 silent); 7220 return rc; 7221 } 7222 7223 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7224 { 7225 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 7226 u32 dev_caps_cfg; 7227 int rc; 7228 7229 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7230 mutex_lock(&bp->hwrm_cmd_lock); 7231 rc = __bnxt_hwrm_ver_get(bp, false); 7232 if (rc) 
7233 goto hwrm_ver_get_exit; 7234 7235 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7236 7237 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7238 resp->hwrm_intf_min_8b << 8 | 7239 resp->hwrm_intf_upd_8b; 7240 if (resp->hwrm_intf_maj_8b < 1) { 7241 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7242 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7243 resp->hwrm_intf_upd_8b); 7244 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7245 } 7246 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 7247 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, 7248 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); 7249 7250 if (strlen(resp->active_pkg_name)) { 7251 int fw_ver_len = strlen(bp->fw_ver_str); 7252 7253 snprintf(bp->fw_ver_str + fw_ver_len, 7254 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7255 resp->active_pkg_name); 7256 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7257 } 7258 7259 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7260 if (!bp->hwrm_cmd_timeout) 7261 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7262 7263 if (resp->hwrm_intf_maj_8b >= 1) { 7264 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7265 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7266 } 7267 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7268 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7269 7270 bp->chip_num = le16_to_cpu(resp->chip_num); 7271 bp->chip_rev = resp->chip_rev; 7272 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7273 !resp->chip_metal) 7274 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7275 7276 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7277 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7278 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7279 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7280 7281 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7282 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 7283 7284 if (dev_caps_cfg & 7285 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7286 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7287 7288 if (dev_caps_cfg & 7289 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7290 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7291 7292 if (dev_caps_cfg & 7293 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7294 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7295 7296 hwrm_ver_get_exit: 7297 mutex_unlock(&bp->hwrm_cmd_lock); 7298 return rc; 7299 } 7300 7301 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7302 { 7303 struct hwrm_fw_set_time_input req = {0}; 7304 struct tm tm; 7305 time64_t now = ktime_get_real_seconds(); 7306 7307 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7308 bp->hwrm_spec_code < 0x10400) 7309 return -EOPNOTSUPP; 7310 7311 time64_to_tm(now, 0, &tm); 7312 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7313 req.year = cpu_to_le16(1900 + tm.tm_year); 7314 req.month = 1 + tm.tm_mon; 7315 req.day = tm.tm_mday; 7316 req.hour = tm.tm_hour; 7317 req.minute = tm.tm_min; 7318 req.second = tm.tm_sec; 7319 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7320 } 7321 7322 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 7323 { 7324 struct bnxt_pf_info *pf = &bp->pf; 7325 struct hwrm_port_qstats_input req = {0}; 7326 7327 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7328 return 0; 7329 7330 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7331 req.port_id = cpu_to_le16(pf->port_id); 7332 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 7333 
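/* The firmware DMAs the current TX and RX port counters straight
 * into the host stat buffers mapped at these addresses; the counters
 * are returned by DMA rather than in the command response.
 */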
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 7334 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7335 } 7336 7337 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 7338 { 7339 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7340 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 7341 struct hwrm_port_qstats_ext_input req = {0}; 7342 struct bnxt_pf_info *pf = &bp->pf; 7343 u32 tx_stat_size; 7344 int rc; 7345 7346 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7347 return 0; 7348 7349 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7350 req.port_id = cpu_to_le16(pf->port_id); 7351 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7352 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 7353 tx_stat_size = bp->hw_tx_port_stats_ext ? 7354 sizeof(*bp->hw_tx_port_stats_ext) : 0; 7355 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7356 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 7357 mutex_lock(&bp->hwrm_cmd_lock); 7358 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7359 if (!rc) { 7360 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 7361 bp->fw_tx_stats_ext_size = tx_stat_size ? 7362 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 7363 } else { 7364 bp->fw_rx_stats_ext_size = 0; 7365 bp->fw_tx_stats_ext_size = 0; 7366 } 7367 if (bp->fw_tx_stats_ext_size <= 7368 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7369 mutex_unlock(&bp->hwrm_cmd_lock); 7370 bp->pri2cos_valid = 0; 7371 return rc; 7372 } 7373 7374 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 7375 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 7376 7377 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 7378 if (!rc) { 7379 struct hwrm_queue_pri2cos_qcfg_output *resp2; 7380 u8 *pri2cos; 7381 int i, j; 7382 7383 resp2 = bp->hwrm_cmd_resp_addr; 7384 pri2cos = &resp2->pri0_cos_queue_id; 7385 for (i = 0; i < 8; i++) { 7386 u8 queue_id = pri2cos[i]; 7387 u8 queue_idx; 7388 7389 /* Per port queue IDs start from 0, 10, 20, etc */ 7390 queue_idx = queue_id % 10; 7391 if (queue_idx > BNXT_MAX_QUEUE) { 7392 bp->pri2cos_valid = false; 7393 goto qstats_done; 7394 } 7395 for (j = 0; j < bp->max_q; j++) { 7396 if (bp->q_ids[j] == queue_id) 7397 bp->pri2cos_idx[i] = queue_idx; 7398 } 7399 } 7400 bp->pri2cos_valid = 1; 7401 } 7402 qstats_done: 7403 mutex_unlock(&bp->hwrm_cmd_lock); 7404 return rc; 7405 } 7406 7407 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) 7408 { 7409 struct hwrm_pcie_qstats_input req = {0}; 7410 7411 if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) 7412 return 0; 7413 7414 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 7415 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); 7416 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); 7417 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7418 } 7419 7420 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 7421 { 7422 if (bp->vxlan_port_cnt) { 7423 bnxt_hwrm_tunnel_dst_port_free( 7424 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7425 } 7426 bp->vxlan_port_cnt = 0; 7427 if (bp->nge_port_cnt) { 7428 bnxt_hwrm_tunnel_dst_port_free( 7429 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7430 } 7431 bp->nge_port_cnt = 0; 7432 } 7433 7434 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 7435 { 7436 int rc, i; 7437 u32 tpa_flags = 0; 7438 7439 if (set_tpa) 7440 tpa_flags = bp->flags & BNXT_FLAG_TPA; 7441 
else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 7442 return 0; 7443 for (i = 0; i < bp->nr_vnics; i++) { 7444 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 7445 if (rc) { 7446 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 7447 i, rc); 7448 return rc; 7449 } 7450 } 7451 return 0; 7452 } 7453 7454 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 7455 { 7456 int i; 7457 7458 for (i = 0; i < bp->nr_vnics; i++) 7459 bnxt_hwrm_vnic_set_rss(bp, i, false); 7460 } 7461 7462 static void bnxt_clear_vnic(struct bnxt *bp) 7463 { 7464 if (!bp->vnic_info) 7465 return; 7466 7467 bnxt_hwrm_clear_vnic_filter(bp); 7468 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 7469 /* clear all RSS setting before free vnic ctx */ 7470 bnxt_hwrm_clear_vnic_rss(bp); 7471 bnxt_hwrm_vnic_ctx_free(bp); 7472 } 7473 /* before free the vnic, undo the vnic tpa settings */ 7474 if (bp->flags & BNXT_FLAG_TPA) 7475 bnxt_set_tpa(bp, false); 7476 bnxt_hwrm_vnic_free(bp); 7477 if (bp->flags & BNXT_FLAG_CHIP_P5) 7478 bnxt_hwrm_vnic_ctx_free(bp); 7479 } 7480 7481 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7482 bool irq_re_init) 7483 { 7484 bnxt_clear_vnic(bp); 7485 bnxt_hwrm_ring_free(bp, close_path); 7486 bnxt_hwrm_ring_grp_free(bp); 7487 if (irq_re_init) { 7488 bnxt_hwrm_stat_ctx_free(bp); 7489 bnxt_hwrm_free_tunnel_ports(bp); 7490 } 7491 } 7492 7493 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 7494 { 7495 struct hwrm_func_cfg_input req = {0}; 7496 7497 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7498 req.fid = cpu_to_le16(0xffff); 7499 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 7500 if (br_mode == BRIDGE_MODE_VEB) 7501 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 7502 else if (br_mode == BRIDGE_MODE_VEPA) 7503 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 7504 else 7505 return -EINVAL; 7506 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7507 } 7508 7509 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 7510 { 7511 struct hwrm_func_cfg_input req = {0}; 7512 7513 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 7514 return 0; 7515 7516 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7517 req.fid = cpu_to_le16(0xffff); 7518 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 7519 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 7520 if (size == 128) 7521 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 7522 7523 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7524 } 7525 7526 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7527 { 7528 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 7529 int rc; 7530 7531 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 7532 goto skip_rss_ctx; 7533 7534 /* allocate context for vnic */ 7535 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 7536 if (rc) { 7537 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7538 vnic_id, rc); 7539 goto vnic_setup_err; 7540 } 7541 bp->rsscos_nr_ctxs++; 7542 7543 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7544 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 7545 if (rc) { 7546 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 7547 vnic_id, rc); 7548 goto vnic_setup_err; 7549 } 7550 bp->rsscos_nr_ctxs++; 7551 } 7552 7553 skip_rss_ctx: 7554 /* configure default vnic, ring grp */ 7555 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7556 if (rc) { 7557 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7558 vnic_id, rc); 7559 goto vnic_setup_err; 7560 } 7561 
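/* The final steps below enable RSS hashing and, if aggregation rings
 * are in use, program the header/data split (HDS) buffer sizes for
 * this VNIC.
 */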
7562 /* Enable RSS hashing on vnic */ 7563 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 7564 if (rc) { 7565 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 7566 vnic_id, rc); 7567 goto vnic_setup_err; 7568 } 7569 7570 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7571 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7572 if (rc) { 7573 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7574 vnic_id, rc); 7575 } 7576 } 7577 7578 vnic_setup_err: 7579 return rc; 7580 } 7581 7582 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 7583 { 7584 int rc, i, nr_ctxs; 7585 7586 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 7587 for (i = 0; i < nr_ctxs; i++) { 7588 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 7589 if (rc) { 7590 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 7591 vnic_id, i, rc); 7592 break; 7593 } 7594 bp->rsscos_nr_ctxs++; 7595 } 7596 if (i < nr_ctxs) 7597 return -ENOMEM; 7598 7599 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 7600 if (rc) { 7601 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 7602 vnic_id, rc); 7603 return rc; 7604 } 7605 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7606 if (rc) { 7607 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7608 vnic_id, rc); 7609 return rc; 7610 } 7611 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7612 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7613 if (rc) { 7614 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7615 vnic_id, rc); 7616 } 7617 } 7618 return rc; 7619 } 7620 7621 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7622 { 7623 if (bp->flags & BNXT_FLAG_CHIP_P5) 7624 return __bnxt_setup_vnic_p5(bp, vnic_id); 7625 else 7626 return __bnxt_setup_vnic(bp, vnic_id); 7627 } 7628 7629 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 7630 { 7631 #ifdef CONFIG_RFS_ACCEL 7632 int i, rc = 0; 7633 7634 if (bp->flags & BNXT_FLAG_CHIP_P5) 7635 return 0; 7636 7637 for (i = 0; i < bp->rx_nr_rings; i++) { 7638 struct bnxt_vnic_info *vnic; 7639 u16 vnic_id = i + 1; 7640 u16 ring_id = i; 7641 7642 if (vnic_id >= bp->nr_vnics) 7643 break; 7644 7645 vnic = &bp->vnic_info[vnic_id]; 7646 vnic->flags |= BNXT_VNIC_RFS_FLAG; 7647 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 7648 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 7649 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 7650 if (rc) { 7651 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7652 vnic_id, rc); 7653 break; 7654 } 7655 rc = bnxt_setup_vnic(bp, vnic_id); 7656 if (rc) 7657 break; 7658 } 7659 return rc; 7660 #else 7661 return 0; 7662 #endif 7663 } 7664 7665 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 7666 static bool bnxt_promisc_ok(struct bnxt *bp) 7667 { 7668 #ifdef CONFIG_BNXT_SRIOV 7669 if (BNXT_VF(bp) && !bp->vf.vlan) 7670 return false; 7671 #endif 7672 return true; 7673 } 7674 7675 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 7676 { 7677 unsigned int rc = 0; 7678 7679 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 7680 if (rc) { 7681 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7682 rc); 7683 return rc; 7684 } 7685 7686 rc = bnxt_hwrm_vnic_cfg(bp, 1); 7687 if (rc) { 7688 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7689 rc); 7690 return rc; 7691 } 7692 return rc; 7693 } 7694 7695 static int bnxt_cfg_rx_mode(struct bnxt *); 7696 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 7697 7698 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 7699 { 7700 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7701 int 
rc = 0; 7702 unsigned int rx_nr_rings = bp->rx_nr_rings; 7703 7704 if (irq_re_init) { 7705 rc = bnxt_hwrm_stat_ctx_alloc(bp); 7706 if (rc) { 7707 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 7708 rc); 7709 goto err_out; 7710 } 7711 } 7712 7713 rc = bnxt_hwrm_ring_alloc(bp); 7714 if (rc) { 7715 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 7716 goto err_out; 7717 } 7718 7719 rc = bnxt_hwrm_ring_grp_alloc(bp); 7720 if (rc) { 7721 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 7722 goto err_out; 7723 } 7724 7725 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7726 rx_nr_rings--; 7727 7728 /* default vnic 0 */ 7729 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 7730 if (rc) { 7731 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 7732 goto err_out; 7733 } 7734 7735 rc = bnxt_setup_vnic(bp, 0); 7736 if (rc) 7737 goto err_out; 7738 7739 if (bp->flags & BNXT_FLAG_RFS) { 7740 rc = bnxt_alloc_rfs_vnics(bp); 7741 if (rc) 7742 goto err_out; 7743 } 7744 7745 if (bp->flags & BNXT_FLAG_TPA) { 7746 rc = bnxt_set_tpa(bp, true); 7747 if (rc) 7748 goto err_out; 7749 } 7750 7751 if (BNXT_VF(bp)) 7752 bnxt_update_vf_mac(bp); 7753 7754 /* Filter for default vnic 0 */ 7755 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 7756 if (rc) { 7757 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 7758 goto err_out; 7759 } 7760 vnic->uc_filter_count = 1; 7761 7762 vnic->rx_mask = 0; 7763 if (bp->dev->flags & IFF_BROADCAST) 7764 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 7765 7766 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7767 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7768 7769 if (bp->dev->flags & IFF_ALLMULTI) { 7770 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7771 vnic->mc_list_count = 0; 7772 } else { 7773 u32 mask = 0; 7774 7775 bnxt_mc_list_updated(bp, &mask); 7776 vnic->rx_mask |= mask; 7777 } 7778 7779 rc = bnxt_cfg_rx_mode(bp); 7780 if (rc) 7781 goto err_out; 7782 7783 rc = bnxt_hwrm_set_coal(bp); 7784 if (rc) 7785 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 7786 rc); 7787 7788 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7789 rc = bnxt_setup_nitroa0_vnic(bp); 7790 if (rc) 7791 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 7792 rc); 7793 } 7794 7795 if (BNXT_VF(bp)) { 7796 bnxt_hwrm_func_qcfg(bp); 7797 netdev_update_features(bp->dev); 7798 } 7799 7800 return 0; 7801 7802 err_out: 7803 bnxt_hwrm_resource_free(bp, 0, true); 7804 7805 return rc; 7806 } 7807 7808 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 7809 { 7810 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 7811 return 0; 7812 } 7813 7814 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 7815 { 7816 bnxt_init_cp_rings(bp); 7817 bnxt_init_rx_rings(bp); 7818 bnxt_init_tx_rings(bp); 7819 bnxt_init_ring_grps(bp, irq_re_init); 7820 bnxt_init_vnics(bp); 7821 7822 return bnxt_init_chip(bp, irq_re_init); 7823 } 7824 7825 static int bnxt_set_real_num_queues(struct bnxt *bp) 7826 { 7827 int rc; 7828 struct net_device *dev = bp->dev; 7829 7830 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 7831 bp->tx_nr_rings_xdp); 7832 if (rc) 7833 return rc; 7834 7835 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 7836 if (rc) 7837 return rc; 7838 7839 #ifdef CONFIG_RFS_ACCEL 7840 if (bp->flags & BNXT_FLAG_RFS) 7841 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 7842 #endif 7843 7844 return rc; 7845 } 7846 7847 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int 
max, 7848 bool shared) 7849 { 7850 int _rx = *rx, _tx = *tx; 7851 7852 if (shared) { 7853 *rx = min_t(int, _rx, max); 7854 *tx = min_t(int, _tx, max); 7855 } else { 7856 if (max < 2) 7857 return -ENOMEM; 7858 7859 while (_rx + _tx > max) { 7860 if (_rx > _tx && _rx > 1) 7861 _rx--; 7862 else if (_tx > 1) 7863 _tx--; 7864 } 7865 *rx = _rx; 7866 *tx = _tx; 7867 } 7868 return 0; 7869 } 7870 7871 static void bnxt_setup_msix(struct bnxt *bp) 7872 { 7873 const int len = sizeof(bp->irq_tbl[0].name); 7874 struct net_device *dev = bp->dev; 7875 int tcs, i; 7876 7877 tcs = netdev_get_num_tc(dev); 7878 if (tcs) { 7879 int i, off, count; 7880 7881 for (i = 0; i < tcs; i++) { 7882 count = bp->tx_nr_rings_per_tc; 7883 off = i * count; 7884 netdev_set_tc_queue(dev, i, count, off); 7885 } 7886 } 7887 7888 for (i = 0; i < bp->cp_nr_rings; i++) { 7889 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7890 char *attr; 7891 7892 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7893 attr = "TxRx"; 7894 else if (i < bp->rx_nr_rings) 7895 attr = "rx"; 7896 else 7897 attr = "tx"; 7898 7899 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 7900 attr, i); 7901 bp->irq_tbl[map_idx].handler = bnxt_msix; 7902 } 7903 } 7904 7905 static void bnxt_setup_inta(struct bnxt *bp) 7906 { 7907 const int len = sizeof(bp->irq_tbl[0].name); 7908 7909 if (netdev_get_num_tc(bp->dev)) 7910 netdev_reset_tc(bp->dev); 7911 7912 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 7913 0); 7914 bp->irq_tbl[0].handler = bnxt_inta; 7915 } 7916 7917 static int bnxt_setup_int_mode(struct bnxt *bp) 7918 { 7919 int rc; 7920 7921 if (bp->flags & BNXT_FLAG_USING_MSIX) 7922 bnxt_setup_msix(bp); 7923 else 7924 bnxt_setup_inta(bp); 7925 7926 rc = bnxt_set_real_num_queues(bp); 7927 return rc; 7928 } 7929 7930 #ifdef CONFIG_RFS_ACCEL 7931 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 7932 { 7933 return bp->hw_resc.max_rsscos_ctxs; 7934 } 7935 7936 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 7937 { 7938 return bp->hw_resc.max_vnics; 7939 } 7940 #endif 7941 7942 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 7943 { 7944 return bp->hw_resc.max_stat_ctxs; 7945 } 7946 7947 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 7948 { 7949 return bp->hw_resc.max_cp_rings; 7950 } 7951 7952 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 7953 { 7954 unsigned int cp = bp->hw_resc.max_cp_rings; 7955 7956 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 7957 cp -= bnxt_get_ulp_msix_num(bp); 7958 7959 return cp; 7960 } 7961 7962 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 7963 { 7964 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7965 7966 if (bp->flags & BNXT_FLAG_CHIP_P5) 7967 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 7968 7969 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 7970 } 7971 7972 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 7973 { 7974 bp->hw_resc.max_irqs = max_irqs; 7975 } 7976 7977 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 7978 { 7979 unsigned int cp; 7980 7981 cp = bnxt_get_max_func_cp_rings_for_en(bp); 7982 if (bp->flags & BNXT_FLAG_CHIP_P5) 7983 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 7984 else 7985 return cp - bp->cp_nr_rings; 7986 } 7987 7988 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 7989 { 7990 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 7991 } 7992 7993 int bnxt_get_avail_msix(struct bnxt *bp, int num) 
7994 { 7995 int max_cp = bnxt_get_max_func_cp_rings(bp); 7996 int max_irq = bnxt_get_max_func_irqs(bp); 7997 int total_req = bp->cp_nr_rings + num; 7998 int max_idx, avail_msix; 7999 8000 max_idx = bp->total_irqs; 8001 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8002 max_idx = min_t(int, bp->total_irqs, max_cp); 8003 avail_msix = max_idx - bp->cp_nr_rings; 8004 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8005 return avail_msix; 8006 8007 if (max_irq < total_req) { 8008 num = max_irq - bp->cp_nr_rings; 8009 if (num <= 0) 8010 return 0; 8011 } 8012 return num; 8013 } 8014 8015 static int bnxt_get_num_msix(struct bnxt *bp) 8016 { 8017 if (!BNXT_NEW_RM(bp)) 8018 return bnxt_get_max_func_irqs(bp); 8019 8020 return bnxt_nq_rings_in_use(bp); 8021 } 8022 8023 static int bnxt_init_msix(struct bnxt *bp) 8024 { 8025 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8026 struct msix_entry *msix_ent; 8027 8028 total_vecs = bnxt_get_num_msix(bp); 8029 max = bnxt_get_max_func_irqs(bp); 8030 if (total_vecs > max) 8031 total_vecs = max; 8032 8033 if (!total_vecs) 8034 return 0; 8035 8036 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8037 if (!msix_ent) 8038 return -ENOMEM; 8039 8040 for (i = 0; i < total_vecs; i++) { 8041 msix_ent[i].entry = i; 8042 msix_ent[i].vector = 0; 8043 } 8044 8045 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8046 min = 2; 8047 8048 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8049 ulp_msix = bnxt_get_ulp_msix_num(bp); 8050 if (total_vecs < 0 || total_vecs < ulp_msix) { 8051 rc = -ENODEV; 8052 goto msix_setup_exit; 8053 } 8054 8055 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8056 if (bp->irq_tbl) { 8057 for (i = 0; i < total_vecs; i++) 8058 bp->irq_tbl[i].vector = msix_ent[i].vector; 8059 8060 bp->total_irqs = total_vecs; 8061 /* Trim rings based upon num of vectors allocated */ 8062 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8063 total_vecs - ulp_msix, min == 1); 8064 if (rc) 8065 goto msix_setup_exit; 8066 8067 bp->cp_nr_rings = (min == 1) ? 
8068 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8069 bp->tx_nr_rings + bp->rx_nr_rings; 8070 8071 } else { 8072 rc = -ENOMEM; 8073 goto msix_setup_exit; 8074 } 8075 bp->flags |= BNXT_FLAG_USING_MSIX; 8076 kfree(msix_ent); 8077 return 0; 8078 8079 msix_setup_exit: 8080 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8081 kfree(bp->irq_tbl); 8082 bp->irq_tbl = NULL; 8083 pci_disable_msix(bp->pdev); 8084 kfree(msix_ent); 8085 return rc; 8086 } 8087 8088 static int bnxt_init_inta(struct bnxt *bp) 8089 { 8090 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 8091 if (!bp->irq_tbl) 8092 return -ENOMEM; 8093 8094 bp->total_irqs = 1; 8095 bp->rx_nr_rings = 1; 8096 bp->tx_nr_rings = 1; 8097 bp->cp_nr_rings = 1; 8098 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8099 bp->irq_tbl[0].vector = bp->pdev->irq; 8100 return 0; 8101 } 8102 8103 static int bnxt_init_int_mode(struct bnxt *bp) 8104 { 8105 int rc = 0; 8106 8107 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8108 rc = bnxt_init_msix(bp); 8109 8110 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8111 /* fallback to INTA */ 8112 rc = bnxt_init_inta(bp); 8113 } 8114 return rc; 8115 } 8116 8117 static void bnxt_clear_int_mode(struct bnxt *bp) 8118 { 8119 if (bp->flags & BNXT_FLAG_USING_MSIX) 8120 pci_disable_msix(bp->pdev); 8121 8122 kfree(bp->irq_tbl); 8123 bp->irq_tbl = NULL; 8124 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8125 } 8126 8127 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8128 { 8129 int tcs = netdev_get_num_tc(bp->dev); 8130 bool irq_cleared = false; 8131 int rc; 8132 8133 if (!bnxt_need_reserve_rings(bp)) 8134 return 0; 8135 8136 if (irq_re_init && BNXT_NEW_RM(bp) && 8137 bnxt_get_num_msix(bp) != bp->total_irqs) { 8138 bnxt_ulp_irq_stop(bp); 8139 bnxt_clear_int_mode(bp); 8140 irq_cleared = true; 8141 } 8142 rc = __bnxt_reserve_rings(bp); 8143 if (irq_cleared) { 8144 if (!rc) 8145 rc = bnxt_init_int_mode(bp); 8146 bnxt_ulp_irq_restart(bp, rc); 8147 } 8148 if (rc) { 8149 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8150 return rc; 8151 } 8152 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8153 netdev_err(bp->dev, "tx ring reservation failure\n"); 8154 netdev_reset_tc(bp->dev); 8155 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8156 return -ENOMEM; 8157 } 8158 return 0; 8159 } 8160 8161 static void bnxt_free_irq(struct bnxt *bp) 8162 { 8163 struct bnxt_irq *irq; 8164 int i; 8165 8166 #ifdef CONFIG_RFS_ACCEL 8167 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8168 bp->dev->rx_cpu_rmap = NULL; 8169 #endif 8170 if (!bp->irq_tbl || !bp->bnapi) 8171 return; 8172 8173 for (i = 0; i < bp->cp_nr_rings; i++) { 8174 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8175 8176 irq = &bp->irq_tbl[map_idx]; 8177 if (irq->requested) { 8178 if (irq->have_cpumask) { 8179 irq_set_affinity_hint(irq->vector, NULL); 8180 free_cpumask_var(irq->cpu_mask); 8181 irq->have_cpumask = 0; 8182 } 8183 free_irq(irq->vector, bp->bnapi[i]); 8184 } 8185 8186 irq->requested = 0; 8187 } 8188 } 8189 8190 static int bnxt_request_irq(struct bnxt *bp) 8191 { 8192 int i, j, rc = 0; 8193 unsigned long flags = 0; 8194 #ifdef CONFIG_RFS_ACCEL 8195 struct cpu_rmap *rmap; 8196 #endif 8197 8198 rc = bnxt_setup_int_mode(bp); 8199 if (rc) { 8200 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8201 rc); 8202 return rc; 8203 } 8204 #ifdef CONFIG_RFS_ACCEL 8205 rmap = bp->dev->rx_cpu_rmap; 8206 #endif 8207 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8208 flags = IRQF_SHARED; 8209 8210 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8211 int 
map_idx = bnxt_cp_num_to_irq_num(bp, i); 8212 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8213 8214 #ifdef CONFIG_RFS_ACCEL 8215 if (rmap && bp->bnapi[i]->rx_ring) { 8216 rc = irq_cpu_rmap_add(rmap, irq->vector); 8217 if (rc) 8218 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8219 j); 8220 j++; 8221 } 8222 #endif 8223 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8224 bp->bnapi[i]); 8225 if (rc) 8226 break; 8227 8228 irq->requested = 1; 8229 8230 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8231 int numa_node = dev_to_node(&bp->pdev->dev); 8232 8233 irq->have_cpumask = 1; 8234 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8235 irq->cpu_mask); 8236 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8237 if (rc) { 8238 netdev_warn(bp->dev, 8239 "Set affinity failed, IRQ = %d\n", 8240 irq->vector); 8241 break; 8242 } 8243 } 8244 } 8245 return rc; 8246 } 8247 8248 static void bnxt_del_napi(struct bnxt *bp) 8249 { 8250 int i; 8251 8252 if (!bp->bnapi) 8253 return; 8254 8255 for (i = 0; i < bp->cp_nr_rings; i++) { 8256 struct bnxt_napi *bnapi = bp->bnapi[i]; 8257 8258 napi_hash_del(&bnapi->napi); 8259 netif_napi_del(&bnapi->napi); 8260 } 8261 /* We called napi_hash_del() before netif_napi_del(), we need 8262 * to respect an RCU grace period before freeing napi structures. 8263 */ 8264 synchronize_net(); 8265 } 8266 8267 static void bnxt_init_napi(struct bnxt *bp) 8268 { 8269 int i; 8270 unsigned int cp_nr_rings = bp->cp_nr_rings; 8271 struct bnxt_napi *bnapi; 8272 8273 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8274 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8275 8276 if (bp->flags & BNXT_FLAG_CHIP_P5) 8277 poll_fn = bnxt_poll_p5; 8278 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8279 cp_nr_rings--; 8280 for (i = 0; i < cp_nr_rings; i++) { 8281 bnapi = bp->bnapi[i]; 8282 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8283 } 8284 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8285 bnapi = bp->bnapi[cp_nr_rings]; 8286 netif_napi_add(bp->dev, &bnapi->napi, 8287 bnxt_poll_nitroa0, 64); 8288 } 8289 } else { 8290 bnapi = bp->bnapi[0]; 8291 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8292 } 8293 } 8294 8295 static void bnxt_disable_napi(struct bnxt *bp) 8296 { 8297 int i; 8298 8299 if (!bp->bnapi) 8300 return; 8301 8302 for (i = 0; i < bp->cp_nr_rings; i++) { 8303 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8304 8305 if (bp->bnapi[i]->rx_ring) 8306 cancel_work_sync(&cpr->dim.work); 8307 8308 napi_disable(&bp->bnapi[i]->napi); 8309 } 8310 } 8311 8312 static void bnxt_enable_napi(struct bnxt *bp) 8313 { 8314 int i; 8315 8316 for (i = 0; i < bp->cp_nr_rings; i++) { 8317 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8318 bp->bnapi[i]->in_reset = false; 8319 8320 if (bp->bnapi[i]->rx_ring) { 8321 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 8322 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 8323 } 8324 napi_enable(&bp->bnapi[i]->napi); 8325 } 8326 } 8327 8328 void bnxt_tx_disable(struct bnxt *bp) 8329 { 8330 int i; 8331 struct bnxt_tx_ring_info *txr; 8332 8333 if (bp->tx_ring) { 8334 for (i = 0; i < bp->tx_nr_rings; i++) { 8335 txr = &bp->tx_ring[i]; 8336 txr->dev_state = BNXT_DEV_STATE_CLOSING; 8337 } 8338 } 8339 /* Stop all TX queues */ 8340 netif_tx_disable(bp->dev); 8341 netif_carrier_off(bp->dev); 8342 } 8343 8344 void bnxt_tx_enable(struct bnxt *bp) 8345 { 8346 int i; 8347 struct bnxt_tx_ring_info *txr; 8348 8349 for (i = 0; i < bp->tx_nr_rings; i++) { 8350 txr = &bp->tx_ring[i]; 8351 txr->dev_state = 0; 8352 } 8353 
netif_tx_wake_all_queues(bp->dev); 8354 if (bp->link_info.link_up) 8355 netif_carrier_on(bp->dev); 8356 } 8357 8358 static void bnxt_report_link(struct bnxt *bp) 8359 { 8360 if (bp->link_info.link_up) { 8361 const char *duplex; 8362 const char *flow_ctrl; 8363 u32 speed; 8364 u16 fec; 8365 8366 netif_carrier_on(bp->dev); 8367 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 8368 duplex = "full"; 8369 else 8370 duplex = "half"; 8371 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 8372 flow_ctrl = "ON - receive & transmit"; 8373 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 8374 flow_ctrl = "ON - transmit"; 8375 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 8376 flow_ctrl = "ON - receive"; 8377 else 8378 flow_ctrl = "none"; 8379 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 8380 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 8381 speed, duplex, flow_ctrl); 8382 if (bp->flags & BNXT_FLAG_EEE_CAP) 8383 netdev_info(bp->dev, "EEE is %s\n", 8384 bp->eee.eee_active ? "active" : 8385 "not active"); 8386 fec = bp->link_info.fec_cfg; 8387 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 8388 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 8389 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 8390 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 8391 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 8392 } else { 8393 netif_carrier_off(bp->dev); 8394 netdev_err(bp->dev, "NIC Link is Down\n"); 8395 } 8396 } 8397 8398 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 8399 { 8400 int rc = 0; 8401 struct hwrm_port_phy_qcaps_input req = {0}; 8402 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8403 struct bnxt_link_info *link_info = &bp->link_info; 8404 8405 bp->flags &= ~BNXT_FLAG_EEE_CAP; 8406 if (bp->test_info) 8407 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | 8408 BNXT_TEST_FL_AN_PHY_LPBK); 8409 if (bp->hwrm_spec_code < 0x10201) 8410 return 0; 8411 8412 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 8413 8414 mutex_lock(&bp->hwrm_cmd_lock); 8415 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8416 if (rc) 8417 goto hwrm_phy_qcaps_exit; 8418 8419 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 8420 struct ethtool_eee *eee = &bp->eee; 8421 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 8422 8423 bp->flags |= BNXT_FLAG_EEE_CAP; 8424 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8425 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 8426 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 8427 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 8428 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 8429 } 8430 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 8431 if (bp->test_info) 8432 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 8433 } 8434 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { 8435 if (bp->test_info) 8436 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; 8437 } 8438 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { 8439 if (BNXT_PF(bp)) 8440 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8441 } 8442 if (resp->supported_speeds_auto_mode) 8443 link_info->support_auto_speeds = 8444 le16_to_cpu(resp->supported_speeds_auto_mode); 8445 8446 bp->port_count = resp->port_cnt; 8447 8448 hwrm_phy_qcaps_exit: 8449 mutex_unlock(&bp->hwrm_cmd_lock); 8450 return rc; 8451 } 8452 8453 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 8454 { 8455 int rc = 0; 8456 struct 
bnxt_link_info *link_info = &bp->link_info; 8457 struct hwrm_port_phy_qcfg_input req = {0}; 8458 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8459 u8 link_up = link_info->link_up; 8460 u16 diff; 8461 8462 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 8463 8464 mutex_lock(&bp->hwrm_cmd_lock); 8465 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8466 if (rc) { 8467 mutex_unlock(&bp->hwrm_cmd_lock); 8468 return rc; 8469 } 8470 8471 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 8472 link_info->phy_link_status = resp->link; 8473 link_info->duplex = resp->duplex_cfg; 8474 if (bp->hwrm_spec_code >= 0x10800) 8475 link_info->duplex = resp->duplex_state; 8476 link_info->pause = resp->pause; 8477 link_info->auto_mode = resp->auto_mode; 8478 link_info->auto_pause_setting = resp->auto_pause; 8479 link_info->lp_pause = resp->link_partner_adv_pause; 8480 link_info->force_pause_setting = resp->force_pause; 8481 link_info->duplex_setting = resp->duplex_cfg; 8482 if (link_info->phy_link_status == BNXT_LINK_LINK) 8483 link_info->link_speed = le16_to_cpu(resp->link_speed); 8484 else 8485 link_info->link_speed = 0; 8486 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 8487 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 8488 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 8489 link_info->lp_auto_link_speeds = 8490 le16_to_cpu(resp->link_partner_adv_speeds); 8491 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 8492 link_info->phy_ver[0] = resp->phy_maj; 8493 link_info->phy_ver[1] = resp->phy_min; 8494 link_info->phy_ver[2] = resp->phy_bld; 8495 link_info->media_type = resp->media_type; 8496 link_info->phy_type = resp->phy_type; 8497 link_info->transceiver = resp->xcvr_pkg_type; 8498 link_info->phy_addr = resp->eee_config_phy_addr & 8499 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 8500 link_info->module_status = resp->module_status; 8501 8502 if (bp->flags & BNXT_FLAG_EEE_CAP) { 8503 struct ethtool_eee *eee = &bp->eee; 8504 u16 fw_speeds; 8505 8506 eee->eee_active = 0; 8507 if (resp->eee_config_phy_addr & 8508 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 8509 eee->eee_active = 1; 8510 fw_speeds = le16_to_cpu( 8511 resp->link_partner_adv_eee_link_speed_mask); 8512 eee->lp_advertised = 8513 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8514 } 8515 8516 /* Pull initial EEE config */ 8517 if (!chng_link_state) { 8518 if (resp->eee_config_phy_addr & 8519 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 8520 eee->eee_enabled = 1; 8521 8522 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 8523 eee->advertised = 8524 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8525 8526 if (resp->eee_config_phy_addr & 8527 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 8528 __le32 tmr; 8529 8530 eee->tx_lpi_enabled = 1; 8531 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 8532 eee->tx_lpi_timer = le32_to_cpu(tmr) & 8533 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 8534 } 8535 } 8536 } 8537 8538 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 8539 if (bp->hwrm_spec_code >= 0x10504) 8540 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 8541 8542 /* TODO: need to add more logic to report VF link */ 8543 if (chng_link_state) { 8544 if (link_info->phy_link_status == BNXT_LINK_LINK) 8545 link_info->link_up = 1; 8546 else 8547 link_info->link_up = 0; 8548 if (link_up != link_info->link_up) 8549 bnxt_report_link(bp); 8550 } else { 8551 /* always link down if not required to update link state */ 8552 link_info->link_up =
0; 8553 } 8554 mutex_unlock(&bp->hwrm_cmd_lock); 8555 8556 if (!BNXT_PHY_CFG_ABLE(bp)) 8557 return 0; 8558 8559 diff = link_info->support_auto_speeds ^ link_info->advertising; 8560 if ((link_info->support_auto_speeds | diff) != 8561 link_info->support_auto_speeds) { 8562 /* An advertised speed is no longer supported, so we need to 8563 * update the advertisement settings. Caller holds RTNL 8564 * so we can modify link settings. 8565 */ 8566 link_info->advertising = link_info->support_auto_speeds; 8567 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 8568 bnxt_hwrm_set_link_setting(bp, true, false); 8569 } 8570 return 0; 8571 } 8572 8573 static void bnxt_get_port_module_status(struct bnxt *bp) 8574 { 8575 struct bnxt_link_info *link_info = &bp->link_info; 8576 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 8577 u8 module_status; 8578 8579 if (bnxt_update_link(bp, true)) 8580 return; 8581 8582 module_status = link_info->module_status; 8583 switch (module_status) { 8584 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 8585 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 8586 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 8587 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 8588 bp->pf.port_id); 8589 if (bp->hwrm_spec_code >= 0x10201) { 8590 netdev_warn(bp->dev, "Module part number %s\n", 8591 resp->phy_vendor_partnumber); 8592 } 8593 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 8594 netdev_warn(bp->dev, "TX is disabled\n"); 8595 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 8596 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 8597 } 8598 } 8599 8600 static void 8601 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 8602 { 8603 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 8604 if (bp->hwrm_spec_code >= 0x10201) 8605 req->auto_pause = 8606 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 8607 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8608 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 8609 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8610 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 8611 req->enables |= 8612 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8613 } else { 8614 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8615 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 8616 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8617 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 8618 req->enables |= 8619 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 8620 if (bp->hwrm_spec_code >= 0x10201) { 8621 req->auto_pause = req->force_pause; 8622 req->enables |= cpu_to_le32( 8623 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8624 } 8625 } 8626 } 8627 8628 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 8629 struct hwrm_port_phy_cfg_input *req) 8630 { 8631 u8 autoneg = bp->link_info.autoneg; 8632 u16 fw_link_speed = bp->link_info.req_link_speed; 8633 u16 advertising = bp->link_info.advertising; 8634 8635 if (autoneg & BNXT_AUTONEG_SPEED) { 8636 req->auto_mode |= 8637 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 8638 8639 req->enables |= cpu_to_le32( 8640 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 8641 req->auto_link_speed_mask = cpu_to_le16(advertising); 8642 8643 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 8644 req->flags |= 8645 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 8646 } else { 8647 req->force_link_speed = cpu_to_le16(fw_link_speed); 8648 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 8649 
} 8650 8651 /* tell chimp that the setting takes effect immediately */ 8652 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 8653 } 8654 8655 int bnxt_hwrm_set_pause(struct bnxt *bp) 8656 { 8657 struct hwrm_port_phy_cfg_input req = {0}; 8658 int rc; 8659 8660 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8661 bnxt_hwrm_set_pause_common(bp, &req); 8662 8663 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 8664 bp->link_info.force_link_chng) 8665 bnxt_hwrm_set_link_common(bp, &req); 8666 8667 mutex_lock(&bp->hwrm_cmd_lock); 8668 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8669 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 8670 /* since changing of pause setting doesn't trigger any link 8671 * change event, the driver needs to update the current pause 8672 * result upon successfully return of the phy_cfg command 8673 */ 8674 bp->link_info.pause = 8675 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 8676 bp->link_info.auto_pause_setting = 0; 8677 if (!bp->link_info.force_link_chng) 8678 bnxt_report_link(bp); 8679 } 8680 bp->link_info.force_link_chng = false; 8681 mutex_unlock(&bp->hwrm_cmd_lock); 8682 return rc; 8683 } 8684 8685 static void bnxt_hwrm_set_eee(struct bnxt *bp, 8686 struct hwrm_port_phy_cfg_input *req) 8687 { 8688 struct ethtool_eee *eee = &bp->eee; 8689 8690 if (eee->eee_enabled) { 8691 u16 eee_speeds; 8692 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 8693 8694 if (eee->tx_lpi_enabled) 8695 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 8696 else 8697 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 8698 8699 req->flags |= cpu_to_le32(flags); 8700 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 8701 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 8702 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 8703 } else { 8704 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 8705 } 8706 } 8707 8708 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 8709 { 8710 struct hwrm_port_phy_cfg_input req = {0}; 8711 8712 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8713 if (set_pause) 8714 bnxt_hwrm_set_pause_common(bp, &req); 8715 8716 bnxt_hwrm_set_link_common(bp, &req); 8717 8718 if (set_eee) 8719 bnxt_hwrm_set_eee(bp, &req); 8720 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8721 } 8722 8723 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 8724 { 8725 struct hwrm_port_phy_cfg_input req = {0}; 8726 8727 if (!BNXT_SINGLE_PF(bp)) 8728 return 0; 8729 8730 if (pci_num_vf(bp->pdev)) 8731 return 0; 8732 8733 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8734 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 8735 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8736 } 8737 8738 static int bnxt_fw_init_one(struct bnxt *bp); 8739 8740 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 8741 { 8742 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 8743 struct hwrm_func_drv_if_change_input req = {0}; 8744 bool resc_reinit = false, fw_reset = false; 8745 u32 flags = 0; 8746 int rc; 8747 8748 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 8749 return 0; 8750 8751 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 8752 if (up) 8753 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 8754 mutex_lock(&bp->hwrm_cmd_lock); 8755 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8756 if (!rc) 8757 flags = 
le32_to_cpu(resp->flags); 8758 mutex_unlock(&bp->hwrm_cmd_lock); 8759 if (rc) 8760 return rc; 8761 8762 if (!up) 8763 return 0; 8764 8765 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 8766 resc_reinit = true; 8767 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 8768 fw_reset = true; 8769 8770 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 8771 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 8772 return -ENODEV; 8773 } 8774 if (resc_reinit || fw_reset) { 8775 if (fw_reset) { 8776 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 8777 bnxt_ulp_stop(bp); 8778 bnxt_free_ctx_mem(bp); 8779 kfree(bp->ctx); 8780 bp->ctx = NULL; 8781 bnxt_dcb_free(bp); 8782 rc = bnxt_fw_init_one(bp); 8783 if (rc) { 8784 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 8785 return rc; 8786 } 8787 bnxt_clear_int_mode(bp); 8788 rc = bnxt_init_int_mode(bp); 8789 if (rc) { 8790 netdev_err(bp->dev, "init int mode failed\n"); 8791 return rc; 8792 } 8793 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 8794 } 8795 if (BNXT_NEW_RM(bp)) { 8796 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8797 8798 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8799 hw_resc->resv_cp_rings = 0; 8800 hw_resc->resv_stat_ctxs = 0; 8801 hw_resc->resv_irqs = 0; 8802 hw_resc->resv_tx_rings = 0; 8803 hw_resc->resv_rx_rings = 0; 8804 hw_resc->resv_hw_ring_grps = 0; 8805 hw_resc->resv_vnics = 0; 8806 if (!fw_reset) { 8807 bp->tx_nr_rings = 0; 8808 bp->rx_nr_rings = 0; 8809 } 8810 } 8811 } 8812 return 0; 8813 } 8814 8815 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 8816 { 8817 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8818 struct hwrm_port_led_qcaps_input req = {0}; 8819 struct bnxt_pf_info *pf = &bp->pf; 8820 int rc; 8821 8822 bp->num_leds = 0; 8823 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 8824 return 0; 8825 8826 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 8827 req.port_id = cpu_to_le16(pf->port_id); 8828 mutex_lock(&bp->hwrm_cmd_lock); 8829 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8830 if (rc) { 8831 mutex_unlock(&bp->hwrm_cmd_lock); 8832 return rc; 8833 } 8834 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 8835 int i; 8836 8837 bp->num_leds = resp->num_leds; 8838 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 8839 bp->num_leds); 8840 for (i = 0; i < bp->num_leds; i++) { 8841 struct bnxt_led_info *led = &bp->leds[i]; 8842 __le16 caps = led->led_state_caps; 8843 8844 if (!led->led_group_id || 8845 !BNXT_LED_ALT_BLINK_CAP(caps)) { 8846 bp->num_leds = 0; 8847 break; 8848 } 8849 } 8850 } 8851 mutex_unlock(&bp->hwrm_cmd_lock); 8852 return 0; 8853 } 8854 8855 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 8856 { 8857 struct hwrm_wol_filter_alloc_input req = {0}; 8858 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 8859 int rc; 8860 8861 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 8862 req.port_id = cpu_to_le16(bp->pf.port_id); 8863 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 8864 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 8865 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 8866 mutex_lock(&bp->hwrm_cmd_lock); 8867 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8868 if (!rc) 8869 bp->wol_filter_id = resp->wol_filter_id; 8870 mutex_unlock(&bp->hwrm_cmd_lock); 8871 return rc; 8872 } 8873 8874 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 8875 { 8876 struct hwrm_wol_filter_free_input req = {0}; 8877 8878 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 8879 req.port_id = cpu_to_le16(bp->pf.port_id); 8880 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 8881 req.wol_filter_id = bp->wol_filter_id; 8882 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8883 } 8884 8885 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 8886 { 8887 struct hwrm_wol_filter_qcfg_input req = {0}; 8888 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8889 u16 next_handle = 0; 8890 int rc; 8891 8892 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 8893 req.port_id = cpu_to_le16(bp->pf.port_id); 8894 req.handle = cpu_to_le16(handle); 8895 mutex_lock(&bp->hwrm_cmd_lock); 8896 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8897 if (!rc) { 8898 next_handle = le16_to_cpu(resp->next_handle); 8899 if (next_handle != 0) { 8900 if (resp->wol_type == 8901 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 8902 bp->wol = 1; 8903 bp->wol_filter_id = resp->wol_filter_id; 8904 } 8905 } 8906 } 8907 mutex_unlock(&bp->hwrm_cmd_lock); 8908 return next_handle; 8909 } 8910 8911 static void bnxt_get_wol_settings(struct bnxt *bp) 8912 { 8913 u16 handle = 0; 8914 8915 bp->wol = 0; 8916 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 8917 return; 8918 8919 do { 8920 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 8921 } while (handle && handle != 0xffff); 8922 } 8923 8924 #ifdef CONFIG_BNXT_HWMON 8925 static ssize_t bnxt_show_temp(struct device *dev, 8926 struct device_attribute *devattr, char *buf) 8927 { 8928 struct hwrm_temp_monitor_query_input req = {0}; 8929 struct hwrm_temp_monitor_query_output *resp; 8930 struct bnxt *bp = dev_get_drvdata(dev); 8931 u32 temp = 0; 8932 8933 resp = bp->hwrm_cmd_resp_addr; 8934 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 8935 mutex_lock(&bp->hwrm_cmd_lock); 8936 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 8937 temp = resp->temp * 1000; /* display millidegree */ 8938 mutex_unlock(&bp->hwrm_cmd_lock); 8939 8940 return sprintf(buf, "%u\n", temp); 8941 } 8942 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 8943 8944 static struct attribute *bnxt_attrs[] = { 8945 &sensor_dev_attr_temp1_input.dev_attr.attr, 8946 NULL 8947 }; 8948 ATTRIBUTE_GROUPS(bnxt); 8949 8950 static void bnxt_hwmon_close(struct bnxt *bp) 8951 { 8952 if (bp->hwmon_dev) { 8953 hwmon_device_unregister(bp->hwmon_dev); 8954 bp->hwmon_dev = NULL; 8955 } 8956 } 8957 8958 static void bnxt_hwmon_open(struct bnxt *bp) 8959 { 8960 struct pci_dev *pdev = bp->pdev; 8961 8962 if (bp->hwmon_dev) 8963 return; 8964 8965 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 8966 DRV_MODULE_NAME, bp, 8967 bnxt_groups); 8968 if (IS_ERR(bp->hwmon_dev)) { 8969 bp->hwmon_dev = NULL; 8970 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 8971 } 8972 } 8973 #else 8974 static void bnxt_hwmon_close(struct bnxt *bp) 8975 { 8976 } 8977 8978 static void bnxt_hwmon_open(struct bnxt *bp) 8979 { 8980 } 8981 #endif 8982 8983 static bool bnxt_eee_config_ok(struct bnxt *bp) 8984 { 8985 struct ethtool_eee *eee = &bp->eee; 8986 struct bnxt_link_info *link_info = &bp->link_info; 8987 8988 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 8989 return true; 8990 8991 if (eee->eee_enabled) { 8992 u32 advertising = 8993 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 8994 8995 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 8996 eee->eee_enabled = 0; 8997 return false; 8998 } 8999 if 
(eee->advertised & ~advertising) { 9000 eee->advertised = advertising & eee->supported; 9001 return false; 9002 } 9003 } 9004 return true; 9005 } 9006 9007 static int bnxt_update_phy_setting(struct bnxt *bp) 9008 { 9009 int rc; 9010 bool update_link = false; 9011 bool update_pause = false; 9012 bool update_eee = false; 9013 struct bnxt_link_info *link_info = &bp->link_info; 9014 9015 rc = bnxt_update_link(bp, true); 9016 if (rc) { 9017 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 9018 rc); 9019 return rc; 9020 } 9021 if (!BNXT_SINGLE_PF(bp)) 9022 return 0; 9023 9024 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9025 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 9026 link_info->req_flow_ctrl) 9027 update_pause = true; 9028 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9029 link_info->force_pause_setting != link_info->req_flow_ctrl) 9030 update_pause = true; 9031 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9032 if (BNXT_AUTO_MODE(link_info->auto_mode)) 9033 update_link = true; 9034 if (link_info->req_link_speed != link_info->force_link_speed) 9035 update_link = true; 9036 if (link_info->req_duplex != link_info->duplex_setting) 9037 update_link = true; 9038 } else { 9039 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 9040 update_link = true; 9041 if (link_info->advertising != link_info->auto_link_speeds) 9042 update_link = true; 9043 } 9044 9045 /* The last close may have shutdown the link, so need to call 9046 * PHY_CFG to bring it back up. 9047 */ 9048 if (!bp->link_info.link_up) 9049 update_link = true; 9050 9051 if (!bnxt_eee_config_ok(bp)) 9052 update_eee = true; 9053 9054 if (update_link) 9055 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 9056 else if (update_pause) 9057 rc = bnxt_hwrm_set_pause(bp); 9058 if (rc) { 9059 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 9060 rc); 9061 return rc; 9062 } 9063 9064 return rc; 9065 } 9066 9067 /* Common routine to pre-map certain register block to different GRC window. 9068 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 9069 * in PF and 3 windows in VF that can be customized to map in different 9070 * register blocks. 9071 */ 9072 static void bnxt_preset_reg_win(struct bnxt *bp) 9073 { 9074 if (BNXT_PF(bp)) { 9075 /* CAG registers map to GRC window #4 */ 9076 writel(BNXT_CAG_REG_BASE, 9077 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 9078 } 9079 } 9080 9081 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 9082 9083 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9084 { 9085 int rc = 0; 9086 9087 bnxt_preset_reg_win(bp); 9088 netif_carrier_off(bp->dev); 9089 if (irq_re_init) { 9090 /* Reserve rings now if none were reserved at driver probe. 
*/ 9091 rc = bnxt_init_dflt_ring_mode(bp); 9092 if (rc) { 9093 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9094 return rc; 9095 } 9096 } 9097 rc = bnxt_reserve_rings(bp, irq_re_init); 9098 if (rc) 9099 return rc; 9100 if ((bp->flags & BNXT_FLAG_RFS) && 9101 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 9102 /* disable RFS if falling back to INTA */ 9103 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 9104 bp->flags &= ~BNXT_FLAG_RFS; 9105 } 9106 9107 rc = bnxt_alloc_mem(bp, irq_re_init); 9108 if (rc) { 9109 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9110 goto open_err_free_mem; 9111 } 9112 9113 if (irq_re_init) { 9114 bnxt_init_napi(bp); 9115 rc = bnxt_request_irq(bp); 9116 if (rc) { 9117 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 9118 goto open_err_irq; 9119 } 9120 } 9121 9122 bnxt_enable_napi(bp); 9123 bnxt_debug_dev_init(bp); 9124 9125 rc = bnxt_init_nic(bp, irq_re_init); 9126 if (rc) { 9127 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9128 goto open_err; 9129 } 9130 9131 if (link_re_init) { 9132 mutex_lock(&bp->link_lock); 9133 rc = bnxt_update_phy_setting(bp); 9134 mutex_unlock(&bp->link_lock); 9135 if (rc) { 9136 netdev_warn(bp->dev, "failed to update phy settings\n"); 9137 if (BNXT_SINGLE_PF(bp)) { 9138 bp->link_info.phy_retry = true; 9139 bp->link_info.phy_retry_expires = 9140 jiffies + 5 * HZ; 9141 } 9142 } 9143 } 9144 9145 if (irq_re_init) 9146 udp_tunnel_get_rx_info(bp->dev); 9147 9148 set_bit(BNXT_STATE_OPEN, &bp->state); 9149 bnxt_enable_int(bp); 9150 /* Enable TX queues */ 9151 bnxt_tx_enable(bp); 9152 mod_timer(&bp->timer, jiffies + bp->current_interval); 9153 /* Poll link status and check for SFP+ module status */ 9154 bnxt_get_port_module_status(bp); 9155 9156 /* VF-reps may need to be re-opened after the PF is re-opened */ 9157 if (BNXT_PF(bp)) 9158 bnxt_vf_reps_open(bp); 9159 return 0; 9160 9161 open_err: 9162 bnxt_debug_dev_exit(bp); 9163 bnxt_disable_napi(bp); 9164 9165 open_err_irq: 9166 bnxt_del_napi(bp); 9167 9168 open_err_free_mem: 9169 bnxt_free_skbs(bp); 9170 bnxt_free_irq(bp); 9171 bnxt_free_mem(bp, true); 9172 return rc; 9173 } 9174 9175 /* rtnl_lock held */ 9176 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9177 { 9178 int rc = 0; 9179 9180 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 9181 if (rc) { 9182 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 9183 dev_close(bp->dev); 9184 } 9185 return rc; 9186 } 9187 9188 /* rtnl_lock held, open the NIC half way by allocating all resources, but 9189 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 9190 * self tests. 9191 */ 9192 int bnxt_half_open_nic(struct bnxt *bp) 9193 { 9194 int rc = 0; 9195 9196 rc = bnxt_alloc_mem(bp, false); 9197 if (rc) { 9198 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9199 goto half_open_err; 9200 } 9201 rc = bnxt_init_nic(bp, false); 9202 if (rc) { 9203 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9204 goto half_open_err; 9205 } 9206 return 0; 9207 9208 half_open_err: 9209 bnxt_free_skbs(bp); 9210 bnxt_free_mem(bp, false); 9211 dev_close(bp->dev); 9212 return rc; 9213 } 9214 9215 /* rtnl_lock held, this call can only be made after a previous successful 9216 * call to bnxt_half_open_nic(). 
9217 */ 9218 void bnxt_half_close_nic(struct bnxt *bp) 9219 { 9220 bnxt_hwrm_resource_free(bp, false, false); 9221 bnxt_free_skbs(bp); 9222 bnxt_free_mem(bp, false); 9223 } 9224 9225 static void bnxt_reenable_sriov(struct bnxt *bp) 9226 { 9227 if (BNXT_PF(bp)) { 9228 struct bnxt_pf_info *pf = &bp->pf; 9229 int n = pf->active_vfs; 9230 9231 if (n) 9232 bnxt_cfg_hw_sriov(bp, &n, true); 9233 } 9234 } 9235 9236 static int bnxt_open(struct net_device *dev) 9237 { 9238 struct bnxt *bp = netdev_priv(dev); 9239 int rc; 9240 9241 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 9242 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); 9243 return -ENODEV; 9244 } 9245 9246 rc = bnxt_hwrm_if_change(bp, true); 9247 if (rc) 9248 return rc; 9249 rc = __bnxt_open_nic(bp, true, true); 9250 if (rc) { 9251 bnxt_hwrm_if_change(bp, false); 9252 } else { 9253 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 9254 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9255 bnxt_ulp_start(bp, 0); 9256 bnxt_reenable_sriov(bp); 9257 } 9258 } 9259 bnxt_hwmon_open(bp); 9260 } 9261 9262 return rc; 9263 } 9264 9265 static bool bnxt_drv_busy(struct bnxt *bp) 9266 { 9267 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 9268 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 9269 } 9270 9271 static void bnxt_get_ring_stats(struct bnxt *bp, 9272 struct rtnl_link_stats64 *stats); 9273 9274 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 9275 bool link_re_init) 9276 { 9277 /* Close the VF-reps before closing PF */ 9278 if (BNXT_PF(bp)) 9279 bnxt_vf_reps_close(bp); 9280 9281 /* Change device state to avoid TX queue wake-ups */ 9282 bnxt_tx_disable(bp); 9283 9284 clear_bit(BNXT_STATE_OPEN, &bp->state); 9285 smp_mb__after_atomic(); 9286 while (bnxt_drv_busy(bp)) 9287 msleep(20); 9288 9289 /* Flush rings and disable interrupts */ 9290 bnxt_shutdown_nic(bp, irq_re_init); 9291 9292 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 9293 9294 bnxt_debug_dev_exit(bp); 9295 bnxt_disable_napi(bp); 9296 del_timer_sync(&bp->timer); 9297 bnxt_free_skbs(bp); 9298 9299 /* Save ring stats before shutdown */ 9300 if (bp->bnapi) 9301 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9302 if (irq_re_init) { 9303 bnxt_free_irq(bp); 9304 bnxt_del_napi(bp); 9305 } 9306 bnxt_free_mem(bp, irq_re_init); 9307 } 9308 9309 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9310 { 9311 int rc = 0; 9312 9313 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9314 /* If we get here, it means firmware reset is in progress 9315 * while we are trying to close. We can safely proceed with 9316 * the close because we are holding rtnl_lock(). Some firmware 9317 * messages may fail as we proceed to close. We set the 9318 * ABORT_ERR flag here so that the FW reset thread will later 9319 * abort when it gets the rtnl_lock() and sees the flag.
9320 */ 9321 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 9322 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9323 } 9324 9325 #ifdef CONFIG_BNXT_SRIOV 9326 if (bp->sriov_cfg) { 9327 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 9328 !bp->sriov_cfg, 9329 BNXT_SRIOV_CFG_WAIT_TMO); 9330 if (rc) 9331 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 9332 } 9333 #endif 9334 __bnxt_close_nic(bp, irq_re_init, link_re_init); 9335 return rc; 9336 } 9337 9338 static int bnxt_close(struct net_device *dev) 9339 { 9340 struct bnxt *bp = netdev_priv(dev); 9341 9342 bnxt_hwmon_close(bp); 9343 bnxt_close_nic(bp, true, true); 9344 bnxt_hwrm_shutdown_link(bp); 9345 bnxt_hwrm_if_change(bp, false); 9346 return 0; 9347 } 9348 9349 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 9350 u16 *val) 9351 { 9352 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 9353 struct hwrm_port_phy_mdio_read_input req = {0}; 9354 int rc; 9355 9356 if (bp->hwrm_spec_code < 0x10a00) 9357 return -EOPNOTSUPP; 9358 9359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 9360 req.port_id = cpu_to_le16(bp->pf.port_id); 9361 req.phy_addr = phy_addr; 9362 req.reg_addr = cpu_to_le16(reg & 0x1f); 9363 if (mdio_phy_id_is_c45(phy_addr)) { 9364 req.cl45_mdio = 1; 9365 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9366 req.dev_addr = mdio_phy_id_devad(phy_addr); 9367 req.reg_addr = cpu_to_le16(reg); 9368 } 9369 9370 mutex_lock(&bp->hwrm_cmd_lock); 9371 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9372 if (!rc) 9373 *val = le16_to_cpu(resp->reg_data); 9374 mutex_unlock(&bp->hwrm_cmd_lock); 9375 return rc; 9376 } 9377 9378 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 9379 u16 val) 9380 { 9381 struct hwrm_port_phy_mdio_write_input req = {0}; 9382 9383 if (bp->hwrm_spec_code < 0x10a00) 9384 return -EOPNOTSUPP; 9385 9386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 9387 req.port_id = cpu_to_le16(bp->pf.port_id); 9388 req.phy_addr = phy_addr; 9389 req.reg_addr = cpu_to_le16(reg & 0x1f); 9390 if (mdio_phy_id_is_c45(phy_addr)) { 9391 req.cl45_mdio = 1; 9392 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9393 req.dev_addr = mdio_phy_id_devad(phy_addr); 9394 req.reg_addr = cpu_to_le16(reg); 9395 } 9396 req.reg_data = cpu_to_le16(val); 9397 9398 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9399 } 9400 9401 /* rtnl_lock held */ 9402 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 9403 { 9404 struct mii_ioctl_data *mdio = if_mii(ifr); 9405 struct bnxt *bp = netdev_priv(dev); 9406 int rc; 9407 9408 switch (cmd) { 9409 case SIOCGMIIPHY: 9410 mdio->phy_id = bp->link_info.phy_addr; 9411 9412 /* fallthru */ 9413 case SIOCGMIIREG: { 9414 u16 mii_regval = 0; 9415 9416 if (!netif_running(dev)) 9417 return -EAGAIN; 9418 9419 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 9420 &mii_regval); 9421 mdio->val_out = mii_regval; 9422 return rc; 9423 } 9424 9425 case SIOCSMIIREG: 9426 if (!netif_running(dev)) 9427 return -EAGAIN; 9428 9429 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 9430 mdio->val_in); 9431 9432 default: 9433 /* do nothing */ 9434 break; 9435 } 9436 return -EOPNOTSUPP; 9437 } 9438 9439 static void bnxt_get_ring_stats(struct bnxt *bp, 9440 struct rtnl_link_stats64 *stats) 9441 { 9442 int i; 9443 9444 9445 for (i = 0; i < bp->cp_nr_rings; i++) { 9446 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 9447 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9448 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 9449 9450 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 9451 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 9452 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 9453 9454 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 9455 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 9456 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 9457 9458 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 9459 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 9460 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 9461 9462 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 9463 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 9464 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 9465 9466 stats->rx_missed_errors += 9467 le64_to_cpu(hw_stats->rx_discard_pkts); 9468 9469 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 9470 9471 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 9472 } 9473 } 9474 9475 static void bnxt_add_prev_stats(struct bnxt *bp, 9476 struct rtnl_link_stats64 *stats) 9477 { 9478 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 9479 9480 stats->rx_packets += prev_stats->rx_packets; 9481 stats->tx_packets += prev_stats->tx_packets; 9482 stats->rx_bytes += prev_stats->rx_bytes; 9483 stats->tx_bytes += prev_stats->tx_bytes; 9484 stats->rx_missed_errors += prev_stats->rx_missed_errors; 9485 stats->multicast += prev_stats->multicast; 9486 stats->tx_dropped += prev_stats->tx_dropped; 9487 } 9488 9489 static void 9490 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 9491 { 9492 struct bnxt *bp = netdev_priv(dev); 9493 9494 set_bit(BNXT_STATE_READ_STATS, &bp->state); 9495 /* Make sure bnxt_close_nic() sees that we are reading stats before 9496 * we check the BNXT_STATE_OPEN flag. 
9497 */ 9498 smp_mb__after_atomic(); 9499 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9500 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9501 *stats = bp->net_stats_prev; 9502 return; 9503 } 9504 9505 bnxt_get_ring_stats(bp, stats); 9506 bnxt_add_prev_stats(bp, stats); 9507 9508 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9509 struct rx_port_stats *rx = bp->hw_rx_port_stats; 9510 struct tx_port_stats *tx = bp->hw_tx_port_stats; 9511 9512 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 9513 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 9514 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 9515 le64_to_cpu(rx->rx_ovrsz_frames) + 9516 le64_to_cpu(rx->rx_runt_frames); 9517 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 9518 le64_to_cpu(rx->rx_jbr_frames); 9519 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 9520 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 9521 stats->tx_errors = le64_to_cpu(tx->tx_err); 9522 } 9523 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9524 } 9525 9526 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 9527 { 9528 struct net_device *dev = bp->dev; 9529 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9530 struct netdev_hw_addr *ha; 9531 u8 *haddr; 9532 int mc_count = 0; 9533 bool update = false; 9534 int off = 0; 9535 9536 netdev_for_each_mc_addr(ha, dev) { 9537 if (mc_count >= BNXT_MAX_MC_ADDRS) { 9538 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9539 vnic->mc_list_count = 0; 9540 return false; 9541 } 9542 haddr = ha->addr; 9543 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 9544 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 9545 update = true; 9546 } 9547 off += ETH_ALEN; 9548 mc_count++; 9549 } 9550 if (mc_count) 9551 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 9552 9553 if (mc_count != vnic->mc_list_count) { 9554 vnic->mc_list_count = mc_count; 9555 update = true; 9556 } 9557 return update; 9558 } 9559 9560 static bool bnxt_uc_list_updated(struct bnxt *bp) 9561 { 9562 struct net_device *dev = bp->dev; 9563 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9564 struct netdev_hw_addr *ha; 9565 int off = 0; 9566 9567 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 9568 return true; 9569 9570 netdev_for_each_uc_addr(ha, dev) { 9571 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 9572 return true; 9573 9574 off += ETH_ALEN; 9575 } 9576 return false; 9577 } 9578 9579 static void bnxt_set_rx_mode(struct net_device *dev) 9580 { 9581 struct bnxt *bp = netdev_priv(dev); 9582 struct bnxt_vnic_info *vnic; 9583 bool mc_update = false; 9584 bool uc_update; 9585 u32 mask; 9586 9587 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 9588 return; 9589 9590 vnic = &bp->vnic_info[0]; 9591 mask = vnic->rx_mask; 9592 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 9593 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 9594 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 9595 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 9596 9597 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 9598 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9599 9600 uc_update = bnxt_uc_list_updated(bp); 9601 9602 if (dev->flags & IFF_BROADCAST) 9603 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9604 if (dev->flags & IFF_ALLMULTI) { 9605 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9606 vnic->mc_list_count = 0; 9607 } else { 9608 mc_update = bnxt_mc_list_updated(bp, &mask); 9609 } 9610 9611 if (mask != vnic->rx_mask || uc_update || mc_update) { 9612 vnic->rx_mask = mask; 9613 9614 set_bit(BNXT_RX_MASK_SP_EVENT, 
&bp->sp_event); 9615 bnxt_queue_sp_work(bp); 9616 } 9617 } 9618 9619 static int bnxt_cfg_rx_mode(struct bnxt *bp) 9620 { 9621 struct net_device *dev = bp->dev; 9622 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9623 struct netdev_hw_addr *ha; 9624 int i, off = 0, rc; 9625 bool uc_update; 9626 9627 netif_addr_lock_bh(dev); 9628 uc_update = bnxt_uc_list_updated(bp); 9629 netif_addr_unlock_bh(dev); 9630 9631 if (!uc_update) 9632 goto skip_uc; 9633 9634 mutex_lock(&bp->hwrm_cmd_lock); 9635 for (i = 1; i < vnic->uc_filter_count; i++) { 9636 struct hwrm_cfa_l2_filter_free_input req = {0}; 9637 9638 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 9639 -1); 9640 9641 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 9642 9643 rc = _hwrm_send_message(bp, &req, sizeof(req), 9644 HWRM_CMD_TIMEOUT); 9645 } 9646 mutex_unlock(&bp->hwrm_cmd_lock); 9647 9648 vnic->uc_filter_count = 1; 9649 9650 netif_addr_lock_bh(dev); 9651 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 9652 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9653 } else { 9654 netdev_for_each_uc_addr(ha, dev) { 9655 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 9656 off += ETH_ALEN; 9657 vnic->uc_filter_count++; 9658 } 9659 } 9660 netif_addr_unlock_bh(dev); 9661 9662 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 9663 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 9664 if (rc) { 9665 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 9666 rc); 9667 vnic->uc_filter_count = i; 9668 return rc; 9669 } 9670 } 9671 9672 skip_uc: 9673 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9674 if (rc && vnic->mc_list_count) { 9675 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 9676 rc); 9677 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9678 vnic->mc_list_count = 0; 9679 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9680 } 9681 if (rc) 9682 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 9683 rc); 9684 9685 return rc; 9686 } 9687 9688 static bool bnxt_can_reserve_rings(struct bnxt *bp) 9689 { 9690 #ifdef CONFIG_BNXT_SRIOV 9691 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 9692 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9693 9694 /* No minimum rings were provisioned by the PF. Don't 9695 * reserve rings by default when device is down. 
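 * Rings will be reserved when the device is brought up instead.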
9696 */
9697 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9698 return true;
9699
9700 if (!netif_running(bp->dev))
9701 return false;
9702 }
9703 #endif
9704 return true;
9705 }
9706
9707 /* If the chip and firmware support RFS */
9708 static bool bnxt_rfs_supported(struct bnxt *bp)
9709 {
9710 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9711 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
9712 return true;
9713 return false;
9714 }
9715 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9716 return true;
9717 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9718 return true;
9719 return false;
9720 }
9721
9722 /* If runtime conditions support RFS */
9723 static bool bnxt_rfs_capable(struct bnxt *bp)
9724 {
9725 #ifdef CONFIG_RFS_ACCEL
9726 int vnics, max_vnics, max_rss_ctxs;
9727
9728 if (bp->flags & BNXT_FLAG_CHIP_P5)
9729 return bnxt_rfs_supported(bp);
9730 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9731 return false;
9732
9733 vnics = 1 + bp->rx_nr_rings;
9734 max_vnics = bnxt_get_max_func_vnics(bp);
9735 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9736
9737 /* RSS contexts not a limiting factor */
9738 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9739 max_rss_ctxs = max_vnics;
9740 if (vnics > max_vnics || vnics > max_rss_ctxs) {
9741 if (bp->rx_nr_rings > 1)
9742 netdev_warn(bp->dev,
9743 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9744 min(max_rss_ctxs - 1, max_vnics - 1));
9745 return false;
9746 }
9747
9748 if (!BNXT_NEW_RM(bp))
9749 return true;
9750
9751 if (vnics == bp->hw_resc.resv_vnics)
9752 return true;
9753
9754 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9755 if (vnics <= bp->hw_resc.resv_vnics)
9756 return true;
9757
9758 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9759 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9760 return false;
9761 #else
9762 return false;
9763 #endif
9764 }
9765
9766 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9767 netdev_features_t features)
9768 {
9769 struct bnxt *bp = netdev_priv(dev);
9770
9771 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9772 features &= ~NETIF_F_NTUPLE;
9773
9774 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9775 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9776
9777 if (!(features & NETIF_F_GRO))
9778 features &= ~NETIF_F_GRO_HW;
9779
9780 if (features & NETIF_F_GRO_HW)
9781 features &= ~NETIF_F_LRO;
9782
9783 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9784 * turned on or off together.
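 * The fixup below forces both feature bits to the same state, keyed
 * off the device's currently enabled CTAG_RX setting.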
9785 */ 9786 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 9787 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 9788 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 9789 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9790 NETIF_F_HW_VLAN_STAG_RX); 9791 else 9792 features |= NETIF_F_HW_VLAN_CTAG_RX | 9793 NETIF_F_HW_VLAN_STAG_RX; 9794 } 9795 #ifdef CONFIG_BNXT_SRIOV 9796 if (BNXT_VF(bp)) { 9797 if (bp->vf.vlan) { 9798 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9799 NETIF_F_HW_VLAN_STAG_RX); 9800 } 9801 } 9802 #endif 9803 return features; 9804 } 9805 9806 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 9807 { 9808 struct bnxt *bp = netdev_priv(dev); 9809 u32 flags = bp->flags; 9810 u32 changes; 9811 int rc = 0; 9812 bool re_init = false; 9813 bool update_tpa = false; 9814 9815 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 9816 if (features & NETIF_F_GRO_HW) 9817 flags |= BNXT_FLAG_GRO; 9818 else if (features & NETIF_F_LRO) 9819 flags |= BNXT_FLAG_LRO; 9820 9821 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 9822 flags &= ~BNXT_FLAG_TPA; 9823 9824 if (features & NETIF_F_HW_VLAN_CTAG_RX) 9825 flags |= BNXT_FLAG_STRIP_VLAN; 9826 9827 if (features & NETIF_F_NTUPLE) 9828 flags |= BNXT_FLAG_RFS; 9829 9830 changes = flags ^ bp->flags; 9831 if (changes & BNXT_FLAG_TPA) { 9832 update_tpa = true; 9833 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 9834 (flags & BNXT_FLAG_TPA) == 0 || 9835 (bp->flags & BNXT_FLAG_CHIP_P5)) 9836 re_init = true; 9837 } 9838 9839 if (changes & ~BNXT_FLAG_TPA) 9840 re_init = true; 9841 9842 if (flags != bp->flags) { 9843 u32 old_flags = bp->flags; 9844 9845 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9846 bp->flags = flags; 9847 if (update_tpa) 9848 bnxt_set_ring_params(bp); 9849 return rc; 9850 } 9851 9852 if (re_init) { 9853 bnxt_close_nic(bp, false, false); 9854 bp->flags = flags; 9855 if (update_tpa) 9856 bnxt_set_ring_params(bp); 9857 9858 return bnxt_open_nic(bp, false, false); 9859 } 9860 if (update_tpa) { 9861 bp->flags = flags; 9862 rc = bnxt_set_tpa(bp, 9863 (flags & BNXT_FLAG_TPA) ? 
9864 true : false); 9865 if (rc) 9866 bp->flags = old_flags; 9867 } 9868 } 9869 return rc; 9870 } 9871 9872 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 9873 u32 ring_id, u32 *prod, u32 *cons) 9874 { 9875 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 9876 struct hwrm_dbg_ring_info_get_input req = {0}; 9877 int rc; 9878 9879 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 9880 req.ring_type = ring_type; 9881 req.fw_ring_id = cpu_to_le32(ring_id); 9882 mutex_lock(&bp->hwrm_cmd_lock); 9883 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9884 if (!rc) { 9885 *prod = le32_to_cpu(resp->producer_index); 9886 *cons = le32_to_cpu(resp->consumer_index); 9887 } 9888 mutex_unlock(&bp->hwrm_cmd_lock); 9889 return rc; 9890 } 9891 9892 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 9893 { 9894 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 9895 int i = bnapi->index; 9896 9897 if (!txr) 9898 return; 9899 9900 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 9901 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 9902 txr->tx_cons); 9903 } 9904 9905 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 9906 { 9907 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 9908 int i = bnapi->index; 9909 9910 if (!rxr) 9911 return; 9912 9913 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 9914 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 9915 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 9916 rxr->rx_sw_agg_prod); 9917 } 9918 9919 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 9920 { 9921 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9922 int i = bnapi->index; 9923 9924 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 9925 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 9926 } 9927 9928 static void bnxt_dbg_dump_states(struct bnxt *bp) 9929 { 9930 int i; 9931 struct bnxt_napi *bnapi; 9932 9933 for (i = 0; i < bp->cp_nr_rings; i++) { 9934 bnapi = bp->bnapi[i]; 9935 if (netif_msg_drv(bp)) { 9936 bnxt_dump_tx_sw_state(bnapi); 9937 bnxt_dump_rx_sw_state(bnapi); 9938 bnxt_dump_cp_sw_state(bnapi); 9939 } 9940 } 9941 } 9942 9943 static void bnxt_reset_task(struct bnxt *bp, bool silent) 9944 { 9945 if (!silent) 9946 bnxt_dbg_dump_states(bp); 9947 if (netif_running(bp->dev)) { 9948 int rc; 9949 9950 if (silent) { 9951 bnxt_close_nic(bp, false, false); 9952 bnxt_open_nic(bp, false, false); 9953 } else { 9954 bnxt_ulp_stop(bp); 9955 bnxt_close_nic(bp, true, false); 9956 rc = bnxt_open_nic(bp, true, false); 9957 bnxt_ulp_start(bp, rc); 9958 } 9959 } 9960 } 9961 9962 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 9963 { 9964 struct bnxt *bp = netdev_priv(dev); 9965 9966 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 9967 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 9968 bnxt_queue_sp_work(bp); 9969 } 9970 9971 static void bnxt_fw_health_check(struct bnxt *bp) 9972 { 9973 struct bnxt_fw_health *fw_health = bp->fw_health; 9974 u32 val; 9975 9976 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 9977 return; 9978 9979 if (fw_health->tmr_counter) { 9980 fw_health->tmr_counter--; 9981 return; 9982 } 9983 9984 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 9985 if (val == fw_health->last_fw_heartbeat) 9986 goto fw_reset; 9987 9988 fw_health->last_fw_heartbeat = val; 9989 9990 val = bnxt_fw_health_readl(bp, 
BNXT_FW_RESET_CNT_REG);
9991 if (val != fw_health->last_fw_reset_cnt)
9992 goto fw_reset;
9993
9994 fw_health->tmr_counter = fw_health->tmr_multiplier;
9995 return;
9996
9997 fw_reset:
9998 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
9999 bnxt_queue_sp_work(bp);
10000 }
10001
10002 static void bnxt_timer(struct timer_list *t)
10003 {
10004 struct bnxt *bp = from_timer(bp, t, timer);
10005 struct net_device *dev = bp->dev;
10006
10007 if (!netif_running(dev))
10008 return;
10009
10010 if (atomic_read(&bp->intr_sem) != 0)
10011 goto bnxt_restart_timer;
10012
10013 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10014 bnxt_fw_health_check(bp);
10015
10016 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
10017 bp->stats_coal_ticks) {
10018 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10019 bnxt_queue_sp_work(bp);
10020 }
10021
10022 if (bnxt_tc_flower_enabled(bp)) {
10023 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10024 bnxt_queue_sp_work(bp);
10025 }
10026
10027 #ifdef CONFIG_RFS_ACCEL
10028 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10029 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10030 bnxt_queue_sp_work(bp);
10031 }
10032 #endif /*CONFIG_RFS_ACCEL*/
10033
10034 if (bp->link_info.phy_retry) {
10035 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10036 bp->link_info.phy_retry = false;
10037 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10038 } else {
10039 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10040 bnxt_queue_sp_work(bp);
10041 }
10042 }
10043
10044 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10045 netif_carrier_ok(dev)) {
10046 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10047 bnxt_queue_sp_work(bp);
10048 }
10049 bnxt_restart_timer:
10050 mod_timer(&bp->timer, jiffies + bp->current_interval);
10051 }
10052
10053 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10054 {
10055 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10056 * set. If the device is being closed, bnxt_close() may be holding
10057 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10058 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10059 */
10060 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10061 rtnl_lock();
10062 }
10063
10064 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10065 {
10066 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10067 rtnl_unlock();
10068 }
10069
10070 /* Only called from bnxt_sp_task() */
10071 static void bnxt_reset(struct bnxt *bp, bool silent)
10072 {
10073 bnxt_rtnl_lock_sp(bp);
10074 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10075 bnxt_reset_task(bp, silent);
10076 bnxt_rtnl_unlock_sp(bp);
10077 }
10078
10079 static void bnxt_fw_reset_close(struct bnxt *bp)
10080 {
10081 bnxt_ulp_stop(bp);
10082 /* When firmware is in a fatal state, disable the PCI device to prevent
10083 * any potential bad DMAs before freeing kernel memory.
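 * The pci_disable_device() call further below is then skipped via its
 * pci_is_enabled() check, since the device is already disabled here.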
10084 */ 10085 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10086 pci_disable_device(bp->pdev); 10087 __bnxt_close_nic(bp, true, false); 10088 bnxt_clear_int_mode(bp); 10089 bnxt_hwrm_func_drv_unrgtr(bp); 10090 if (pci_is_enabled(bp->pdev)) 10091 pci_disable_device(bp->pdev); 10092 bnxt_free_ctx_mem(bp); 10093 kfree(bp->ctx); 10094 bp->ctx = NULL; 10095 } 10096 10097 static bool is_bnxt_fw_ok(struct bnxt *bp) 10098 { 10099 struct bnxt_fw_health *fw_health = bp->fw_health; 10100 bool no_heartbeat = false, has_reset = false; 10101 u32 val; 10102 10103 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10104 if (val == fw_health->last_fw_heartbeat) 10105 no_heartbeat = true; 10106 10107 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10108 if (val != fw_health->last_fw_reset_cnt) 10109 has_reset = true; 10110 10111 if (!no_heartbeat && has_reset) 10112 return true; 10113 10114 return false; 10115 } 10116 10117 /* rtnl_lock is acquired before calling this function */ 10118 static void bnxt_force_fw_reset(struct bnxt *bp) 10119 { 10120 struct bnxt_fw_health *fw_health = bp->fw_health; 10121 u32 wait_dsecs; 10122 10123 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 10124 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10125 return; 10126 10127 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10128 bnxt_fw_reset_close(bp); 10129 wait_dsecs = fw_health->master_func_wait_dsecs; 10130 if (fw_health->master) { 10131 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 10132 wait_dsecs = 0; 10133 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10134 } else { 10135 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 10136 wait_dsecs = fw_health->normal_func_wait_dsecs; 10137 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10138 } 10139 10140 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 10141 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 10142 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10143 } 10144 10145 void bnxt_fw_exception(struct bnxt *bp) 10146 { 10147 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 10148 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10149 bnxt_rtnl_lock_sp(bp); 10150 bnxt_force_fw_reset(bp); 10151 bnxt_rtnl_unlock_sp(bp); 10152 } 10153 10154 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 10155 * < 0 on error. 
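 * Returns 0 for a VF, or when no VFs are registered and no SR-IOV
 * configuration is pending.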
10156 */ 10157 static int bnxt_get_registered_vfs(struct bnxt *bp) 10158 { 10159 #ifdef CONFIG_BNXT_SRIOV 10160 int rc; 10161 10162 if (!BNXT_PF(bp)) 10163 return 0; 10164 10165 rc = bnxt_hwrm_func_qcfg(bp); 10166 if (rc) { 10167 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 10168 return rc; 10169 } 10170 if (bp->pf.registered_vfs) 10171 return bp->pf.registered_vfs; 10172 if (bp->sriov_cfg) 10173 return 1; 10174 #endif 10175 return 0; 10176 } 10177 10178 void bnxt_fw_reset(struct bnxt *bp) 10179 { 10180 bnxt_rtnl_lock_sp(bp); 10181 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 10182 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10183 int n = 0, tmo; 10184 10185 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10186 if (bp->pf.active_vfs && 10187 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10188 n = bnxt_get_registered_vfs(bp); 10189 if (n < 0) { 10190 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 10191 n); 10192 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10193 dev_close(bp->dev); 10194 goto fw_reset_exit; 10195 } else if (n > 0) { 10196 u16 vf_tmo_dsecs = n * 10; 10197 10198 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 10199 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 10200 bp->fw_reset_state = 10201 BNXT_FW_RESET_STATE_POLL_VF; 10202 bnxt_queue_fw_reset_work(bp, HZ / 10); 10203 goto fw_reset_exit; 10204 } 10205 bnxt_fw_reset_close(bp); 10206 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10207 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10208 tmo = HZ / 10; 10209 } else { 10210 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10211 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10212 } 10213 bnxt_queue_fw_reset_work(bp, tmo); 10214 } 10215 fw_reset_exit: 10216 bnxt_rtnl_unlock_sp(bp); 10217 } 10218 10219 static void bnxt_chk_missed_irq(struct bnxt *bp) 10220 { 10221 int i; 10222 10223 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 10224 return; 10225 10226 for (i = 0; i < bp->cp_nr_rings; i++) { 10227 struct bnxt_napi *bnapi = bp->bnapi[i]; 10228 struct bnxt_cp_ring_info *cpr; 10229 u32 fw_ring_id; 10230 int j; 10231 10232 if (!bnapi) 10233 continue; 10234 10235 cpr = &bnapi->cp_ring; 10236 for (j = 0; j < 2; j++) { 10237 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 10238 u32 val[2]; 10239 10240 if (!cpr2 || cpr2->has_more_work || 10241 !bnxt_has_work(bp, cpr2)) 10242 continue; 10243 10244 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 10245 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 10246 continue; 10247 } 10248 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 10249 bnxt_dbg_hwrm_ring_info_get(bp, 10250 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 10251 fw_ring_id, &val[0], &val[1]); 10252 cpr->missed_irqs++; 10253 } 10254 } 10255 } 10256 10257 static void bnxt_cfg_ntp_filters(struct bnxt *); 10258 10259 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 10260 { 10261 struct bnxt_link_info *link_info = &bp->link_info; 10262 10263 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 10264 link_info->autoneg = BNXT_AUTONEG_SPEED; 10265 if (bp->hwrm_spec_code >= 0x10201) { 10266 if (link_info->auto_pause_setting & 10267 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 10268 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10269 } else { 10270 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10271 } 10272 link_info->advertising = link_info->auto_link_speeds; 10273 } else { 10274 link_info->req_link_speed = link_info->force_link_speed; 10275 link_info->req_duplex = link_info->duplex_setting; 10276 } 10277 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 10278 
link_info->req_flow_ctrl = 10279 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 10280 else 10281 link_info->req_flow_ctrl = link_info->force_pause_setting; 10282 } 10283 10284 static void bnxt_sp_task(struct work_struct *work) 10285 { 10286 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 10287 10288 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10289 smp_mb__after_atomic(); 10290 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10291 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10292 return; 10293 } 10294 10295 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 10296 bnxt_cfg_rx_mode(bp); 10297 10298 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 10299 bnxt_cfg_ntp_filters(bp); 10300 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 10301 bnxt_hwrm_exec_fwd_req(bp); 10302 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10303 bnxt_hwrm_tunnel_dst_port_alloc( 10304 bp, bp->vxlan_port, 10305 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10306 } 10307 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10308 bnxt_hwrm_tunnel_dst_port_free( 10309 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10310 } 10311 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10312 bnxt_hwrm_tunnel_dst_port_alloc( 10313 bp, bp->nge_port, 10314 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10315 } 10316 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10317 bnxt_hwrm_tunnel_dst_port_free( 10318 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10319 } 10320 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 10321 bnxt_hwrm_port_qstats(bp); 10322 bnxt_hwrm_port_qstats_ext(bp); 10323 bnxt_hwrm_pcie_qstats(bp); 10324 } 10325 10326 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 10327 int rc; 10328 10329 mutex_lock(&bp->link_lock); 10330 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 10331 &bp->sp_event)) 10332 bnxt_hwrm_phy_qcaps(bp); 10333 10334 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10335 &bp->sp_event)) 10336 bnxt_init_ethtool_link_settings(bp); 10337 10338 rc = bnxt_update_link(bp, true); 10339 mutex_unlock(&bp->link_lock); 10340 if (rc) 10341 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 10342 rc); 10343 } 10344 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 10345 int rc; 10346 10347 mutex_lock(&bp->link_lock); 10348 rc = bnxt_update_phy_setting(bp); 10349 mutex_unlock(&bp->link_lock); 10350 if (rc) { 10351 netdev_warn(bp->dev, "update phy settings retry failed\n"); 10352 } else { 10353 bp->link_info.phy_retry = false; 10354 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 10355 } 10356 } 10357 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 10358 mutex_lock(&bp->link_lock); 10359 bnxt_get_port_module_status(bp); 10360 mutex_unlock(&bp->link_lock); 10361 } 10362 10363 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 10364 bnxt_tc_flow_stats_work(bp); 10365 10366 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 10367 bnxt_chk_missed_irq(bp); 10368 10369 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 10370 * must be the last functions to be called before exiting. 
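 * (bnxt_reset() temporarily drops BNXT_STATE_IN_SP_TASK around
 * rtnl_lock via bnxt_rtnl_lock_sp()/bnxt_rtnl_unlock_sp().)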
10371 */ 10372 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 10373 bnxt_reset(bp, false); 10374 10375 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 10376 bnxt_reset(bp, true); 10377 10378 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 10379 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 10380 10381 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 10382 if (!is_bnxt_fw_ok(bp)) 10383 bnxt_devlink_health_report(bp, 10384 BNXT_FW_EXCEPTION_SP_EVENT); 10385 } 10386 10387 smp_mb__before_atomic(); 10388 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10389 } 10390 10391 /* Under rtnl_lock */ 10392 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 10393 int tx_xdp) 10394 { 10395 int max_rx, max_tx, tx_sets = 1; 10396 int tx_rings_needed, stats; 10397 int rx_rings = rx; 10398 int cp, vnics, rc; 10399 10400 if (tcs) 10401 tx_sets = tcs; 10402 10403 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 10404 if (rc) 10405 return rc; 10406 10407 if (max_rx < rx) 10408 return -ENOMEM; 10409 10410 tx_rings_needed = tx * tx_sets + tx_xdp; 10411 if (max_tx < tx_rings_needed) 10412 return -ENOMEM; 10413 10414 vnics = 1; 10415 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 10416 vnics += rx_rings; 10417 10418 if (bp->flags & BNXT_FLAG_AGG_RINGS) 10419 rx_rings <<= 1; 10420 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 10421 stats = cp; 10422 if (BNXT_NEW_RM(bp)) { 10423 cp += bnxt_get_ulp_msix_num(bp); 10424 stats += bnxt_get_ulp_stat_ctxs(bp); 10425 } 10426 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 10427 stats, vnics); 10428 } 10429 10430 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 10431 { 10432 if (bp->bar2) { 10433 pci_iounmap(pdev, bp->bar2); 10434 bp->bar2 = NULL; 10435 } 10436 10437 if (bp->bar1) { 10438 pci_iounmap(pdev, bp->bar1); 10439 bp->bar1 = NULL; 10440 } 10441 10442 if (bp->bar0) { 10443 pci_iounmap(pdev, bp->bar0); 10444 bp->bar0 = NULL; 10445 } 10446 } 10447 10448 static void bnxt_cleanup_pci(struct bnxt *bp) 10449 { 10450 bnxt_unmap_bars(bp, bp->pdev); 10451 pci_release_regions(bp->pdev); 10452 if (pci_is_enabled(bp->pdev)) 10453 pci_disable_device(bp->pdev); 10454 } 10455 10456 static void bnxt_init_dflt_coal(struct bnxt *bp) 10457 { 10458 struct bnxt_coal *coal; 10459 10460 /* Tick values in micro seconds. 10461 * 1 coal_buf x bufs_per_record = 1 completion record. 
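 * These are only initial defaults; they can be changed later through
 * the ethtool coalescing interface.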
10462 */ 10463 coal = &bp->rx_coal; 10464 coal->coal_ticks = 10; 10465 coal->coal_bufs = 30; 10466 coal->coal_ticks_irq = 1; 10467 coal->coal_bufs_irq = 2; 10468 coal->idle_thresh = 50; 10469 coal->bufs_per_record = 2; 10470 coal->budget = 64; /* NAPI budget */ 10471 10472 coal = &bp->tx_coal; 10473 coal->coal_ticks = 28; 10474 coal->coal_bufs = 30; 10475 coal->coal_ticks_irq = 2; 10476 coal->coal_bufs_irq = 2; 10477 coal->bufs_per_record = 1; 10478 10479 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 10480 } 10481 10482 static void bnxt_alloc_fw_health(struct bnxt *bp) 10483 { 10484 if (bp->fw_health) 10485 return; 10486 10487 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 10488 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 10489 return; 10490 10491 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 10492 if (!bp->fw_health) { 10493 netdev_warn(bp->dev, "Failed to allocate fw_health\n"); 10494 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 10495 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 10496 } 10497 } 10498 10499 static int bnxt_fw_init_one_p1(struct bnxt *bp) 10500 { 10501 int rc; 10502 10503 bp->fw_cap = 0; 10504 rc = bnxt_hwrm_ver_get(bp); 10505 if (rc) 10506 return rc; 10507 10508 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 10509 rc = bnxt_alloc_kong_hwrm_resources(bp); 10510 if (rc) 10511 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 10512 } 10513 10514 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 10515 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 10516 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 10517 if (rc) 10518 return rc; 10519 } 10520 rc = bnxt_hwrm_func_reset(bp); 10521 if (rc) 10522 return -ENODEV; 10523 10524 bnxt_hwrm_fw_set_time(bp); 10525 return 0; 10526 } 10527 10528 static int bnxt_fw_init_one_p2(struct bnxt *bp) 10529 { 10530 int rc; 10531 10532 /* Get the MAX capabilities for this function */ 10533 rc = bnxt_hwrm_func_qcaps(bp); 10534 if (rc) { 10535 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 10536 rc); 10537 return -ENODEV; 10538 } 10539 10540 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 10541 if (rc) 10542 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 10543 rc); 10544 10545 bnxt_alloc_fw_health(bp); 10546 rc = bnxt_hwrm_error_recovery_qcfg(bp); 10547 if (rc) 10548 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 10549 rc); 10550 10551 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 10552 if (rc) 10553 return -ENODEV; 10554 10555 bnxt_hwrm_func_qcfg(bp); 10556 bnxt_hwrm_vnic_qcaps(bp); 10557 bnxt_hwrm_port_led_qcaps(bp); 10558 bnxt_ethtool_init(bp); 10559 bnxt_dcb_init(bp); 10560 return 0; 10561 } 10562 10563 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 10564 { 10565 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 10566 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 10567 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 10568 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 10569 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 10570 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 10571 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 10572 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 10573 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10574 } 10575 } 10576 10577 static void bnxt_set_dflt_rfs(struct bnxt *bp) 10578 { 10579 struct net_device *dev = bp->dev; 10580 10581 dev->hw_features &= ~NETIF_F_NTUPLE; 10582 dev->features &= ~NETIF_F_NTUPLE; 10583 bp->flags &= ~BNXT_FLAG_RFS; 10584 if (bnxt_rfs_supported(bp)) { 10585 dev->hw_features |= NETIF_F_NTUPLE; 10586 if (bnxt_rfs_capable(bp)) { 10587 bp->flags |= BNXT_FLAG_RFS; 10588 
dev->features |= NETIF_F_NTUPLE; 10589 } 10590 } 10591 } 10592 10593 static void bnxt_fw_init_one_p3(struct bnxt *bp) 10594 { 10595 struct pci_dev *pdev = bp->pdev; 10596 10597 bnxt_set_dflt_rss_hash_type(bp); 10598 bnxt_set_dflt_rfs(bp); 10599 10600 bnxt_get_wol_settings(bp); 10601 if (bp->flags & BNXT_FLAG_WOL_CAP) 10602 device_set_wakeup_enable(&pdev->dev, bp->wol); 10603 else 10604 device_set_wakeup_capable(&pdev->dev, false); 10605 10606 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 10607 bnxt_hwrm_coal_params_qcaps(bp); 10608 } 10609 10610 static int bnxt_fw_init_one(struct bnxt *bp) 10611 { 10612 int rc; 10613 10614 rc = bnxt_fw_init_one_p1(bp); 10615 if (rc) { 10616 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 10617 return rc; 10618 } 10619 rc = bnxt_fw_init_one_p2(bp); 10620 if (rc) { 10621 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 10622 return rc; 10623 } 10624 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 10625 if (rc) 10626 return rc; 10627 10628 /* In case fw capabilities have changed, destroy the unneeded 10629 * reporters and create newly capable ones. 10630 */ 10631 bnxt_dl_fw_reporters_destroy(bp, false); 10632 bnxt_dl_fw_reporters_create(bp); 10633 bnxt_fw_init_one_p3(bp); 10634 return 0; 10635 } 10636 10637 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 10638 { 10639 struct bnxt_fw_health *fw_health = bp->fw_health; 10640 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 10641 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 10642 u32 reg_type, reg_off, delay_msecs; 10643 10644 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 10645 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 10646 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 10647 switch (reg_type) { 10648 case BNXT_FW_HEALTH_REG_TYPE_CFG: 10649 pci_write_config_dword(bp->pdev, reg_off, val); 10650 break; 10651 case BNXT_FW_HEALTH_REG_TYPE_GRC: 10652 writel(reg_off & BNXT_GRC_BASE_MASK, 10653 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 10654 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 10655 /* fall through */ 10656 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 10657 writel(val, bp->bar0 + reg_off); 10658 break; 10659 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 10660 writel(val, bp->bar1 + reg_off); 10661 break; 10662 } 10663 if (delay_msecs) { 10664 pci_read_config_dword(bp->pdev, 0, &val); 10665 msleep(delay_msecs); 10666 } 10667 } 10668 10669 static void bnxt_reset_all(struct bnxt *bp) 10670 { 10671 struct bnxt_fw_health *fw_health = bp->fw_health; 10672 int i, rc; 10673 10674 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10675 #ifdef CONFIG_TEE_BNXT_FW 10676 rc = tee_bnxt_fw_load(); 10677 if (rc) 10678 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc); 10679 bp->fw_reset_timestamp = jiffies; 10680 #endif 10681 return; 10682 } 10683 10684 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 10685 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 10686 bnxt_fw_reset_writel(bp, i); 10687 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 10688 struct hwrm_fw_reset_input req = {0}; 10689 10690 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 10691 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 10692 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 10693 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 10694 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 10695 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10696 if (rc) 10697 netdev_warn(bp->dev, "Unable to reset FW 
rc=%d\n", rc); 10698 } 10699 bp->fw_reset_timestamp = jiffies; 10700 } 10701 10702 static void bnxt_fw_reset_task(struct work_struct *work) 10703 { 10704 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 10705 int rc; 10706 10707 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10708 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 10709 return; 10710 } 10711 10712 switch (bp->fw_reset_state) { 10713 case BNXT_FW_RESET_STATE_POLL_VF: { 10714 int n = bnxt_get_registered_vfs(bp); 10715 int tmo; 10716 10717 if (n < 0) { 10718 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 10719 n, jiffies_to_msecs(jiffies - 10720 bp->fw_reset_timestamp)); 10721 goto fw_reset_abort; 10722 } else if (n > 0) { 10723 if (time_after(jiffies, bp->fw_reset_timestamp + 10724 (bp->fw_reset_max_dsecs * HZ / 10))) { 10725 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10726 bp->fw_reset_state = 0; 10727 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 10728 n); 10729 return; 10730 } 10731 bnxt_queue_fw_reset_work(bp, HZ / 10); 10732 return; 10733 } 10734 bp->fw_reset_timestamp = jiffies; 10735 rtnl_lock(); 10736 bnxt_fw_reset_close(bp); 10737 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10738 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10739 tmo = HZ / 10; 10740 } else { 10741 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10742 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10743 } 10744 rtnl_unlock(); 10745 bnxt_queue_fw_reset_work(bp, tmo); 10746 return; 10747 } 10748 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 10749 u32 val; 10750 10751 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10752 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 10753 !time_after(jiffies, bp->fw_reset_timestamp + 10754 (bp->fw_reset_max_dsecs * HZ / 10))) { 10755 bnxt_queue_fw_reset_work(bp, HZ / 5); 10756 return; 10757 } 10758 10759 if (!bp->fw_health->master) { 10760 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 10761 10762 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10763 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10764 return; 10765 } 10766 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10767 } 10768 /* fall through */ 10769 case BNXT_FW_RESET_STATE_RESET_FW: 10770 bnxt_reset_all(bp); 10771 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10772 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 10773 return; 10774 case BNXT_FW_RESET_STATE_ENABLE_DEV: 10775 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 10776 u32 val; 10777 10778 val = bnxt_fw_health_readl(bp, 10779 BNXT_FW_RESET_INPROG_REG); 10780 if (val) 10781 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", 10782 val); 10783 } 10784 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10785 if (pci_enable_device(bp->pdev)) { 10786 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 10787 goto fw_reset_abort; 10788 } 10789 pci_set_master(bp->pdev); 10790 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 10791 /* fall through */ 10792 case BNXT_FW_RESET_STATE_POLL_FW: 10793 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 10794 rc = __bnxt_hwrm_ver_get(bp, true); 10795 if (rc) { 10796 if (time_after(jiffies, bp->fw_reset_timestamp + 10797 (bp->fw_reset_max_dsecs * HZ / 10))) { 10798 netdev_err(bp->dev, "Firmware reset aborted\n"); 10799 goto fw_reset_abort; 10800 } 10801 bnxt_queue_fw_reset_work(bp, HZ / 5); 10802 return; 10803 } 10804 
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10805 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 10806 /* fall through */ 10807 case BNXT_FW_RESET_STATE_OPENING: 10808 while (!rtnl_trylock()) { 10809 bnxt_queue_fw_reset_work(bp, HZ / 10); 10810 return; 10811 } 10812 rc = bnxt_open(bp->dev); 10813 if (rc) { 10814 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 10815 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10816 dev_close(bp->dev); 10817 } 10818 10819 bp->fw_reset_state = 0; 10820 /* Make sure fw_reset_state is 0 before clearing the flag */ 10821 smp_mb__before_atomic(); 10822 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10823 bnxt_ulp_start(bp, rc); 10824 if (!rc) 10825 bnxt_reenable_sriov(bp); 10826 bnxt_dl_health_recovery_done(bp); 10827 bnxt_dl_health_status_update(bp, true); 10828 rtnl_unlock(); 10829 break; 10830 } 10831 return; 10832 10833 fw_reset_abort: 10834 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10835 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 10836 bnxt_dl_health_status_update(bp, false); 10837 bp->fw_reset_state = 0; 10838 rtnl_lock(); 10839 dev_close(bp->dev); 10840 rtnl_unlock(); 10841 } 10842 10843 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 10844 { 10845 int rc; 10846 struct bnxt *bp = netdev_priv(dev); 10847 10848 SET_NETDEV_DEV(dev, &pdev->dev); 10849 10850 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 10851 rc = pci_enable_device(pdev); 10852 if (rc) { 10853 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 10854 goto init_err; 10855 } 10856 10857 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 10858 dev_err(&pdev->dev, 10859 "Cannot find PCI device base address, aborting\n"); 10860 rc = -ENODEV; 10861 goto init_err_disable; 10862 } 10863 10864 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 10865 if (rc) { 10866 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 10867 goto init_err_disable; 10868 } 10869 10870 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 10871 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 10872 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 10873 goto init_err_disable; 10874 } 10875 10876 pci_set_master(pdev); 10877 10878 bp->dev = dev; 10879 bp->pdev = pdev; 10880 10881 bp->bar0 = pci_ioremap_bar(pdev, 0); 10882 if (!bp->bar0) { 10883 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 10884 rc = -ENOMEM; 10885 goto init_err_release; 10886 } 10887 10888 bp->bar1 = pci_ioremap_bar(pdev, 2); 10889 if (!bp->bar1) { 10890 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 10891 rc = -ENOMEM; 10892 goto init_err_release; 10893 } 10894 10895 bp->bar2 = pci_ioremap_bar(pdev, 4); 10896 if (!bp->bar2) { 10897 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 10898 rc = -ENOMEM; 10899 goto init_err_release; 10900 } 10901 10902 pci_enable_pcie_error_reporting(pdev); 10903 10904 INIT_WORK(&bp->sp_task, bnxt_sp_task); 10905 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 10906 10907 spin_lock_init(&bp->ntp_fltr_lock); 10908 #if BITS_PER_LONG == 32 10909 spin_lock_init(&bp->db_lock); 10910 #endif 10911 10912 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 10913 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 10914 10915 bnxt_init_dflt_coal(bp); 10916 10917 timer_setup(&bp->timer, bnxt_timer, 0); 10918 bp->current_interval = BNXT_TIMER_INTERVAL; 10919 10920 clear_bit(BNXT_STATE_OPEN, &bp->state); 10921 return 0; 10922 10923 init_err_release: 10924 
bnxt_unmap_bars(bp, pdev); 10925 pci_release_regions(pdev); 10926 10927 init_err_disable: 10928 pci_disable_device(pdev); 10929 10930 init_err: 10931 return rc; 10932 } 10933 10934 /* rtnl_lock held */ 10935 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 10936 { 10937 struct sockaddr *addr = p; 10938 struct bnxt *bp = netdev_priv(dev); 10939 int rc = 0; 10940 10941 if (!is_valid_ether_addr(addr->sa_data)) 10942 return -EADDRNOTAVAIL; 10943 10944 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 10945 return 0; 10946 10947 rc = bnxt_approve_mac(bp, addr->sa_data, true); 10948 if (rc) 10949 return rc; 10950 10951 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 10952 if (netif_running(dev)) { 10953 bnxt_close_nic(bp, false, false); 10954 rc = bnxt_open_nic(bp, false, false); 10955 } 10956 10957 return rc; 10958 } 10959 10960 /* rtnl_lock held */ 10961 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 10962 { 10963 struct bnxt *bp = netdev_priv(dev); 10964 10965 if (netif_running(dev)) 10966 bnxt_close_nic(bp, true, false); 10967 10968 dev->mtu = new_mtu; 10969 bnxt_set_ring_params(bp); 10970 10971 if (netif_running(dev)) 10972 return bnxt_open_nic(bp, true, false); 10973 10974 return 0; 10975 } 10976 10977 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 10978 { 10979 struct bnxt *bp = netdev_priv(dev); 10980 bool sh = false; 10981 int rc; 10982 10983 if (tc > bp->max_tc) { 10984 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 10985 tc, bp->max_tc); 10986 return -EINVAL; 10987 } 10988 10989 if (netdev_get_num_tc(dev) == tc) 10990 return 0; 10991 10992 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 10993 sh = true; 10994 10995 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 10996 sh, tc, bp->tx_nr_rings_xdp); 10997 if (rc) 10998 return rc; 10999 11000 /* Needs to close the device and do hw resource re-allocations */ 11001 if (netif_running(bp->dev)) 11002 bnxt_close_nic(bp, true, false); 11003 11004 if (tc) { 11005 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 11006 netdev_set_num_tc(dev, tc); 11007 } else { 11008 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11009 netdev_reset_tc(dev); 11010 } 11011 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 11012 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 11013 bp->tx_nr_rings + bp->rx_nr_rings; 11014 11015 if (netif_running(bp->dev)) 11016 return bnxt_open_nic(bp, true, false); 11017 11018 return 0; 11019 } 11020 11021 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 11022 void *cb_priv) 11023 { 11024 struct bnxt *bp = cb_priv; 11025 11026 if (!bnxt_tc_flower_enabled(bp) || 11027 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 11028 return -EOPNOTSUPP; 11029 11030 switch (type) { 11031 case TC_SETUP_CLSFLOWER: 11032 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 11033 default: 11034 return -EOPNOTSUPP; 11035 } 11036 } 11037 11038 LIST_HEAD(bnxt_block_cb_list); 11039 11040 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 11041 void *type_data) 11042 { 11043 struct bnxt *bp = netdev_priv(dev); 11044 11045 switch (type) { 11046 case TC_SETUP_BLOCK: 11047 return flow_block_cb_setup_simple(type_data, 11048 &bnxt_block_cb_list, 11049 bnxt_setup_tc_block_cb, 11050 bp, bp, true); 11051 case TC_SETUP_QDISC_MQPRIO: { 11052 struct tc_mqprio_qopt *mqprio = type_data; 11053 11054 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 11055 11056 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 11057 } 11058 default: 11059 return -EOPNOTSUPP; 11060 } 11061 } 11062 11063 #ifdef CONFIG_RFS_ACCEL 11064 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 11065 struct bnxt_ntuple_filter *f2) 11066 { 11067 struct flow_keys *keys1 = &f1->fkeys; 11068 struct flow_keys *keys2 = &f2->fkeys; 11069 11070 if (keys1->basic.n_proto != keys2->basic.n_proto || 11071 keys1->basic.ip_proto != keys2->basic.ip_proto) 11072 return false; 11073 11074 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 11075 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 11076 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 11077 return false; 11078 } else { 11079 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 11080 sizeof(keys1->addrs.v6addrs.src)) || 11081 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 11082 sizeof(keys1->addrs.v6addrs.dst))) 11083 return false; 11084 } 11085 11086 if (keys1->ports.ports == keys2->ports.ports && 11087 keys1->control.flags == keys2->control.flags && 11088 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 11089 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 11090 return true; 11091 11092 return false; 11093 } 11094 11095 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 11096 u16 rxq_index, u32 flow_id) 11097 { 11098 struct bnxt *bp = netdev_priv(dev); 11099 struct bnxt_ntuple_filter *fltr, *new_fltr; 11100 struct flow_keys *fkeys; 11101 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 11102 int rc = 0, idx, bit_id, l2_idx = 0; 11103 struct hlist_head *head; 11104 u32 flags; 11105 11106 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 11107 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11108 int off = 0, j; 11109 11110 netif_addr_lock_bh(dev); 11111 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 11112 if (ether_addr_equal(eth->h_dest, 11113 vnic->uc_list + off)) { 11114 l2_idx = j + 1; 11115 break; 11116 } 11117 } 11118 netif_addr_unlock_bh(dev); 11119 if (!l2_idx) 11120 return -EINVAL; 11121 } 11122 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 11123 if (!new_fltr) 11124 return -ENOMEM; 11125 11126 fkeys = &new_fltr->fkeys; 11127 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 11128 rc = -EPROTONOSUPPORT; 11129 goto err_free; 
11130 } 11131 11132 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 11133 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 11134 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 11135 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 11136 rc = -EPROTONOSUPPORT; 11137 goto err_free; 11138 } 11139 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 11140 bp->hwrm_spec_code < 0x10601) { 11141 rc = -EPROTONOSUPPORT; 11142 goto err_free; 11143 } 11144 flags = fkeys->control.flags; 11145 if (((flags & FLOW_DIS_ENCAPSULATION) && 11146 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 11147 rc = -EPROTONOSUPPORT; 11148 goto err_free; 11149 } 11150 11151 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 11152 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 11153 11154 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 11155 head = &bp->ntp_fltr_hash_tbl[idx]; 11156 rcu_read_lock(); 11157 hlist_for_each_entry_rcu(fltr, head, hash) { 11158 if (bnxt_fltr_match(fltr, new_fltr)) { 11159 rcu_read_unlock(); 11160 rc = 0; 11161 goto err_free; 11162 } 11163 } 11164 rcu_read_unlock(); 11165 11166 spin_lock_bh(&bp->ntp_fltr_lock); 11167 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 11168 BNXT_NTP_FLTR_MAX_FLTR, 0); 11169 if (bit_id < 0) { 11170 spin_unlock_bh(&bp->ntp_fltr_lock); 11171 rc = -ENOMEM; 11172 goto err_free; 11173 } 11174 11175 new_fltr->sw_id = (u16)bit_id; 11176 new_fltr->flow_id = flow_id; 11177 new_fltr->l2_fltr_idx = l2_idx; 11178 new_fltr->rxq = rxq_index; 11179 hlist_add_head_rcu(&new_fltr->hash, head); 11180 bp->ntp_fltr_count++; 11181 spin_unlock_bh(&bp->ntp_fltr_lock); 11182 11183 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11184 bnxt_queue_sp_work(bp); 11185 11186 return new_fltr->sw_id; 11187 11188 err_free: 11189 kfree(new_fltr); 11190 return rc; 11191 } 11192 11193 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11194 { 11195 int i; 11196 11197 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 11198 struct hlist_head *head; 11199 struct hlist_node *tmp; 11200 struct bnxt_ntuple_filter *fltr; 11201 int rc; 11202 11203 head = &bp->ntp_fltr_hash_tbl[i]; 11204 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 11205 bool del = false; 11206 11207 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 11208 if (rps_may_expire_flow(bp->dev, fltr->rxq, 11209 fltr->flow_id, 11210 fltr->sw_id)) { 11211 bnxt_hwrm_cfa_ntuple_filter_free(bp, 11212 fltr); 11213 del = true; 11214 } 11215 } else { 11216 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 11217 fltr); 11218 if (rc) 11219 del = true; 11220 else 11221 set_bit(BNXT_FLTR_VALID, &fltr->state); 11222 } 11223 11224 if (del) { 11225 spin_lock_bh(&bp->ntp_fltr_lock); 11226 hlist_del_rcu(&fltr->hash); 11227 bp->ntp_fltr_count--; 11228 spin_unlock_bh(&bp->ntp_fltr_lock); 11229 synchronize_rcu(); 11230 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 11231 kfree(fltr); 11232 } 11233 } 11234 } 11235 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 11236 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 11237 } 11238 11239 #else 11240 11241 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11242 { 11243 } 11244 11245 #endif /* CONFIG_RFS_ACCEL */ 11246 11247 static void bnxt_udp_tunnel_add(struct net_device *dev, 11248 struct udp_tunnel_info *ti) 11249 { 11250 struct bnxt *bp = netdev_priv(dev); 11251 11252 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11253 return; 11254 11255 if (!netif_running(dev)) 11256 return; 11257 11258 switch (ti->type) { 11259 case UDP_TUNNEL_TYPE_VXLAN: 11260 if 
(bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 11261 return; 11262 11263 bp->vxlan_port_cnt++; 11264 if (bp->vxlan_port_cnt == 1) { 11265 bp->vxlan_port = ti->port; 11266 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 11267 bnxt_queue_sp_work(bp); 11268 } 11269 break; 11270 case UDP_TUNNEL_TYPE_GENEVE: 11271 if (bp->nge_port_cnt && bp->nge_port != ti->port) 11272 return; 11273 11274 bp->nge_port_cnt++; 11275 if (bp->nge_port_cnt == 1) { 11276 bp->nge_port = ti->port; 11277 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 11278 } 11279 break; 11280 default: 11281 return; 11282 } 11283 11284 bnxt_queue_sp_work(bp); 11285 } 11286 11287 static void bnxt_udp_tunnel_del(struct net_device *dev, 11288 struct udp_tunnel_info *ti) 11289 { 11290 struct bnxt *bp = netdev_priv(dev); 11291 11292 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11293 return; 11294 11295 if (!netif_running(dev)) 11296 return; 11297 11298 switch (ti->type) { 11299 case UDP_TUNNEL_TYPE_VXLAN: 11300 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 11301 return; 11302 bp->vxlan_port_cnt--; 11303 11304 if (bp->vxlan_port_cnt != 0) 11305 return; 11306 11307 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 11308 break; 11309 case UDP_TUNNEL_TYPE_GENEVE: 11310 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 11311 return; 11312 bp->nge_port_cnt--; 11313 11314 if (bp->nge_port_cnt != 0) 11315 return; 11316 11317 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 11318 break; 11319 default: 11320 return; 11321 } 11322 11323 bnxt_queue_sp_work(bp); 11324 } 11325 11326 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 11327 struct net_device *dev, u32 filter_mask, 11328 int nlflags) 11329 { 11330 struct bnxt *bp = netdev_priv(dev); 11331 11332 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 11333 nlflags, filter_mask, NULL); 11334 } 11335 11336 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 11337 u16 flags, struct netlink_ext_ack *extack) 11338 { 11339 struct bnxt *bp = netdev_priv(dev); 11340 struct nlattr *attr, *br_spec; 11341 int rem, rc = 0; 11342 11343 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 11344 return -EOPNOTSUPP; 11345 11346 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 11347 if (!br_spec) 11348 return -EINVAL; 11349 11350 nla_for_each_nested(attr, br_spec, rem) { 11351 u16 mode; 11352 11353 if (nla_type(attr) != IFLA_BRIDGE_MODE) 11354 continue; 11355 11356 if (nla_len(attr) < sizeof(mode)) 11357 return -EINVAL; 11358 11359 mode = nla_get_u16(attr); 11360 if (mode == bp->br_mode) 11361 break; 11362 11363 rc = bnxt_hwrm_set_br_mode(bp, mode); 11364 if (!rc) 11365 bp->br_mode = mode; 11366 break; 11367 } 11368 return rc; 11369 } 11370 11371 int bnxt_get_port_parent_id(struct net_device *dev, 11372 struct netdev_phys_item_id *ppid) 11373 { 11374 struct bnxt *bp = netdev_priv(dev); 11375 11376 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 11377 return -EOPNOTSUPP; 11378 11379 /* The PF and it's VF-reps only support the switchdev framework */ 11380 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 11381 return -EOPNOTSUPP; 11382 11383 ppid->id_len = sizeof(bp->dsn); 11384 memcpy(ppid->id, bp->dsn, ppid->id_len); 11385 11386 return 0; 11387 } 11388 11389 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 11390 { 11391 struct bnxt *bp = netdev_priv(dev); 11392 11393 return &bp->dl_port; 11394 } 11395 11396 static const struct 
net_device_ops bnxt_netdev_ops = { 11397 .ndo_open = bnxt_open, 11398 .ndo_start_xmit = bnxt_start_xmit, 11399 .ndo_stop = bnxt_close, 11400 .ndo_get_stats64 = bnxt_get_stats64, 11401 .ndo_set_rx_mode = bnxt_set_rx_mode, 11402 .ndo_do_ioctl = bnxt_ioctl, 11403 .ndo_validate_addr = eth_validate_addr, 11404 .ndo_set_mac_address = bnxt_change_mac_addr, 11405 .ndo_change_mtu = bnxt_change_mtu, 11406 .ndo_fix_features = bnxt_fix_features, 11407 .ndo_set_features = bnxt_set_features, 11408 .ndo_tx_timeout = bnxt_tx_timeout, 11409 #ifdef CONFIG_BNXT_SRIOV 11410 .ndo_get_vf_config = bnxt_get_vf_config, 11411 .ndo_set_vf_mac = bnxt_set_vf_mac, 11412 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 11413 .ndo_set_vf_rate = bnxt_set_vf_bw, 11414 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 11415 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 11416 .ndo_set_vf_trust = bnxt_set_vf_trust, 11417 #endif 11418 .ndo_setup_tc = bnxt_setup_tc, 11419 #ifdef CONFIG_RFS_ACCEL 11420 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 11421 #endif 11422 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 11423 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 11424 .ndo_bpf = bnxt_xdp, 11425 .ndo_xdp_xmit = bnxt_xdp_xmit, 11426 .ndo_bridge_getlink = bnxt_bridge_getlink, 11427 .ndo_bridge_setlink = bnxt_bridge_setlink, 11428 .ndo_get_devlink_port = bnxt_get_devlink_port, 11429 }; 11430 11431 static void bnxt_remove_one(struct pci_dev *pdev) 11432 { 11433 struct net_device *dev = pci_get_drvdata(pdev); 11434 struct bnxt *bp = netdev_priv(dev); 11435 11436 if (BNXT_PF(bp)) 11437 bnxt_sriov_disable(bp); 11438 11439 bnxt_dl_fw_reporters_destroy(bp, true); 11440 if (BNXT_PF(bp)) 11441 devlink_port_type_clear(&bp->dl_port); 11442 pci_disable_pcie_error_reporting(pdev); 11443 unregister_netdev(dev); 11444 bnxt_dl_unregister(bp); 11445 bnxt_shutdown_tc(bp); 11446 bnxt_cancel_sp_work(bp); 11447 bp->sp_event = 0; 11448 11449 bnxt_clear_int_mode(bp); 11450 bnxt_hwrm_func_drv_unrgtr(bp); 11451 bnxt_free_hwrm_resources(bp); 11452 bnxt_free_hwrm_short_cmd_req(bp); 11453 bnxt_ethtool_free(bp); 11454 bnxt_dcb_free(bp); 11455 kfree(bp->edev); 11456 bp->edev = NULL; 11457 kfree(bp->fw_health); 11458 bp->fw_health = NULL; 11459 bnxt_cleanup_pci(bp); 11460 bnxt_free_ctx_mem(bp); 11461 kfree(bp->ctx); 11462 bp->ctx = NULL; 11463 bnxt_free_port_stats(bp); 11464 free_netdev(dev); 11465 } 11466 11467 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 11468 { 11469 int rc = 0; 11470 struct bnxt_link_info *link_info = &bp->link_info; 11471 11472 rc = bnxt_hwrm_phy_qcaps(bp); 11473 if (rc) { 11474 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 11475 rc); 11476 return rc; 11477 } 11478 if (!fw_dflt) 11479 return 0; 11480 11481 rc = bnxt_update_link(bp, false); 11482 if (rc) { 11483 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 11484 rc); 11485 return rc; 11486 } 11487 11488 /* Older firmware does not have supported_auto_speeds, so assume 11489 * that all supported speeds can be autonegotiated. 
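 * (Newer firmware reports support_auto_speeds in the
 * HWRM_PORT_PHY_QCAPS response; see bnxt_hwrm_phy_qcaps() above.)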
11490 */ 11491 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 11492 link_info->support_auto_speeds = link_info->support_speeds; 11493 11494 bnxt_init_ethtool_link_settings(bp); 11495 return 0; 11496 } 11497 11498 static int bnxt_get_max_irq(struct pci_dev *pdev) 11499 { 11500 u16 ctrl; 11501 11502 if (!pdev->msix_cap) 11503 return 1; 11504 11505 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 11506 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 11507 } 11508 11509 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11510 int *max_cp) 11511 { 11512 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11513 int max_ring_grps = 0, max_irq; 11514 11515 *max_tx = hw_resc->max_tx_rings; 11516 *max_rx = hw_resc->max_rx_rings; 11517 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 11518 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 11519 bnxt_get_ulp_msix_num(bp), 11520 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 11521 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 11522 *max_cp = min_t(int, *max_cp, max_irq); 11523 max_ring_grps = hw_resc->max_hw_ring_grps; 11524 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 11525 *max_cp -= 1; 11526 *max_rx -= 2; 11527 } 11528 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11529 *max_rx >>= 1; 11530 if (bp->flags & BNXT_FLAG_CHIP_P5) { 11531 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 11532 /* On P5 chips, max_cp output param should be available NQs */ 11533 *max_cp = max_irq; 11534 } 11535 *max_rx = min_t(int, *max_rx, max_ring_grps); 11536 } 11537 11538 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 11539 { 11540 int rx, tx, cp; 11541 11542 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 11543 *max_rx = rx; 11544 *max_tx = tx; 11545 if (!rx || !tx || !cp) 11546 return -ENOMEM; 11547 11548 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 11549 } 11550 11551 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11552 bool shared) 11553 { 11554 int rc; 11555 11556 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11557 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 11558 /* Not enough rings, try disabling agg rings. */ 11559 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 11560 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11561 if (rc) { 11562 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 11563 bp->flags |= BNXT_FLAG_AGG_RINGS; 11564 return rc; 11565 } 11566 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 11567 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11568 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11569 bnxt_set_ring_params(bp); 11570 } 11571 11572 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 11573 int max_cp, max_stat, max_irq; 11574 11575 /* Reserve minimum resources for RoCE */ 11576 max_cp = bnxt_get_max_func_cp_rings(bp); 11577 max_stat = bnxt_get_max_func_stat_ctxs(bp); 11578 max_irq = bnxt_get_max_func_irqs(bp); 11579 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 11580 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 11581 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 11582 return 0; 11583 11584 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 11585 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 11586 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 11587 max_cp = min_t(int, max_cp, max_irq); 11588 max_cp = min_t(int, max_cp, max_stat); 11589 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 11590 if (rc) 11591 rc = 0; 11592 } 11593 return rc; 11594 } 11595 11596 /* In initial default shared ring setting, each shared ring must have a 11597 * RX/TX ring pair. 
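 * bnxt_trim_dflt_sh_rings() below therefore clamps the completion, RX
 * and TX ring counts to the smaller of the RX and TX defaults.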
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}
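/* Read the PCIe Device Serial Number capability (via pci_get_dsn()) and
 * store it little-endian in @dsn.  The PF uses this as the eswitch
 * switch_id; a missing DSN is logged but does not fail the probe.
 */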
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}
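/* PCI probe entry point.  The ordering below matters: the HWRM command
 * channel is set up before the two-phase firmware init
 * (bnxt_fw_init_one_p1/p2) that discovers device capabilities, and
 * default ring counts plus interrupt mode are settled before devlink,
 * TC offload state, and the netdev itself are registered.
 */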
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	bnxt_dl_register(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	if (BNXT_PF(bp))
		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}
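/* ->shutdown() hook used for reboot/kexec: close the netdev if it is
 * running, shut down the upper-layer (RoCE) driver, free the MSI-X
 * vectors, and, when the system is powering off, arm Wake-on-LAN and
 * put the function into D3hot.
 */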
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);
	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	if (bnxt_hwrm_queue_qportcfg(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	if (bp->hwrm_spec_code >= 0x10803) {
		if (bnxt_alloc_ctx_mem(bp)) {
			rc = -ENODEV;
			goto resume_exit;
		}
	}
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
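/* PCI Advanced Error Reporting (AER) recovery flow: the PCI core calls
 * ->error_detected() when a bus error is reported, ->slot_reset() after
 * the link/slot has been reset, and ->resume() once normal I/O may
 * restart.  The handlers below plug into that sequence.
 */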
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
		bnxt_ulp_start(bp, err);
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected = bnxt_io_error_detected,
	.slot_reset = bnxt_io_slot_reset,
	.resume = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnxt_pci_tbl,
	.probe = bnxt_init_one,
	.remove = bnxt_remove_one,
	.shutdown = bnxt_shutdown,
	.driver.pm = BNXT_PM_OPS,
	.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);