/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{
PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 183 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 184 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 185 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 186 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 187 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 188 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 189 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 190 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 191 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 192 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 193 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 194 #ifdef CONFIG_BNXT_SRIOV 195 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 196 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 197 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 198 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 199 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 201 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 202 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 203 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 204 #endif 205 { 0 } 206 }; 207 208 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 209 210 static const u16 bnxt_vf_req_snif[] = { 211 HWRM_FUNC_CFG, 212 HWRM_PORT_PHY_QCFG, 213 HWRM_CFA_L2_FILTER_ALLOC, 214 }; 215 216 static const u16 bnxt_async_events_arr[] = { 217 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 218 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 219 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 220 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 221 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 222 }; 223 224 static struct workqueue_struct *bnxt_pf_wq; 225 226 static bool bnxt_vf_pciid(enum board_idx idx) 227 { 228 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 229 idx == NETXTREME_S_VF); 230 } 231 232 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 233 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 234 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 235 236 #define BNXT_CP_DB_REARM(db, raw_cons) \ 237 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db) 238 239 #define BNXT_CP_DB(db, raw_cons) \ 240 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db) 241 242 #define BNXT_CP_DB_IRQ_DIS(db) \ 243 writel(DB_CP_IRQ_DIS_FLAGS, db) 244 245 const u16 bnxt_lhint_arr[] = { 246 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 247 TX_BD_FLAGS_LHINT_512_TO_1023, 248 TX_BD_FLAGS_LHINT_1024_TO_2047, 249 TX_BD_FLAGS_LHINT_1024_TO_2047, 250 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 251 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 252 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 253 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 254 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 255 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 256 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 257 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 258 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 259 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 260 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 261 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 262 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 263 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 264 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 265 }; 266 267 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 268 { 269 struct metadata_dst *md_dst = skb_metadata_dst(skb); 270 271 if 
(!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 272 return 0; 273 274 return md_dst->u.port_info.port_id; 275 } 276 277 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 278 { 279 struct bnxt *bp = netdev_priv(dev); 280 struct tx_bd *txbd; 281 struct tx_bd_ext *txbd1; 282 struct netdev_queue *txq; 283 int i; 284 dma_addr_t mapping; 285 unsigned int length, pad = 0; 286 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 287 u16 prod, last_frag; 288 struct pci_dev *pdev = bp->pdev; 289 struct bnxt_tx_ring_info *txr; 290 struct bnxt_sw_tx_bd *tx_buf; 291 292 i = skb_get_queue_mapping(skb); 293 if (unlikely(i >= bp->tx_nr_rings)) { 294 dev_kfree_skb_any(skb); 295 return NETDEV_TX_OK; 296 } 297 298 txq = netdev_get_tx_queue(dev, i); 299 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 300 prod = txr->tx_prod; 301 302 free_size = bnxt_tx_avail(bp, txr); 303 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 304 netif_tx_stop_queue(txq); 305 return NETDEV_TX_BUSY; 306 } 307 308 length = skb->len; 309 len = skb_headlen(skb); 310 last_frag = skb_shinfo(skb)->nr_frags; 311 312 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 313 314 txbd->tx_bd_opaque = prod; 315 316 tx_buf = &txr->tx_buf_ring[prod]; 317 tx_buf->skb = skb; 318 tx_buf->nr_frags = last_frag; 319 320 vlan_tag_flags = 0; 321 cfa_action = bnxt_xmit_get_cfa_action(skb); 322 if (skb_vlan_tag_present(skb)) { 323 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 324 skb_vlan_tag_get(skb); 325 /* Currently supports 8021Q, 8021AD vlan offloads 326 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 327 */ 328 if (skb->vlan_proto == htons(ETH_P_8021Q)) 329 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 330 } 331 332 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 333 struct tx_push_buffer *tx_push_buf = txr->tx_push; 334 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 335 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 336 void *pdata = tx_push_buf->data; 337 u64 *end; 338 int j, push_len; 339 340 /* Set COAL_NOW to be ready quickly for the next push */ 341 tx_push->tx_bd_len_flags_type = 342 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 343 TX_BD_TYPE_LONG_TX_BD | 344 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 345 TX_BD_FLAGS_COAL_NOW | 346 TX_BD_FLAGS_PACKET_END | 347 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 348 349 if (skb->ip_summed == CHECKSUM_PARTIAL) 350 tx_push1->tx_bd_hsize_lflags = 351 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 352 else 353 tx_push1->tx_bd_hsize_lflags = 0; 354 355 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 356 tx_push1->tx_bd_cfa_action = 357 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 358 359 end = pdata + length; 360 end = PTR_ALIGN(end, 8) - 1; 361 *end = 0; 362 363 skb_copy_from_linear_data(skb, pdata, len); 364 pdata += len; 365 for (j = 0; j < last_frag; j++) { 366 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 367 void *fptr; 368 369 fptr = skb_frag_address_safe(frag); 370 if (!fptr) 371 goto normal_tx; 372 373 memcpy(pdata, fptr, skb_frag_size(frag)); 374 pdata += skb_frag_size(frag); 375 } 376 377 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 378 txbd->tx_bd_haddr = txr->data_mapping; 379 prod = NEXT_TX(prod); 380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 381 memcpy(txbd, tx_push1, sizeof(*txbd)); 382 prod = NEXT_TX(prod); 383 tx_push->doorbell = 384 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 385 txr->tx_prod = prod; 386 387 tx_buf->is_push = 1; 388 netdev_tx_sent_queue(txq, skb->len); 389 wmb(); /* Sync is_push 
and byte queue before pushing data */ 390 391 push_len = (length + sizeof(*tx_push) + 7) / 8; 392 if (push_len > 16) { 393 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); 394 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, 395 (push_len - 16) << 1); 396 } else { 397 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 398 push_len); 399 } 400 401 goto tx_done; 402 } 403 404 normal_tx: 405 if (length < BNXT_MIN_PKT_SIZE) { 406 pad = BNXT_MIN_PKT_SIZE - length; 407 if (skb_pad(skb, pad)) { 408 /* SKB already freed. */ 409 tx_buf->skb = NULL; 410 return NETDEV_TX_OK; 411 } 412 length = BNXT_MIN_PKT_SIZE; 413 } 414 415 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 416 417 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 418 dev_kfree_skb_any(skb); 419 tx_buf->skb = NULL; 420 return NETDEV_TX_OK; 421 } 422 423 dma_unmap_addr_set(tx_buf, mapping, mapping); 424 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 425 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 426 427 txbd->tx_bd_haddr = cpu_to_le64(mapping); 428 429 prod = NEXT_TX(prod); 430 txbd1 = (struct tx_bd_ext *) 431 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 432 433 txbd1->tx_bd_hsize_lflags = 0; 434 if (skb_is_gso(skb)) { 435 u32 hdr_len; 436 437 if (skb->encapsulation) 438 hdr_len = skb_inner_network_offset(skb) + 439 skb_inner_network_header_len(skb) + 440 inner_tcp_hdrlen(skb); 441 else 442 hdr_len = skb_transport_offset(skb) + 443 tcp_hdrlen(skb); 444 445 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 446 TX_BD_FLAGS_T_IPID | 447 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 448 length = skb_shinfo(skb)->gso_size; 449 txbd1->tx_bd_mss = cpu_to_le32(length); 450 length += hdr_len; 451 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 452 txbd1->tx_bd_hsize_lflags = 453 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 454 txbd1->tx_bd_mss = 0; 455 } 456 457 length >>= 9; 458 flags |= bnxt_lhint_arr[length]; 459 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 460 461 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 462 txbd1->tx_bd_cfa_action = 463 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 464 for (i = 0; i < last_frag; i++) { 465 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 466 467 prod = NEXT_TX(prod); 468 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 469 470 len = skb_frag_size(frag); 471 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 472 DMA_TO_DEVICE); 473 474 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 475 goto tx_dma_error; 476 477 tx_buf = &txr->tx_buf_ring[prod]; 478 dma_unmap_addr_set(tx_buf, mapping, mapping); 479 480 txbd->tx_bd_haddr = cpu_to_le64(mapping); 481 482 flags = len << TX_BD_LEN_SHIFT; 483 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 484 } 485 486 flags &= ~TX_BD_LEN; 487 txbd->tx_bd_len_flags_type = 488 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 489 TX_BD_FLAGS_PACKET_END); 490 491 netdev_tx_sent_queue(txq, skb->len); 492 493 /* Sync BD data before updating doorbell */ 494 wmb(); 495 496 prod = NEXT_TX(prod); 497 txr->tx_prod = prod; 498 499 if (!skb->xmit_more || netif_xmit_stopped(txq)) 500 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); 501 502 tx_done: 503 504 mmiowb(); 505 506 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 507 if (skb->xmit_more && !tx_buf->is_push) 508 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); 509 510 netif_tx_stop_queue(txq); 511 512 /* netif_tx_stop_queue() must be done before checking 513 * tx index in bnxt_tx_avail() below, because in 514 * 
bnxt_tx_int(), we update tx index before checking for 515 * netif_tx_queue_stopped(). 516 */ 517 smp_mb(); 518 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 519 netif_tx_wake_queue(txq); 520 } 521 return NETDEV_TX_OK; 522 523 tx_dma_error: 524 last_frag = i; 525 526 /* start back at beginning and unmap skb */ 527 prod = txr->tx_prod; 528 tx_buf = &txr->tx_buf_ring[prod]; 529 tx_buf->skb = NULL; 530 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 531 skb_headlen(skb), PCI_DMA_TODEVICE); 532 prod = NEXT_TX(prod); 533 534 /* unmap remaining mapped pages */ 535 for (i = 0; i < last_frag; i++) { 536 prod = NEXT_TX(prod); 537 tx_buf = &txr->tx_buf_ring[prod]; 538 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 539 skb_frag_size(&skb_shinfo(skb)->frags[i]), 540 PCI_DMA_TODEVICE); 541 } 542 543 dev_kfree_skb_any(skb); 544 return NETDEV_TX_OK; 545 } 546 547 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 548 { 549 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 550 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 551 u16 cons = txr->tx_cons; 552 struct pci_dev *pdev = bp->pdev; 553 int i; 554 unsigned int tx_bytes = 0; 555 556 for (i = 0; i < nr_pkts; i++) { 557 struct bnxt_sw_tx_bd *tx_buf; 558 struct sk_buff *skb; 559 int j, last; 560 561 tx_buf = &txr->tx_buf_ring[cons]; 562 cons = NEXT_TX(cons); 563 skb = tx_buf->skb; 564 tx_buf->skb = NULL; 565 566 if (tx_buf->is_push) { 567 tx_buf->is_push = 0; 568 goto next_tx_int; 569 } 570 571 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 572 skb_headlen(skb), PCI_DMA_TODEVICE); 573 last = tx_buf->nr_frags; 574 575 for (j = 0; j < last; j++) { 576 cons = NEXT_TX(cons); 577 tx_buf = &txr->tx_buf_ring[cons]; 578 dma_unmap_page( 579 &pdev->dev, 580 dma_unmap_addr(tx_buf, mapping), 581 skb_frag_size(&skb_shinfo(skb)->frags[j]), 582 PCI_DMA_TODEVICE); 583 } 584 585 next_tx_int: 586 cons = NEXT_TX(cons); 587 588 tx_bytes += skb->len; 589 dev_kfree_skb_any(skb); 590 } 591 592 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 593 txr->tx_cons = cons; 594 595 /* Need to make the tx_cons update visible to bnxt_start_xmit() 596 * before checking for netif_tx_queue_stopped(). Without the 597 * memory barrier, there is a small possibility that bnxt_start_xmit() 598 * will miss it and cause the queue to be stopped forever. 
599 */ 600 smp_mb(); 601 602 if (unlikely(netif_tx_queue_stopped(txq)) && 603 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 604 __netif_tx_lock(txq, smp_processor_id()); 605 if (netif_tx_queue_stopped(txq) && 606 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 607 txr->dev_state != BNXT_DEV_STATE_CLOSING) 608 netif_tx_wake_queue(txq); 609 __netif_tx_unlock(txq); 610 } 611 } 612 613 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 614 gfp_t gfp) 615 { 616 struct device *dev = &bp->pdev->dev; 617 struct page *page; 618 619 page = alloc_page(gfp); 620 if (!page) 621 return NULL; 622 623 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 624 DMA_ATTR_WEAK_ORDERING); 625 if (dma_mapping_error(dev, *mapping)) { 626 __free_page(page); 627 return NULL; 628 } 629 *mapping += bp->rx_dma_offset; 630 return page; 631 } 632 633 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 634 gfp_t gfp) 635 { 636 u8 *data; 637 struct pci_dev *pdev = bp->pdev; 638 639 data = kmalloc(bp->rx_buf_size, gfp); 640 if (!data) 641 return NULL; 642 643 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 644 bp->rx_buf_use_size, bp->rx_dir, 645 DMA_ATTR_WEAK_ORDERING); 646 647 if (dma_mapping_error(&pdev->dev, *mapping)) { 648 kfree(data); 649 data = NULL; 650 } 651 return data; 652 } 653 654 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 655 u16 prod, gfp_t gfp) 656 { 657 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 658 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 659 dma_addr_t mapping; 660 661 if (BNXT_RX_PAGE_MODE(bp)) { 662 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); 663 664 if (!page) 665 return -ENOMEM; 666 667 rx_buf->data = page; 668 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 669 } else { 670 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 671 672 if (!data) 673 return -ENOMEM; 674 675 rx_buf->data = data; 676 rx_buf->data_ptr = data + bp->rx_offset; 677 } 678 rx_buf->mapping = mapping; 679 680 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 681 return 0; 682 } 683 684 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 685 { 686 u16 prod = rxr->rx_prod; 687 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 688 struct rx_bd *cons_bd, *prod_bd; 689 690 prod_rx_buf = &rxr->rx_buf_ring[prod]; 691 cons_rx_buf = &rxr->rx_buf_ring[cons]; 692 693 prod_rx_buf->data = data; 694 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 695 696 prod_rx_buf->mapping = cons_rx_buf->mapping; 697 698 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 699 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 700 701 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 702 } 703 704 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 705 { 706 u16 next, max = rxr->rx_agg_bmap_size; 707 708 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 709 if (next >= max) 710 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 711 return next; 712 } 713 714 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 715 struct bnxt_rx_ring_info *rxr, 716 u16 prod, gfp_t gfp) 717 { 718 struct rx_bd *rxbd = 719 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 720 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 721 struct pci_dev *pdev = bp->pdev; 722 struct page *page; 723 dma_addr_t mapping; 724 u16 sw_prod = rxr->rx_sw_agg_prod; 725 unsigned int offset = 0; 726 727 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 728 page = rxr->rx_page; 729 
if (!page) { 730 page = alloc_page(gfp); 731 if (!page) 732 return -ENOMEM; 733 rxr->rx_page = page; 734 rxr->rx_page_offset = 0; 735 } 736 offset = rxr->rx_page_offset; 737 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 738 if (rxr->rx_page_offset == PAGE_SIZE) 739 rxr->rx_page = NULL; 740 else 741 get_page(page); 742 } else { 743 page = alloc_page(gfp); 744 if (!page) 745 return -ENOMEM; 746 } 747 748 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 749 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 750 DMA_ATTR_WEAK_ORDERING); 751 if (dma_mapping_error(&pdev->dev, mapping)) { 752 __free_page(page); 753 return -EIO; 754 } 755 756 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 757 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 758 759 __set_bit(sw_prod, rxr->rx_agg_bmap); 760 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 761 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 762 763 rx_agg_buf->page = page; 764 rx_agg_buf->offset = offset; 765 rx_agg_buf->mapping = mapping; 766 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 767 rxbd->rx_bd_opaque = sw_prod; 768 return 0; 769 } 770 771 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, 772 u32 agg_bufs) 773 { 774 struct bnxt *bp = bnapi->bp; 775 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 776 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 777 u16 prod = rxr->rx_agg_prod; 778 u16 sw_prod = rxr->rx_sw_agg_prod; 779 u32 i; 780 781 for (i = 0; i < agg_bufs; i++) { 782 u16 cons; 783 struct rx_agg_cmp *agg; 784 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 785 struct rx_bd *prod_bd; 786 struct page *page; 787 788 agg = (struct rx_agg_cmp *) 789 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 790 cons = agg->rx_agg_cmp_opaque; 791 __clear_bit(cons, rxr->rx_agg_bmap); 792 793 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 794 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 795 796 __set_bit(sw_prod, rxr->rx_agg_bmap); 797 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 798 cons_rx_buf = &rxr->rx_agg_ring[cons]; 799 800 /* It is possible for sw_prod to be equal to cons, so 801 * set cons_rx_buf->page to NULL first. 
802 */ 803 page = cons_rx_buf->page; 804 cons_rx_buf->page = NULL; 805 prod_rx_buf->page = page; 806 prod_rx_buf->offset = cons_rx_buf->offset; 807 808 prod_rx_buf->mapping = cons_rx_buf->mapping; 809 810 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 811 812 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 813 prod_bd->rx_bd_opaque = sw_prod; 814 815 prod = NEXT_RX_AGG(prod); 816 sw_prod = NEXT_RX_AGG(sw_prod); 817 cp_cons = NEXT_CMP(cp_cons); 818 } 819 rxr->rx_agg_prod = prod; 820 rxr->rx_sw_agg_prod = sw_prod; 821 } 822 823 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 824 struct bnxt_rx_ring_info *rxr, 825 u16 cons, void *data, u8 *data_ptr, 826 dma_addr_t dma_addr, 827 unsigned int offset_and_len) 828 { 829 unsigned int payload = offset_and_len >> 16; 830 unsigned int len = offset_and_len & 0xffff; 831 struct skb_frag_struct *frag; 832 struct page *page = data; 833 u16 prod = rxr->rx_prod; 834 struct sk_buff *skb; 835 int off, err; 836 837 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 838 if (unlikely(err)) { 839 bnxt_reuse_rx_data(rxr, cons, data); 840 return NULL; 841 } 842 dma_addr -= bp->rx_dma_offset; 843 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 844 DMA_ATTR_WEAK_ORDERING); 845 846 if (unlikely(!payload)) 847 payload = eth_get_headlen(data_ptr, len); 848 849 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 850 if (!skb) { 851 __free_page(page); 852 return NULL; 853 } 854 855 off = (void *)data_ptr - page_address(page); 856 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 857 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 858 payload + NET_IP_ALIGN); 859 860 frag = &skb_shinfo(skb)->frags[0]; 861 skb_frag_size_sub(frag, payload); 862 frag->page_offset += payload; 863 skb->data_len -= payload; 864 skb->tail += payload; 865 866 return skb; 867 } 868 869 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 870 struct bnxt_rx_ring_info *rxr, u16 cons, 871 void *data, u8 *data_ptr, 872 dma_addr_t dma_addr, 873 unsigned int offset_and_len) 874 { 875 u16 prod = rxr->rx_prod; 876 struct sk_buff *skb; 877 int err; 878 879 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 880 if (unlikely(err)) { 881 bnxt_reuse_rx_data(rxr, cons, data); 882 return NULL; 883 } 884 885 skb = build_skb(data, 0); 886 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 887 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 888 if (!skb) { 889 kfree(data); 890 return NULL; 891 } 892 893 skb_reserve(skb, bp->rx_offset); 894 skb_put(skb, offset_and_len & 0xffff); 895 return skb; 896 } 897 898 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, 899 struct sk_buff *skb, u16 cp_cons, 900 u32 agg_bufs) 901 { 902 struct pci_dev *pdev = bp->pdev; 903 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 904 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 905 u16 prod = rxr->rx_agg_prod; 906 u32 i; 907 908 for (i = 0; i < agg_bufs; i++) { 909 u16 cons, frag_len; 910 struct rx_agg_cmp *agg; 911 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 912 struct page *page; 913 dma_addr_t mapping; 914 915 agg = (struct rx_agg_cmp *) 916 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 917 cons = agg->rx_agg_cmp_opaque; 918 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 919 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 920 921 cons_rx_buf = &rxr->rx_agg_ring[cons]; 922 skb_fill_page_desc(skb, i, cons_rx_buf->page, 923 cons_rx_buf->offset, frag_len); 924 __clear_bit(cons, rxr->rx_agg_bmap); 925 926 /* It is possible 
for bnxt_alloc_rx_page() to allocate 927 * a sw_prod index that equals the cons index, so we 928 * need to clear the cons entry now. 929 */ 930 mapping = cons_rx_buf->mapping; 931 page = cons_rx_buf->page; 932 cons_rx_buf->page = NULL; 933 934 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 935 struct skb_shared_info *shinfo; 936 unsigned int nr_frags; 937 938 shinfo = skb_shinfo(skb); 939 nr_frags = --shinfo->nr_frags; 940 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 941 942 dev_kfree_skb(skb); 943 944 cons_rx_buf->page = page; 945 946 /* Update prod since possibly some pages have been 947 * allocated already. 948 */ 949 rxr->rx_agg_prod = prod; 950 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); 951 return NULL; 952 } 953 954 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 955 PCI_DMA_FROMDEVICE, 956 DMA_ATTR_WEAK_ORDERING); 957 958 skb->data_len += frag_len; 959 skb->len += frag_len; 960 skb->truesize += PAGE_SIZE; 961 962 prod = NEXT_RX_AGG(prod); 963 cp_cons = NEXT_CMP(cp_cons); 964 } 965 rxr->rx_agg_prod = prod; 966 return skb; 967 } 968 969 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 970 u8 agg_bufs, u32 *raw_cons) 971 { 972 u16 last; 973 struct rx_agg_cmp *agg; 974 975 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 976 last = RING_CMP(*raw_cons); 977 agg = (struct rx_agg_cmp *) 978 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 979 return RX_AGG_CMP_VALID(agg, *raw_cons); 980 } 981 982 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 983 unsigned int len, 984 dma_addr_t mapping) 985 { 986 struct bnxt *bp = bnapi->bp; 987 struct pci_dev *pdev = bp->pdev; 988 struct sk_buff *skb; 989 990 skb = napi_alloc_skb(&bnapi->napi, len); 991 if (!skb) 992 return NULL; 993 994 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 995 bp->rx_dir); 996 997 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 998 len + NET_IP_ALIGN); 999 1000 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1001 bp->rx_dir); 1002 1003 skb_put(skb, len); 1004 return skb; 1005 } 1006 1007 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, 1008 u32 *raw_cons, void *cmp) 1009 { 1010 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1011 struct rx_cmp *rxcmp = cmp; 1012 u32 tmp_raw_cons = *raw_cons; 1013 u8 cmp_type, agg_bufs = 0; 1014 1015 cmp_type = RX_CMP_TYPE(rxcmp); 1016 1017 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1018 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1019 RX_CMP_AGG_BUFS) >> 1020 RX_CMP_AGG_BUFS_SHIFT; 1021 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1022 struct rx_tpa_end_cmp *tpa_end = cmp; 1023 1024 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1025 RX_TPA_END_CMP_AGG_BUFS) >> 1026 RX_TPA_END_CMP_AGG_BUFS_SHIFT; 1027 } 1028 1029 if (agg_bufs) { 1030 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1031 return -EBUSY; 1032 } 1033 *raw_cons = tmp_raw_cons; 1034 return 0; 1035 } 1036 1037 static void bnxt_queue_sp_work(struct bnxt *bp) 1038 { 1039 if (BNXT_PF(bp)) 1040 queue_work(bnxt_pf_wq, &bp->sp_task); 1041 else 1042 schedule_work(&bp->sp_task); 1043 } 1044 1045 static void bnxt_cancel_sp_work(struct bnxt *bp) 1046 { 1047 if (BNXT_PF(bp)) 1048 flush_workqueue(bnxt_pf_wq); 1049 else 1050 cancel_work_sync(&bp->sp_task); 1051 } 1052 1053 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1054 { 1055 if (!rxr->bnapi->in_reset) { 1056 rxr->bnapi->in_reset = true; 1057 
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1058 bnxt_queue_sp_work(bp); 1059 } 1060 rxr->rx_next_cons = 0xffff; 1061 } 1062 1063 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1064 struct rx_tpa_start_cmp *tpa_start, 1065 struct rx_tpa_start_cmp_ext *tpa_start1) 1066 { 1067 u8 agg_id = TPA_START_AGG_ID(tpa_start); 1068 u16 cons, prod; 1069 struct bnxt_tpa_info *tpa_info; 1070 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1071 struct rx_bd *prod_bd; 1072 dma_addr_t mapping; 1073 1074 cons = tpa_start->rx_tpa_start_cmp_opaque; 1075 prod = rxr->rx_prod; 1076 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1077 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1078 tpa_info = &rxr->rx_tpa[agg_id]; 1079 1080 if (unlikely(cons != rxr->rx_next_cons)) { 1081 bnxt_sched_reset(bp, rxr); 1082 return; 1083 } 1084 /* Store cfa_code in tpa_info to use in tpa_end 1085 * completion processing. 1086 */ 1087 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1088 prod_rx_buf->data = tpa_info->data; 1089 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1090 1091 mapping = tpa_info->mapping; 1092 prod_rx_buf->mapping = mapping; 1093 1094 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 1095 1096 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1097 1098 tpa_info->data = cons_rx_buf->data; 1099 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1100 cons_rx_buf->data = NULL; 1101 tpa_info->mapping = cons_rx_buf->mapping; 1102 1103 tpa_info->len = 1104 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1105 RX_TPA_START_CMP_LEN_SHIFT; 1106 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1107 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1108 1109 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1110 tpa_info->gso_type = SKB_GSO_TCPV4; 1111 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1112 if (hash_type == 3) 1113 tpa_info->gso_type = SKB_GSO_TCPV6; 1114 tpa_info->rss_hash = 1115 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1116 } else { 1117 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1118 tpa_info->gso_type = 0; 1119 if (netif_msg_rx_err(bp)) 1120 netdev_warn(bp->dev, "TPA packet without valid hash\n"); 1121 } 1122 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1123 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1124 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1125 1126 rxr->rx_prod = NEXT_RX(prod); 1127 cons = NEXT_RX(cons); 1128 rxr->rx_next_cons = NEXT_RX(cons); 1129 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1130 1131 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1132 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1133 cons_rx_buf->data = NULL; 1134 } 1135 1136 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, 1137 u16 cp_cons, u32 agg_bufs) 1138 { 1139 if (agg_bufs) 1140 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1141 } 1142 1143 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1144 int payload_off, int tcp_ts, 1145 struct sk_buff *skb) 1146 { 1147 #ifdef CONFIG_INET 1148 struct tcphdr *th; 1149 int len, nw_off; 1150 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1151 u32 hdr_info = tpa_info->hdr_info; 1152 bool loopback = false; 1153 1154 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1155 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1156 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1157 1158 /* If the packet is an internal loopback packet, the offsets will 1159 * have an extra 4 bytes. 
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp,
&new_mapping, GFP_ATOMIC); 1396 if (!new_data) { 1397 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1398 return NULL; 1399 } 1400 1401 tpa_info->data = new_data; 1402 tpa_info->data_ptr = new_data + bp->rx_offset; 1403 tpa_info->mapping = new_mapping; 1404 1405 skb = build_skb(data, 0); 1406 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1407 bp->rx_buf_use_size, bp->rx_dir, 1408 DMA_ATTR_WEAK_ORDERING); 1409 1410 if (!skb) { 1411 kfree(data); 1412 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1413 return NULL; 1414 } 1415 skb_reserve(skb, bp->rx_offset); 1416 skb_put(skb, len); 1417 } 1418 1419 if (agg_bufs) { 1420 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1421 if (!skb) { 1422 /* Page reuse already handled by bnxt_rx_pages(). */ 1423 return NULL; 1424 } 1425 } 1426 1427 skb->protocol = 1428 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1429 1430 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1431 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1432 1433 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1434 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1435 u16 vlan_proto = tpa_info->metadata >> 1436 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1437 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1438 1439 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1440 } 1441 1442 skb_checksum_none_assert(skb); 1443 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1444 skb->ip_summed = CHECKSUM_UNNECESSARY; 1445 skb->csum_level = 1446 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1447 } 1448 1449 if (TPA_END_GRO(tpa_end)) 1450 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1451 1452 return skb; 1453 } 1454 1455 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1456 struct sk_buff *skb) 1457 { 1458 if (skb->dev != bp->dev) { 1459 /* this packet belongs to a vf-rep */ 1460 bnxt_vf_rep_rx(bp, skb); 1461 return; 1462 } 1463 skb_record_rx_queue(skb, bnapi->index); 1464 napi_gro_receive(&bnapi->napi, skb); 1465 } 1466 1467 /* returns the following: 1468 * 1 - 1 packet successfully received 1469 * 0 - successful TPA_START, packet not completed yet 1470 * -EBUSY - completion ring does not have all the agg buffers yet 1471 * -ENOMEM - packet aborted due to out of memory 1472 * -EIO - packet aborted due to hw error indicated in BD 1473 */ 1474 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, 1475 u8 *event) 1476 { 1477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1478 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1479 struct net_device *dev = bp->dev; 1480 struct rx_cmp *rxcmp; 1481 struct rx_cmp_ext *rxcmp1; 1482 u32 tmp_raw_cons = *raw_cons; 1483 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1484 struct bnxt_sw_rx_bd *rx_buf; 1485 unsigned int len; 1486 u8 *data_ptr, agg_bufs, cmp_type; 1487 dma_addr_t dma_addr; 1488 struct sk_buff *skb; 1489 void *data; 1490 int rc = 0; 1491 u32 misc; 1492 1493 rxcmp = (struct rx_cmp *) 1494 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1495 1496 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1497 cp_cons = RING_CMP(tmp_raw_cons); 1498 rxcmp1 = (struct rx_cmp_ext *) 1499 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1500 1501 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1502 return -EBUSY; 1503 1504 cmp_type = RX_CMP_TYPE(rxcmp); 1505 1506 prod = rxr->rx_prod; 1507 1508 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1509 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1510 
(struct rx_tpa_start_cmp_ext *)rxcmp1); 1511 1512 *event |= BNXT_RX_EVENT; 1513 goto next_rx_no_prod; 1514 1515 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1516 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, 1517 (struct rx_tpa_end_cmp *)rxcmp, 1518 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1519 1520 if (IS_ERR(skb)) 1521 return -EBUSY; 1522 1523 rc = -ENOMEM; 1524 if (likely(skb)) { 1525 bnxt_deliver_skb(bp, bnapi, skb); 1526 rc = 1; 1527 } 1528 *event |= BNXT_RX_EVENT; 1529 goto next_rx_no_prod; 1530 } 1531 1532 cons = rxcmp->rx_cmp_opaque; 1533 rx_buf = &rxr->rx_buf_ring[cons]; 1534 data = rx_buf->data; 1535 data_ptr = rx_buf->data_ptr; 1536 if (unlikely(cons != rxr->rx_next_cons)) { 1537 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); 1538 1539 bnxt_sched_reset(bp, rxr); 1540 return rc1; 1541 } 1542 prefetch(data_ptr); 1543 1544 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1545 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1546 1547 if (agg_bufs) { 1548 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1549 return -EBUSY; 1550 1551 cp_cons = NEXT_CMP(cp_cons); 1552 *event |= BNXT_AGG_EVENT; 1553 } 1554 *event |= BNXT_RX_EVENT; 1555 1556 rx_buf->data = NULL; 1557 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1558 bnxt_reuse_rx_data(rxr, cons, data); 1559 if (agg_bufs) 1560 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1561 1562 rc = -EIO; 1563 goto next_rx; 1564 } 1565 1566 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1567 dma_addr = rx_buf->mapping; 1568 1569 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1570 rc = 1; 1571 goto next_rx; 1572 } 1573 1574 if (len <= bp->rx_copy_thresh) { 1575 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1576 bnxt_reuse_rx_data(rxr, cons, data); 1577 if (!skb) { 1578 rc = -ENOMEM; 1579 goto next_rx; 1580 } 1581 } else { 1582 u32 payload; 1583 1584 if (rx_buf->data_ptr == data_ptr) 1585 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1586 else 1587 payload = 0; 1588 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1589 payload | len); 1590 if (!skb) { 1591 rc = -ENOMEM; 1592 goto next_rx; 1593 } 1594 } 1595 1596 if (agg_bufs) { 1597 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1598 if (!skb) { 1599 rc = -ENOMEM; 1600 goto next_rx; 1601 } 1602 } 1603 1604 if (RX_CMP_HASH_VALID(rxcmp)) { 1605 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1606 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1607 1608 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1609 if (hash_type != 1 && hash_type != 3) 1610 type = PKT_HASH_TYPE_L3; 1611 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1612 } 1613 1614 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1615 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1616 1617 if ((rxcmp1->rx_cmp_flags2 & 1618 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1619 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1620 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1621 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1622 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1623 1624 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1625 } 1626 1627 skb_checksum_none_assert(skb); 1628 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1629 if (dev->features & NETIF_F_RXCSUM) { 1630 skb->ip_summed = CHECKSUM_UNNECESSARY; 1631 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1632 } 1633 } else { 1634 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1635 if (dev->features & 
			    NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u32 *raw_cons, u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event IDs for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
bnxt_queue_sp_work(bp); 1751 async_event_process_exit: 1752 bnxt_ulp_async_events(bp, cmpl); 1753 return 0; 1754 } 1755 1756 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 1757 { 1758 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 1759 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 1760 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 1761 (struct hwrm_fwd_req_cmpl *)txcmp; 1762 1763 switch (cmpl_type) { 1764 case CMPL_BASE_TYPE_HWRM_DONE: 1765 seq_id = le16_to_cpu(h_cmpl->sequence_id); 1766 if (seq_id == bp->hwrm_intr_seq_id) 1767 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; 1768 else 1769 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 1770 break; 1771 1772 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 1773 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 1774 1775 if ((vf_id < bp->pf.first_vf_id) || 1776 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 1777 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 1778 vf_id); 1779 return -EINVAL; 1780 } 1781 1782 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1783 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1784 bnxt_queue_sp_work(bp); 1785 break; 1786 1787 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1788 bnxt_async_event_process(bp, 1789 (struct hwrm_async_event_cmpl *)txcmp); 1790 1791 default: 1792 break; 1793 } 1794 1795 return 0; 1796 } 1797 1798 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 1799 { 1800 struct bnxt_napi *bnapi = dev_instance; 1801 struct bnxt *bp = bnapi->bp; 1802 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1803 u32 cons = RING_CMP(cpr->cp_raw_cons); 1804 1805 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1806 napi_schedule(&bnapi->napi); 1807 return IRQ_HANDLED; 1808 } 1809 1810 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 1811 { 1812 u32 raw_cons = cpr->cp_raw_cons; 1813 u16 cons = RING_CMP(raw_cons); 1814 struct tx_cmp *txcmp; 1815 1816 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1817 1818 return TX_CMP_VALID(txcmp, raw_cons); 1819 } 1820 1821 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 1822 { 1823 struct bnxt_napi *bnapi = dev_instance; 1824 struct bnxt *bp = bnapi->bp; 1825 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1826 u32 cons = RING_CMP(cpr->cp_raw_cons); 1827 u32 int_status; 1828 1829 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1830 1831 if (!bnxt_has_work(bp, cpr)) { 1832 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 1833 /* return if erroneous interrupt */ 1834 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 1835 return IRQ_NONE; 1836 } 1837 1838 /* disable ring IRQ */ 1839 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); 1840 1841 /* Return here if interrupt is shared and is disabled. */ 1842 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 1843 return IRQ_HANDLED; 1844 1845 napi_schedule(&bnapi->napi); 1846 return IRQ_HANDLED; 1847 } 1848 1849 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 1850 { 1851 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1852 u32 raw_cons = cpr->cp_raw_cons; 1853 u32 cons; 1854 int tx_pkts = 0; 1855 int rx_pkts = 0; 1856 u8 event = 0; 1857 struct tx_cmp *txcmp; 1858 1859 while (1) { 1860 int rc; 1861 1862 cons = RING_CMP(raw_cons); 1863 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1864 1865 if (!TX_CMP_VALID(txcmp, raw_cons)) 1866 break; 1867 1868 /* The valid test of the entry must be done first before 1869 * reading any further. 
1870 */ 1871 dma_rmb(); 1872 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1873 tx_pkts++; 1874 /* return full budget so NAPI will complete. */ 1875 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1876 rx_pkts = budget; 1877 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1878 if (likely(budget)) 1879 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1880 else 1881 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, 1882 &event); 1883 if (likely(rc >= 0)) 1884 rx_pkts += rc; 1885 /* Increment rx_pkts when rc is -ENOMEM to count towards 1886 * the NAPI budget. Otherwise, we may potentially loop 1887 * here forever if we consistently cannot allocate 1888 * buffers. 1889 */ 1890 else if (rc == -ENOMEM && budget) 1891 rx_pkts++; 1892 else if (rc == -EBUSY) /* partial completion */ 1893 break; 1894 } else if (unlikely((TX_CMP_TYPE(txcmp) == 1895 CMPL_BASE_TYPE_HWRM_DONE) || 1896 (TX_CMP_TYPE(txcmp) == 1897 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 1898 (TX_CMP_TYPE(txcmp) == 1899 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 1900 bnxt_hwrm_handler(bp, txcmp); 1901 } 1902 raw_cons = NEXT_RAW_CMP(raw_cons); 1903 1904 if (rx_pkts == budget) 1905 break; 1906 } 1907 1908 if (event & BNXT_TX_EVENT) { 1909 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 1910 void __iomem *db = txr->tx_doorbell; 1911 u16 prod = txr->tx_prod; 1912 1913 /* Sync BD data before updating doorbell */ 1914 wmb(); 1915 1916 bnxt_db_write(bp, db, DB_KEY_TX | prod); 1917 } 1918 1919 cpr->cp_raw_cons = raw_cons; 1920 /* ACK completion ring before freeing tx ring and producing new 1921 * buffers in rx/agg rings to prevent overflowing the completion 1922 * ring. 1923 */ 1924 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1925 1926 if (tx_pkts) 1927 bnapi->tx_int(bp, bnapi, tx_pkts); 1928 1929 if (event & BNXT_RX_EVENT) { 1930 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1931 1932 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); 1933 if (event & BNXT_AGG_EVENT) 1934 bnxt_db_write(bp, rxr->rx_agg_doorbell, 1935 DB_KEY_RX | rxr->rx_agg_prod); 1936 } 1937 return rx_pkts; 1938 } 1939 1940 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 1941 { 1942 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 1943 struct bnxt *bp = bnapi->bp; 1944 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1945 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1946 struct tx_cmp *txcmp; 1947 struct rx_cmp_ext *rxcmp1; 1948 u32 cp_cons, tmp_raw_cons; 1949 u32 raw_cons = cpr->cp_raw_cons; 1950 u32 rx_pkts = 0; 1951 u8 event = 0; 1952 1953 while (1) { 1954 int rc; 1955 1956 cp_cons = RING_CMP(raw_cons); 1957 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1958 1959 if (!TX_CMP_VALID(txcmp, raw_cons)) 1960 break; 1961 1962 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1963 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 1964 cp_cons = RING_CMP(tmp_raw_cons); 1965 rxcmp1 = (struct rx_cmp_ext *) 1966 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1967 1968 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1969 break; 1970 1971 /* force an error to recycle the buffer */ 1972 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1973 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1974 1975 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1976 if (likely(rc == -EIO) && budget) 1977 rx_pkts++; 1978 else if (rc == -EBUSY) /* partial completion */ 1979 break; 1980 } else if (unlikely(TX_CMP_TYPE(txcmp) == 1981 CMPL_BASE_TYPE_HWRM_DONE)) { 1982 bnxt_hwrm_handler(bp, txcmp); 1983 } else { 1984 netdev_err(bp->dev, 1985 "Invalid completion received on special ring\n"); 
1986 } 1987 raw_cons = NEXT_RAW_CMP(raw_cons); 1988 1989 if (rx_pkts == budget) 1990 break; 1991 } 1992 1993 cpr->cp_raw_cons = raw_cons; 1994 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1995 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); 1996 1997 if (event & BNXT_AGG_EVENT) 1998 bnxt_db_write(bp, rxr->rx_agg_doorbell, 1999 DB_KEY_RX | rxr->rx_agg_prod); 2000 2001 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2002 napi_complete_done(napi, rx_pkts); 2003 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 2004 } 2005 return rx_pkts; 2006 } 2007 2008 static int bnxt_poll(struct napi_struct *napi, int budget) 2009 { 2010 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2011 struct bnxt *bp = bnapi->bp; 2012 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2013 int work_done = 0; 2014 2015 while (1) { 2016 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); 2017 2018 if (work_done >= budget) 2019 break; 2020 2021 if (!bnxt_has_work(bp, cpr)) { 2022 if (napi_complete_done(napi, work_done)) 2023 BNXT_CP_DB_REARM(cpr->cp_doorbell, 2024 cpr->cp_raw_cons); 2025 break; 2026 } 2027 } 2028 mmiowb(); 2029 return work_done; 2030 } 2031 2032 static void bnxt_free_tx_skbs(struct bnxt *bp) 2033 { 2034 int i, max_idx; 2035 struct pci_dev *pdev = bp->pdev; 2036 2037 if (!bp->tx_ring) 2038 return; 2039 2040 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2041 for (i = 0; i < bp->tx_nr_rings; i++) { 2042 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2043 int j; 2044 2045 for (j = 0; j < max_idx;) { 2046 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2047 struct sk_buff *skb = tx_buf->skb; 2048 int k, last; 2049 2050 if (!skb) { 2051 j++; 2052 continue; 2053 } 2054 2055 tx_buf->skb = NULL; 2056 2057 if (tx_buf->is_push) { 2058 dev_kfree_skb(skb); 2059 j += 2; 2060 continue; 2061 } 2062 2063 dma_unmap_single(&pdev->dev, 2064 dma_unmap_addr(tx_buf, mapping), 2065 skb_headlen(skb), 2066 PCI_DMA_TODEVICE); 2067 2068 last = tx_buf->nr_frags; 2069 j += 2; 2070 for (k = 0; k < last; k++, j++) { 2071 int ring_idx = j & bp->tx_ring_mask; 2072 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2073 2074 tx_buf = &txr->tx_buf_ring[ring_idx]; 2075 dma_unmap_page( 2076 &pdev->dev, 2077 dma_unmap_addr(tx_buf, mapping), 2078 skb_frag_size(frag), PCI_DMA_TODEVICE); 2079 } 2080 dev_kfree_skb(skb); 2081 } 2082 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2083 } 2084 } 2085 2086 static void bnxt_free_rx_skbs(struct bnxt *bp) 2087 { 2088 int i, max_idx, max_agg_idx; 2089 struct pci_dev *pdev = bp->pdev; 2090 2091 if (!bp->rx_ring) 2092 return; 2093 2094 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2095 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2096 for (i = 0; i < bp->rx_nr_rings; i++) { 2097 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2098 int j; 2099 2100 if (rxr->rx_tpa) { 2101 for (j = 0; j < MAX_TPA; j++) { 2102 struct bnxt_tpa_info *tpa_info = 2103 &rxr->rx_tpa[j]; 2104 u8 *data = tpa_info->data; 2105 2106 if (!data) 2107 continue; 2108 2109 dma_unmap_single_attrs(&pdev->dev, 2110 tpa_info->mapping, 2111 bp->rx_buf_use_size, 2112 bp->rx_dir, 2113 DMA_ATTR_WEAK_ORDERING); 2114 2115 tpa_info->data = NULL; 2116 2117 kfree(data); 2118 } 2119 } 2120 2121 for (j = 0; j < max_idx; j++) { 2122 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2123 dma_addr_t mapping = rx_buf->mapping; 2124 void *data = rx_buf->data; 2125 2126 if (!data) 2127 continue; 2128 2129 rx_buf->data = NULL; 2130 2131 if (BNXT_RX_PAGE_MODE(bp)) { 2132 mapping -= bp->rx_dma_offset; 
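			/* The DMA address stored for page-mode buffers
			 * includes the rx_dma_offset headroom, so it is
			 * subtracted above to recover the address that was
			 * actually mapped before the page is unmapped.
			 */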
2133 dma_unmap_page_attrs(&pdev->dev, mapping, 2134 PAGE_SIZE, bp->rx_dir, 2135 DMA_ATTR_WEAK_ORDERING); 2136 __free_page(data); 2137 } else { 2138 dma_unmap_single_attrs(&pdev->dev, mapping, 2139 bp->rx_buf_use_size, 2140 bp->rx_dir, 2141 DMA_ATTR_WEAK_ORDERING); 2142 kfree(data); 2143 } 2144 } 2145 2146 for (j = 0; j < max_agg_idx; j++) { 2147 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2148 &rxr->rx_agg_ring[j]; 2149 struct page *page = rx_agg_buf->page; 2150 2151 if (!page) 2152 continue; 2153 2154 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2155 BNXT_RX_PAGE_SIZE, 2156 PCI_DMA_FROMDEVICE, 2157 DMA_ATTR_WEAK_ORDERING); 2158 2159 rx_agg_buf->page = NULL; 2160 __clear_bit(j, rxr->rx_agg_bmap); 2161 2162 __free_page(page); 2163 } 2164 if (rxr->rx_page) { 2165 __free_page(rxr->rx_page); 2166 rxr->rx_page = NULL; 2167 } 2168 } 2169 } 2170 2171 static void bnxt_free_skbs(struct bnxt *bp) 2172 { 2173 bnxt_free_tx_skbs(bp); 2174 bnxt_free_rx_skbs(bp); 2175 } 2176 2177 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2178 { 2179 struct pci_dev *pdev = bp->pdev; 2180 int i; 2181 2182 for (i = 0; i < ring->nr_pages; i++) { 2183 if (!ring->pg_arr[i]) 2184 continue; 2185 2186 dma_free_coherent(&pdev->dev, ring->page_size, 2187 ring->pg_arr[i], ring->dma_arr[i]); 2188 2189 ring->pg_arr[i] = NULL; 2190 } 2191 if (ring->pg_tbl) { 2192 dma_free_coherent(&pdev->dev, ring->nr_pages * 8, 2193 ring->pg_tbl, ring->pg_tbl_map); 2194 ring->pg_tbl = NULL; 2195 } 2196 if (ring->vmem_size && *ring->vmem) { 2197 vfree(*ring->vmem); 2198 *ring->vmem = NULL; 2199 } 2200 } 2201 2202 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2203 { 2204 int i; 2205 struct pci_dev *pdev = bp->pdev; 2206 2207 if (ring->nr_pages > 1) { 2208 ring->pg_tbl = dma_alloc_coherent(&pdev->dev, 2209 ring->nr_pages * 8, 2210 &ring->pg_tbl_map, 2211 GFP_KERNEL); 2212 if (!ring->pg_tbl) 2213 return -ENOMEM; 2214 } 2215 2216 for (i = 0; i < ring->nr_pages; i++) { 2217 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2218 ring->page_size, 2219 &ring->dma_arr[i], 2220 GFP_KERNEL); 2221 if (!ring->pg_arr[i]) 2222 return -ENOMEM; 2223 2224 if (ring->nr_pages > 1) 2225 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); 2226 } 2227 2228 if (ring->vmem_size) { 2229 *ring->vmem = vzalloc(ring->vmem_size); 2230 if (!(*ring->vmem)) 2231 return -ENOMEM; 2232 } 2233 return 0; 2234 } 2235 2236 static void bnxt_free_rx_rings(struct bnxt *bp) 2237 { 2238 int i; 2239 2240 if (!bp->rx_ring) 2241 return; 2242 2243 for (i = 0; i < bp->rx_nr_rings; i++) { 2244 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2245 struct bnxt_ring_struct *ring; 2246 2247 if (rxr->xdp_prog) 2248 bpf_prog_put(rxr->xdp_prog); 2249 2250 kfree(rxr->rx_tpa); 2251 rxr->rx_tpa = NULL; 2252 2253 kfree(rxr->rx_agg_bmap); 2254 rxr->rx_agg_bmap = NULL; 2255 2256 ring = &rxr->rx_ring_struct; 2257 bnxt_free_ring(bp, ring); 2258 2259 ring = &rxr->rx_agg_ring_struct; 2260 bnxt_free_ring(bp, ring); 2261 } 2262 } 2263 2264 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2265 { 2266 int i, rc, agg_rings = 0, tpa_rings = 0; 2267 2268 if (!bp->rx_ring) 2269 return -ENOMEM; 2270 2271 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2272 agg_rings = 1; 2273 2274 if (bp->flags & BNXT_FLAG_TPA) 2275 tpa_rings = 1; 2276 2277 for (i = 0; i < bp->rx_nr_rings; i++) { 2278 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2279 struct bnxt_ring_struct *ring; 2280 2281 ring = &rxr->rx_ring_struct; 2282 2283 rc = bnxt_alloc_ring(bp, ring); 2284 if (rc) 2285 return rc; 
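		/* When aggregation rings are in use, each RX ring also needs
		 * its agg ring, a bitmap tracking in-use agg buffers and,
		 * with TPA enabled, the per-ring TPA info array, all of which
		 * are allocated below.
		 */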
2286 2287 if (agg_rings) { 2288 u16 mem_size; 2289 2290 ring = &rxr->rx_agg_ring_struct; 2291 rc = bnxt_alloc_ring(bp, ring); 2292 if (rc) 2293 return rc; 2294 2295 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2296 mem_size = rxr->rx_agg_bmap_size / 8; 2297 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2298 if (!rxr->rx_agg_bmap) 2299 return -ENOMEM; 2300 2301 if (tpa_rings) { 2302 rxr->rx_tpa = kcalloc(MAX_TPA, 2303 sizeof(struct bnxt_tpa_info), 2304 GFP_KERNEL); 2305 if (!rxr->rx_tpa) 2306 return -ENOMEM; 2307 } 2308 } 2309 } 2310 return 0; 2311 } 2312 2313 static void bnxt_free_tx_rings(struct bnxt *bp) 2314 { 2315 int i; 2316 struct pci_dev *pdev = bp->pdev; 2317 2318 if (!bp->tx_ring) 2319 return; 2320 2321 for (i = 0; i < bp->tx_nr_rings; i++) { 2322 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2323 struct bnxt_ring_struct *ring; 2324 2325 if (txr->tx_push) { 2326 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2327 txr->tx_push, txr->tx_push_mapping); 2328 txr->tx_push = NULL; 2329 } 2330 2331 ring = &txr->tx_ring_struct; 2332 2333 bnxt_free_ring(bp, ring); 2334 } 2335 } 2336 2337 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2338 { 2339 int i, j, rc; 2340 struct pci_dev *pdev = bp->pdev; 2341 2342 bp->tx_push_size = 0; 2343 if (bp->tx_push_thresh) { 2344 int push_size; 2345 2346 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2347 bp->tx_push_thresh); 2348 2349 if (push_size > 256) { 2350 push_size = 0; 2351 bp->tx_push_thresh = 0; 2352 } 2353 2354 bp->tx_push_size = push_size; 2355 } 2356 2357 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2358 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2359 struct bnxt_ring_struct *ring; 2360 2361 ring = &txr->tx_ring_struct; 2362 2363 rc = bnxt_alloc_ring(bp, ring); 2364 if (rc) 2365 return rc; 2366 2367 if (bp->tx_push_size) { 2368 dma_addr_t mapping; 2369 2370 /* One pre-allocated DMA buffer to backup 2371 * TX push operation 2372 */ 2373 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2374 bp->tx_push_size, 2375 &txr->tx_push_mapping, 2376 GFP_KERNEL); 2377 2378 if (!txr->tx_push) 2379 return -ENOMEM; 2380 2381 mapping = txr->tx_push_mapping + 2382 sizeof(struct tx_push_bd); 2383 txr->data_mapping = cpu_to_le64(mapping); 2384 2385 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2386 } 2387 ring->queue_id = bp->q_info[j].queue_id; 2388 if (i < bp->tx_nr_rings_xdp) 2389 continue; 2390 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2391 j++; 2392 } 2393 return 0; 2394 } 2395 2396 static void bnxt_free_cp_rings(struct bnxt *bp) 2397 { 2398 int i; 2399 2400 if (!bp->bnapi) 2401 return; 2402 2403 for (i = 0; i < bp->cp_nr_rings; i++) { 2404 struct bnxt_napi *bnapi = bp->bnapi[i]; 2405 struct bnxt_cp_ring_info *cpr; 2406 struct bnxt_ring_struct *ring; 2407 2408 if (!bnapi) 2409 continue; 2410 2411 cpr = &bnapi->cp_ring; 2412 ring = &cpr->cp_ring_struct; 2413 2414 bnxt_free_ring(bp, ring); 2415 } 2416 } 2417 2418 static int bnxt_alloc_cp_rings(struct bnxt *bp) 2419 { 2420 int i, rc; 2421 2422 for (i = 0; i < bp->cp_nr_rings; i++) { 2423 struct bnxt_napi *bnapi = bp->bnapi[i]; 2424 struct bnxt_cp_ring_info *cpr; 2425 struct bnxt_ring_struct *ring; 2426 2427 if (!bnapi) 2428 continue; 2429 2430 cpr = &bnapi->cp_ring; 2431 ring = &cpr->cp_ring_struct; 2432 2433 rc = bnxt_alloc_ring(bp, ring); 2434 if (rc) 2435 return rc; 2436 } 2437 return 0; 2438 } 2439 2440 static void bnxt_init_ring_struct(struct bnxt *bp) 2441 { 2442 int i; 2443 2444 for (i = 0; i < bp->cp_nr_rings; i++) { 2445 struct bnxt_napi *bnapi = 
bp->bnapi[i]; 2446 struct bnxt_cp_ring_info *cpr; 2447 struct bnxt_rx_ring_info *rxr; 2448 struct bnxt_tx_ring_info *txr; 2449 struct bnxt_ring_struct *ring; 2450 2451 if (!bnapi) 2452 continue; 2453 2454 cpr = &bnapi->cp_ring; 2455 ring = &cpr->cp_ring_struct; 2456 ring->nr_pages = bp->cp_nr_pages; 2457 ring->page_size = HW_CMPD_RING_SIZE; 2458 ring->pg_arr = (void **)cpr->cp_desc_ring; 2459 ring->dma_arr = cpr->cp_desc_mapping; 2460 ring->vmem_size = 0; 2461 2462 rxr = bnapi->rx_ring; 2463 if (!rxr) 2464 goto skip_rx; 2465 2466 ring = &rxr->rx_ring_struct; 2467 ring->nr_pages = bp->rx_nr_pages; 2468 ring->page_size = HW_RXBD_RING_SIZE; 2469 ring->pg_arr = (void **)rxr->rx_desc_ring; 2470 ring->dma_arr = rxr->rx_desc_mapping; 2471 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 2472 ring->vmem = (void **)&rxr->rx_buf_ring; 2473 2474 ring = &rxr->rx_agg_ring_struct; 2475 ring->nr_pages = bp->rx_agg_nr_pages; 2476 ring->page_size = HW_RXBD_RING_SIZE; 2477 ring->pg_arr = (void **)rxr->rx_agg_desc_ring; 2478 ring->dma_arr = rxr->rx_agg_desc_mapping; 2479 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 2480 ring->vmem = (void **)&rxr->rx_agg_ring; 2481 2482 skip_rx: 2483 txr = bnapi->tx_ring; 2484 if (!txr) 2485 continue; 2486 2487 ring = &txr->tx_ring_struct; 2488 ring->nr_pages = bp->tx_nr_pages; 2489 ring->page_size = HW_RXBD_RING_SIZE; 2490 ring->pg_arr = (void **)txr->tx_desc_ring; 2491 ring->dma_arr = txr->tx_desc_mapping; 2492 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 2493 ring->vmem = (void **)&txr->tx_buf_ring; 2494 } 2495 } 2496 2497 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 2498 { 2499 int i; 2500 u32 prod; 2501 struct rx_bd **rx_buf_ring; 2502 2503 rx_buf_ring = (struct rx_bd **)ring->pg_arr; 2504 for (i = 0, prod = 0; i < ring->nr_pages; i++) { 2505 int j; 2506 struct rx_bd *rxbd; 2507 2508 rxbd = rx_buf_ring[i]; 2509 if (!rxbd) 2510 continue; 2511 2512 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 2513 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 2514 rxbd->rx_bd_opaque = prod; 2515 } 2516 } 2517 } 2518 2519 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 2520 { 2521 struct net_device *dev = bp->dev; 2522 struct bnxt_rx_ring_info *rxr; 2523 struct bnxt_ring_struct *ring; 2524 u32 prod, type; 2525 int i; 2526 2527 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 2528 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 2529 2530 if (NET_IP_ALIGN == 2) 2531 type |= RX_BD_FLAGS_SOP; 2532 2533 rxr = &bp->rx_ring[ring_nr]; 2534 ring = &rxr->rx_ring_struct; 2535 bnxt_init_rxbd_pages(ring, type); 2536 2537 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 2538 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); 2539 if (IS_ERR(rxr->xdp_prog)) { 2540 int rc = PTR_ERR(rxr->xdp_prog); 2541 2542 rxr->xdp_prog = NULL; 2543 return rc; 2544 } 2545 } 2546 prod = rxr->rx_prod; 2547 for (i = 0; i < bp->rx_ring_size; i++) { 2548 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 2549 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 2550 ring_nr, i, bp->rx_ring_size); 2551 break; 2552 } 2553 prod = NEXT_RX(prod); 2554 } 2555 rxr->rx_prod = prod; 2556 ring->fw_ring_id = INVALID_HW_RING_ID; 2557 2558 ring = &rxr->rx_agg_ring_struct; 2559 ring->fw_ring_id = INVALID_HW_RING_ID; 2560 2561 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 2562 return 0; 2563 2564 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 2565 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2566 2567 bnxt_init_rxbd_pages(ring, type); 2568 2569 prod = 
rxr->rx_agg_prod; 2570 for (i = 0; i < bp->rx_agg_ring_size; i++) { 2571 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 2572 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 2573 ring_nr, i, bp->rx_ring_size); 2574 break; 2575 } 2576 prod = NEXT_RX_AGG(prod); 2577 } 2578 rxr->rx_agg_prod = prod; 2579 2580 if (bp->flags & BNXT_FLAG_TPA) { 2581 if (rxr->rx_tpa) { 2582 u8 *data; 2583 dma_addr_t mapping; 2584 2585 for (i = 0; i < MAX_TPA; i++) { 2586 data = __bnxt_alloc_rx_data(bp, &mapping, 2587 GFP_KERNEL); 2588 if (!data) 2589 return -ENOMEM; 2590 2591 rxr->rx_tpa[i].data = data; 2592 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 2593 rxr->rx_tpa[i].mapping = mapping; 2594 } 2595 } else { 2596 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 2597 return -ENOMEM; 2598 } 2599 } 2600 2601 return 0; 2602 } 2603 2604 static void bnxt_init_cp_rings(struct bnxt *bp) 2605 { 2606 int i; 2607 2608 for (i = 0; i < bp->cp_nr_rings; i++) { 2609 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 2610 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 2611 2612 ring->fw_ring_id = INVALID_HW_RING_ID; 2613 } 2614 } 2615 2616 static int bnxt_init_rx_rings(struct bnxt *bp) 2617 { 2618 int i, rc = 0; 2619 2620 if (BNXT_RX_PAGE_MODE(bp)) { 2621 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 2622 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 2623 } else { 2624 bp->rx_offset = BNXT_RX_OFFSET; 2625 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 2626 } 2627 2628 for (i = 0; i < bp->rx_nr_rings; i++) { 2629 rc = bnxt_init_one_rx_ring(bp, i); 2630 if (rc) 2631 break; 2632 } 2633 2634 return rc; 2635 } 2636 2637 static int bnxt_init_tx_rings(struct bnxt *bp) 2638 { 2639 u16 i; 2640 2641 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 2642 MAX_SKB_FRAGS + 1); 2643 2644 for (i = 0; i < bp->tx_nr_rings; i++) { 2645 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2646 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 2647 2648 ring->fw_ring_id = INVALID_HW_RING_ID; 2649 } 2650 2651 return 0; 2652 } 2653 2654 static void bnxt_free_ring_grps(struct bnxt *bp) 2655 { 2656 kfree(bp->grp_info); 2657 bp->grp_info = NULL; 2658 } 2659 2660 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 2661 { 2662 int i; 2663 2664 if (irq_re_init) { 2665 bp->grp_info = kcalloc(bp->cp_nr_rings, 2666 sizeof(struct bnxt_ring_grp_info), 2667 GFP_KERNEL); 2668 if (!bp->grp_info) 2669 return -ENOMEM; 2670 } 2671 for (i = 0; i < bp->cp_nr_rings; i++) { 2672 if (irq_re_init) 2673 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 2674 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 2675 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 2676 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 2677 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 2678 } 2679 return 0; 2680 } 2681 2682 static void bnxt_free_vnics(struct bnxt *bp) 2683 { 2684 kfree(bp->vnic_info); 2685 bp->vnic_info = NULL; 2686 bp->nr_vnics = 0; 2687 } 2688 2689 static int bnxt_alloc_vnics(struct bnxt *bp) 2690 { 2691 int num_vnics = 1; 2692 2693 #ifdef CONFIG_RFS_ACCEL 2694 if (bp->flags & BNXT_FLAG_RFS) 2695 num_vnics += bp->rx_nr_rings; 2696 #endif 2697 2698 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 2699 num_vnics++; 2700 2701 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 2702 GFP_KERNEL); 2703 if (!bp->vnic_info) 2704 return -ENOMEM; 2705 2706 bp->nr_vnics = num_vnics; 2707 return 0; 2708 } 2709 2710 static void bnxt_init_vnics(struct bnxt *bp) 2711 { 2712 int i; 2713 2714 for (i = 0; i < bp->nr_vnics; 
i++) { 2715 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2716 2717 vnic->fw_vnic_id = INVALID_HW_RING_ID; 2718 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 2719 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 2720 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 2721 2722 if (bp->vnic_info[i].rss_hash_key) { 2723 if (i == 0) 2724 prandom_bytes(vnic->rss_hash_key, 2725 HW_HASH_KEY_SIZE); 2726 else 2727 memcpy(vnic->rss_hash_key, 2728 bp->vnic_info[0].rss_hash_key, 2729 HW_HASH_KEY_SIZE); 2730 } 2731 } 2732 } 2733 2734 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 2735 { 2736 int pages; 2737 2738 pages = ring_size / desc_per_pg; 2739 2740 if (!pages) 2741 return 1; 2742 2743 pages++; 2744 2745 while (pages & (pages - 1)) 2746 pages++; 2747 2748 return pages; 2749 } 2750 2751 void bnxt_set_tpa_flags(struct bnxt *bp) 2752 { 2753 bp->flags &= ~BNXT_FLAG_TPA; 2754 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 2755 return; 2756 if (bp->dev->features & NETIF_F_LRO) 2757 bp->flags |= BNXT_FLAG_LRO; 2758 else if (bp->dev->features & NETIF_F_GRO_HW) 2759 bp->flags |= BNXT_FLAG_GRO; 2760 } 2761 2762 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 2763 * be set on entry. 2764 */ 2765 void bnxt_set_ring_params(struct bnxt *bp) 2766 { 2767 u32 ring_size, rx_size, rx_space; 2768 u32 agg_factor = 0, agg_ring_size = 0; 2769 2770 /* 8 for CRC and VLAN */ 2771 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 2772 2773 rx_space = rx_size + NET_SKB_PAD + 2774 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2775 2776 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 2777 ring_size = bp->rx_ring_size; 2778 bp->rx_agg_ring_size = 0; 2779 bp->rx_agg_nr_pages = 0; 2780 2781 if (bp->flags & BNXT_FLAG_TPA) 2782 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 2783 2784 bp->flags &= ~BNXT_FLAG_JUMBO; 2785 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 2786 u32 jumbo_factor; 2787 2788 bp->flags |= BNXT_FLAG_JUMBO; 2789 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 2790 if (jumbo_factor > agg_factor) 2791 agg_factor = jumbo_factor; 2792 } 2793 agg_ring_size = ring_size * agg_factor; 2794 2795 if (agg_ring_size) { 2796 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 2797 RX_DESC_CNT); 2798 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 2799 u32 tmp = agg_ring_size; 2800 2801 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 2802 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 2803 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 2804 tmp, agg_ring_size); 2805 } 2806 bp->rx_agg_ring_size = agg_ring_size; 2807 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 2808 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 2809 rx_space = rx_size + NET_SKB_PAD + 2810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2811 } 2812 2813 bp->rx_buf_use_size = rx_size; 2814 bp->rx_buf_size = rx_space; 2815 2816 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 2817 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 2818 2819 ring_size = bp->tx_ring_size; 2820 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 2821 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 2822 2823 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 2824 bp->cp_ring_size = ring_size; 2825 2826 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 2827 if (bp->cp_nr_pages > MAX_CP_PAGES) { 2828 bp->cp_nr_pages = MAX_CP_PAGES; 2829 bp->cp_ring_size = 
MAX_CP_PAGES * CP_DESC_CNT - 1; 2830 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 2831 ring_size, bp->cp_ring_size); 2832 } 2833 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 2834 bp->cp_ring_mask = bp->cp_bit - 1; 2835 } 2836 2837 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 2838 { 2839 if (page_mode) { 2840 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 2841 return -EOPNOTSUPP; 2842 bp->dev->max_mtu = 2843 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 2844 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 2845 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 2846 bp->rx_dir = DMA_BIDIRECTIONAL; 2847 bp->rx_skb_func = bnxt_rx_page_skb; 2848 /* Disable LRO or GRO_HW */ 2849 netdev_update_features(bp->dev); 2850 } else { 2851 bp->dev->max_mtu = bp->max_mtu; 2852 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 2853 bp->rx_dir = DMA_FROM_DEVICE; 2854 bp->rx_skb_func = bnxt_rx_skb; 2855 } 2856 return 0; 2857 } 2858 2859 static void bnxt_free_vnic_attributes(struct bnxt *bp) 2860 { 2861 int i; 2862 struct bnxt_vnic_info *vnic; 2863 struct pci_dev *pdev = bp->pdev; 2864 2865 if (!bp->vnic_info) 2866 return; 2867 2868 for (i = 0; i < bp->nr_vnics; i++) { 2869 vnic = &bp->vnic_info[i]; 2870 2871 kfree(vnic->fw_grp_ids); 2872 vnic->fw_grp_ids = NULL; 2873 2874 kfree(vnic->uc_list); 2875 vnic->uc_list = NULL; 2876 2877 if (vnic->mc_list) { 2878 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 2879 vnic->mc_list, vnic->mc_list_mapping); 2880 vnic->mc_list = NULL; 2881 } 2882 2883 if (vnic->rss_table) { 2884 dma_free_coherent(&pdev->dev, PAGE_SIZE, 2885 vnic->rss_table, 2886 vnic->rss_table_dma_addr); 2887 vnic->rss_table = NULL; 2888 } 2889 2890 vnic->rss_hash_key = NULL; 2891 vnic->flags = 0; 2892 } 2893 } 2894 2895 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 2896 { 2897 int i, rc = 0, size; 2898 struct bnxt_vnic_info *vnic; 2899 struct pci_dev *pdev = bp->pdev; 2900 int max_rings; 2901 2902 for (i = 0; i < bp->nr_vnics; i++) { 2903 vnic = &bp->vnic_info[i]; 2904 2905 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 2906 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 2907 2908 if (mem_size > 0) { 2909 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 2910 if (!vnic->uc_list) { 2911 rc = -ENOMEM; 2912 goto out; 2913 } 2914 } 2915 } 2916 2917 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 2918 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 2919 vnic->mc_list = 2920 dma_alloc_coherent(&pdev->dev, 2921 vnic->mc_list_size, 2922 &vnic->mc_list_mapping, 2923 GFP_KERNEL); 2924 if (!vnic->mc_list) { 2925 rc = -ENOMEM; 2926 goto out; 2927 } 2928 } 2929 2930 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 2931 max_rings = bp->rx_nr_rings; 2932 else 2933 max_rings = 1; 2934 2935 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 2936 if (!vnic->fw_grp_ids) { 2937 rc = -ENOMEM; 2938 goto out; 2939 } 2940 2941 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 2942 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 2943 continue; 2944 2945 /* Allocate rss table and hash key */ 2946 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2947 &vnic->rss_table_dma_addr, 2948 GFP_KERNEL); 2949 if (!vnic->rss_table) { 2950 rc = -ENOMEM; 2951 goto out; 2952 } 2953 2954 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 2955 2956 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 2957 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 2958 } 2959 return 0; 2960 2961 out: 2962 return rc; 2963 } 2964 2965 static void bnxt_free_hwrm_resources(struct bnxt *bp) 2966 { 2967 struct pci_dev *pdev 
= bp->pdev; 2968 2969 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 2970 bp->hwrm_cmd_resp_dma_addr); 2971 2972 bp->hwrm_cmd_resp_addr = NULL; 2973 if (bp->hwrm_dbg_resp_addr) { 2974 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, 2975 bp->hwrm_dbg_resp_addr, 2976 bp->hwrm_dbg_resp_dma_addr); 2977 2978 bp->hwrm_dbg_resp_addr = NULL; 2979 } 2980 } 2981 2982 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 2983 { 2984 struct pci_dev *pdev = bp->pdev; 2985 2986 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2987 &bp->hwrm_cmd_resp_dma_addr, 2988 GFP_KERNEL); 2989 if (!bp->hwrm_cmd_resp_addr) 2990 return -ENOMEM; 2991 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, 2992 HWRM_DBG_REG_BUF_SIZE, 2993 &bp->hwrm_dbg_resp_dma_addr, 2994 GFP_KERNEL); 2995 if (!bp->hwrm_dbg_resp_addr) 2996 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); 2997 2998 return 0; 2999 } 3000 3001 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3002 { 3003 if (bp->hwrm_short_cmd_req_addr) { 3004 struct pci_dev *pdev = bp->pdev; 3005 3006 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, 3007 bp->hwrm_short_cmd_req_addr, 3008 bp->hwrm_short_cmd_req_dma_addr); 3009 bp->hwrm_short_cmd_req_addr = NULL; 3010 } 3011 } 3012 3013 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3014 { 3015 struct pci_dev *pdev = bp->pdev; 3016 3017 bp->hwrm_short_cmd_req_addr = 3018 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, 3019 &bp->hwrm_short_cmd_req_dma_addr, 3020 GFP_KERNEL); 3021 if (!bp->hwrm_short_cmd_req_addr) 3022 return -ENOMEM; 3023 3024 return 0; 3025 } 3026 3027 static void bnxt_free_stats(struct bnxt *bp) 3028 { 3029 u32 size, i; 3030 struct pci_dev *pdev = bp->pdev; 3031 3032 if (bp->hw_rx_port_stats) { 3033 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3034 bp->hw_rx_port_stats, 3035 bp->hw_rx_port_stats_map); 3036 bp->hw_rx_port_stats = NULL; 3037 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3038 } 3039 3040 if (!bp->bnapi) 3041 return; 3042 3043 size = sizeof(struct ctx_hw_stats); 3044 3045 for (i = 0; i < bp->cp_nr_rings; i++) { 3046 struct bnxt_napi *bnapi = bp->bnapi[i]; 3047 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3048 3049 if (cpr->hw_stats) { 3050 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3051 cpr->hw_stats_map); 3052 cpr->hw_stats = NULL; 3053 } 3054 } 3055 } 3056 3057 static int bnxt_alloc_stats(struct bnxt *bp) 3058 { 3059 u32 size, i; 3060 struct pci_dev *pdev = bp->pdev; 3061 3062 size = sizeof(struct ctx_hw_stats); 3063 3064 for (i = 0; i < bp->cp_nr_rings; i++) { 3065 struct bnxt_napi *bnapi = bp->bnapi[i]; 3066 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3067 3068 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3069 &cpr->hw_stats_map, 3070 GFP_KERNEL); 3071 if (!cpr->hw_stats) 3072 return -ENOMEM; 3073 3074 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3075 } 3076 3077 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { 3078 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3079 sizeof(struct tx_port_stats) + 1024; 3080 3081 bp->hw_rx_port_stats = 3082 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3083 &bp->hw_rx_port_stats_map, 3084 GFP_KERNEL); 3085 if (!bp->hw_rx_port_stats) 3086 return -ENOMEM; 3087 3088 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 3089 512; 3090 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3091 sizeof(struct rx_port_stats) + 512; 3092 bp->flags |= BNXT_FLAG_PORT_STATS; 3093 } 3094 return 0; 3095 } 3096 3097 static void 
bnxt_clear_ring_indices(struct bnxt *bp) 3098 { 3099 int i; 3100 3101 if (!bp->bnapi) 3102 return; 3103 3104 for (i = 0; i < bp->cp_nr_rings; i++) { 3105 struct bnxt_napi *bnapi = bp->bnapi[i]; 3106 struct bnxt_cp_ring_info *cpr; 3107 struct bnxt_rx_ring_info *rxr; 3108 struct bnxt_tx_ring_info *txr; 3109 3110 if (!bnapi) 3111 continue; 3112 3113 cpr = &bnapi->cp_ring; 3114 cpr->cp_raw_cons = 0; 3115 3116 txr = bnapi->tx_ring; 3117 if (txr) { 3118 txr->tx_prod = 0; 3119 txr->tx_cons = 0; 3120 } 3121 3122 rxr = bnapi->rx_ring; 3123 if (rxr) { 3124 rxr->rx_prod = 0; 3125 rxr->rx_agg_prod = 0; 3126 rxr->rx_sw_agg_prod = 0; 3127 rxr->rx_next_cons = 0; 3128 } 3129 } 3130 } 3131 3132 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3133 { 3134 #ifdef CONFIG_RFS_ACCEL 3135 int i; 3136 3137 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3138 * safe to delete the hash table. 3139 */ 3140 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3141 struct hlist_head *head; 3142 struct hlist_node *tmp; 3143 struct bnxt_ntuple_filter *fltr; 3144 3145 head = &bp->ntp_fltr_hash_tbl[i]; 3146 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3147 hlist_del(&fltr->hash); 3148 kfree(fltr); 3149 } 3150 } 3151 if (irq_reinit) { 3152 kfree(bp->ntp_fltr_bmap); 3153 bp->ntp_fltr_bmap = NULL; 3154 } 3155 bp->ntp_fltr_count = 0; 3156 #endif 3157 } 3158 3159 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3160 { 3161 #ifdef CONFIG_RFS_ACCEL 3162 int i, rc = 0; 3163 3164 if (!(bp->flags & BNXT_FLAG_RFS)) 3165 return 0; 3166 3167 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3168 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3169 3170 bp->ntp_fltr_count = 0; 3171 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3172 sizeof(long), 3173 GFP_KERNEL); 3174 3175 if (!bp->ntp_fltr_bmap) 3176 rc = -ENOMEM; 3177 3178 return rc; 3179 #else 3180 return 0; 3181 #endif 3182 } 3183 3184 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3185 { 3186 bnxt_free_vnic_attributes(bp); 3187 bnxt_free_tx_rings(bp); 3188 bnxt_free_rx_rings(bp); 3189 bnxt_free_cp_rings(bp); 3190 bnxt_free_ntp_fltrs(bp, irq_re_init); 3191 if (irq_re_init) { 3192 bnxt_free_stats(bp); 3193 bnxt_free_ring_grps(bp); 3194 bnxt_free_vnics(bp); 3195 kfree(bp->tx_ring_map); 3196 bp->tx_ring_map = NULL; 3197 kfree(bp->tx_ring); 3198 bp->tx_ring = NULL; 3199 kfree(bp->rx_ring); 3200 bp->rx_ring = NULL; 3201 kfree(bp->bnapi); 3202 bp->bnapi = NULL; 3203 } else { 3204 bnxt_clear_ring_indices(bp); 3205 } 3206 } 3207 3208 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3209 { 3210 int i, j, rc, size, arr_size; 3211 void *bnapi; 3212 3213 if (irq_re_init) { 3214 /* Allocate bnapi mem pointer array and mem block for 3215 * all queues 3216 */ 3217 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3218 bp->cp_nr_rings); 3219 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3220 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3221 if (!bnapi) 3222 return -ENOMEM; 3223 3224 bp->bnapi = bnapi; 3225 bnapi += arr_size; 3226 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3227 bp->bnapi[i] = bnapi; 3228 bp->bnapi[i]->index = i; 3229 bp->bnapi[i]->bp = bp; 3230 } 3231 3232 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3233 sizeof(struct bnxt_rx_ring_info), 3234 GFP_KERNEL); 3235 if (!bp->rx_ring) 3236 return -ENOMEM; 3237 3238 for (i = 0; i < bp->rx_nr_rings; i++) { 3239 bp->rx_ring[i].bnapi = bp->bnapi[i]; 3240 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 3241 } 3242 3243 bp->tx_ring = 
kcalloc(bp->tx_nr_rings, 3244 sizeof(struct bnxt_tx_ring_info), 3245 GFP_KERNEL); 3246 if (!bp->tx_ring) 3247 return -ENOMEM; 3248 3249 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 3250 GFP_KERNEL); 3251 3252 if (!bp->tx_ring_map) 3253 return -ENOMEM; 3254 3255 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 3256 j = 0; 3257 else 3258 j = bp->rx_nr_rings; 3259 3260 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 3261 bp->tx_ring[i].bnapi = bp->bnapi[j]; 3262 bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; 3263 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 3264 if (i >= bp->tx_nr_rings_xdp) { 3265 bp->tx_ring[i].txq_index = i - 3266 bp->tx_nr_rings_xdp; 3267 bp->bnapi[j]->tx_int = bnxt_tx_int; 3268 } else { 3269 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 3270 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 3271 } 3272 } 3273 3274 rc = bnxt_alloc_stats(bp); 3275 if (rc) 3276 goto alloc_mem_err; 3277 3278 rc = bnxt_alloc_ntp_fltrs(bp); 3279 if (rc) 3280 goto alloc_mem_err; 3281 3282 rc = bnxt_alloc_vnics(bp); 3283 if (rc) 3284 goto alloc_mem_err; 3285 } 3286 3287 bnxt_init_ring_struct(bp); 3288 3289 rc = bnxt_alloc_rx_rings(bp); 3290 if (rc) 3291 goto alloc_mem_err; 3292 3293 rc = bnxt_alloc_tx_rings(bp); 3294 if (rc) 3295 goto alloc_mem_err; 3296 3297 rc = bnxt_alloc_cp_rings(bp); 3298 if (rc) 3299 goto alloc_mem_err; 3300 3301 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 3302 BNXT_VNIC_UCAST_FLAG; 3303 rc = bnxt_alloc_vnic_attributes(bp); 3304 if (rc) 3305 goto alloc_mem_err; 3306 return 0; 3307 3308 alloc_mem_err: 3309 bnxt_free_mem(bp, true); 3310 return rc; 3311 } 3312 3313 static void bnxt_disable_int(struct bnxt *bp) 3314 { 3315 int i; 3316 3317 if (!bp->bnapi) 3318 return; 3319 3320 for (i = 0; i < bp->cp_nr_rings; i++) { 3321 struct bnxt_napi *bnapi = bp->bnapi[i]; 3322 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3323 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3324 3325 if (ring->fw_ring_id != INVALID_HW_RING_ID) 3326 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3327 } 3328 } 3329 3330 static void bnxt_disable_int_sync(struct bnxt *bp) 3331 { 3332 int i; 3333 3334 atomic_inc(&bp->intr_sem); 3335 3336 bnxt_disable_int(bp); 3337 for (i = 0; i < bp->cp_nr_rings; i++) 3338 synchronize_irq(bp->irq_tbl[i].vector); 3339 } 3340 3341 static void bnxt_enable_int(struct bnxt *bp) 3342 { 3343 int i; 3344 3345 atomic_set(&bp->intr_sem, 0); 3346 for (i = 0; i < bp->cp_nr_rings; i++) { 3347 struct bnxt_napi *bnapi = bp->bnapi[i]; 3348 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3349 3350 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 3351 } 3352 } 3353 3354 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 3355 u16 cmpl_ring, u16 target_id) 3356 { 3357 struct input *req = request; 3358 3359 req->req_type = cpu_to_le16(req_type); 3360 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3361 req->target_id = cpu_to_le16(target_id); 3362 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 3363 } 3364 3365 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 3366 int timeout, bool silent) 3367 { 3368 int i, intr_process, rc, tmo_count; 3369 struct input *req = msg; 3370 u32 *data = msg; 3371 __le32 *resp_len, *valid; 3372 u16 cp_ring_id, len = 0; 3373 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3374 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 3375 struct hwrm_short_input short_input = {0}; 3376 3377 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3378 memset(resp, 0, PAGE_SIZE); 3379 cp_ring_id = 
le16_to_cpu(req->cmpl_ring); 3380 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 3381 3382 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 3383 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 3384 3385 memcpy(short_cmd_req, req, msg_len); 3386 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - 3387 msg_len); 3388 3389 short_input.req_type = req->req_type; 3390 short_input.signature = 3391 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 3392 short_input.size = cpu_to_le16(msg_len); 3393 short_input.req_addr = 3394 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 3395 3396 data = (u32 *)&short_input; 3397 msg_len = sizeof(short_input); 3398 3399 /* Sync memory write before updating doorbell */ 3400 wmb(); 3401 3402 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 3403 } 3404 3405 /* Write request msg to hwrm channel */ 3406 __iowrite32_copy(bp->bar0, data, msg_len / 4); 3407 3408 for (i = msg_len; i < max_req_len; i += 4) 3409 writel(0, bp->bar0 + i); 3410 3411 /* currently supports only one outstanding message */ 3412 if (intr_process) 3413 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 3414 3415 /* Ring channel doorbell */ 3416 writel(1, bp->bar0 + 0x100); 3417 3418 if (!timeout) 3419 timeout = DFLT_HWRM_CMD_TIMEOUT; 3420 3421 i = 0; 3422 tmo_count = timeout * 40; 3423 if (intr_process) { 3424 /* Wait until hwrm response cmpl interrupt is processed */ 3425 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3426 i++ < tmo_count) { 3427 usleep_range(25, 40); 3428 } 3429 3430 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3431 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 3432 le16_to_cpu(req->req_type)); 3433 return -1; 3434 } 3435 } else { 3436 /* Check if response len is updated */ 3437 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3438 for (i = 0; i < tmo_count; i++) { 3439 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3440 HWRM_RESP_LEN_SFT; 3441 if (len) 3442 break; 3443 usleep_range(25, 40); 3444 } 3445 3446 if (i >= tmo_count) { 3447 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 3448 timeout, le16_to_cpu(req->req_type), 3449 le16_to_cpu(req->seq_id), len); 3450 return -1; 3451 } 3452 3453 /* Last word of resp contains valid bit */ 3454 valid = bp->hwrm_cmd_resp_addr + len - 4; 3455 for (i = 0; i < 5; i++) { 3456 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) 3457 break; 3458 udelay(1); 3459 } 3460 3461 if (i >= 5) { 3462 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 3463 timeout, le16_to_cpu(req->req_type), 3464 le16_to_cpu(req->seq_id), len, *valid); 3465 return -1; 3466 } 3467 } 3468 3469 rc = le16_to_cpu(resp->error_code); 3470 if (rc && !silent) 3471 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 3472 le16_to_cpu(resp->req_type), 3473 le16_to_cpu(resp->seq_id), rc); 3474 return rc; 3475 } 3476 3477 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3478 { 3479 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3480 } 3481 3482 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3483 int timeout) 3484 { 3485 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3486 } 3487 3488 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3489 { 3490 int rc; 3491 3492 mutex_lock(&bp->hwrm_cmd_lock); 3493 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 3494 mutex_unlock(&bp->hwrm_cmd_lock); 3495 return rc; 3496 } 3497 3498 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3499 
int timeout) 3500 { 3501 int rc; 3502 3503 mutex_lock(&bp->hwrm_cmd_lock); 3504 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3505 mutex_unlock(&bp->hwrm_cmd_lock); 3506 return rc; 3507 } 3508 3509 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 3510 int bmap_size) 3511 { 3512 struct hwrm_func_drv_rgtr_input req = {0}; 3513 DECLARE_BITMAP(async_events_bmap, 256); 3514 u32 *events = (u32 *)async_events_bmap; 3515 int i; 3516 3517 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3518 3519 req.enables = 3520 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 3521 3522 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 3523 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) 3524 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 3525 3526 if (bmap && bmap_size) { 3527 for (i = 0; i < bmap_size; i++) { 3528 if (test_bit(i, bmap)) 3529 __set_bit(i, async_events_bmap); 3530 } 3531 } 3532 3533 for (i = 0; i < 8; i++) 3534 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 3535 3536 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3537 } 3538 3539 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) 3540 { 3541 struct hwrm_func_drv_rgtr_input req = {0}; 3542 3543 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3544 3545 req.enables = 3546 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 3547 FUNC_DRV_RGTR_REQ_ENABLES_VER); 3548 3549 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 3550 req.ver_maj = DRV_VER_MAJ; 3551 req.ver_min = DRV_VER_MIN; 3552 req.ver_upd = DRV_VER_UPD; 3553 3554 if (BNXT_PF(bp)) { 3555 u32 data[8]; 3556 int i; 3557 3558 memset(data, 0, sizeof(data)); 3559 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 3560 u16 cmd = bnxt_vf_req_snif[i]; 3561 unsigned int bit, idx; 3562 3563 idx = cmd / 32; 3564 bit = cmd % 32; 3565 data[idx] |= 1 << bit; 3566 } 3567 3568 for (i = 0; i < 8; i++) 3569 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 3570 3571 req.enables |= 3572 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 3573 } 3574 3575 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3576 } 3577 3578 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 3579 { 3580 struct hwrm_func_drv_unrgtr_input req = {0}; 3581 3582 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 3583 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3584 } 3585 3586 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 3587 { 3588 u32 rc = 0; 3589 struct hwrm_tunnel_dst_port_free_input req = {0}; 3590 3591 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 3592 req.tunnel_type = tunnel_type; 3593 3594 switch (tunnel_type) { 3595 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 3596 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 3597 break; 3598 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 3599 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 3600 break; 3601 default: 3602 break; 3603 } 3604 3605 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3606 if (rc) 3607 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 3608 rc); 3609 return rc; 3610 } 3611 3612 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 3613 u8 tunnel_type) 3614 { 3615 u32 rc = 0; 3616 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 3617 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3618 3619 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 3620 3621 req.tunnel_type = tunnel_type; 3622 req.tunnel_dst_port_val = port; 3623 3624 mutex_lock(&bp->hwrm_cmd_lock); 3625 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3626 if (rc) { 3627 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 3628 rc); 3629 goto err_out; 3630 } 3631 3632 switch (tunnel_type) { 3633 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3634 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3635 break; 3636 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3637 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3638 break; 3639 default: 3640 break; 3641 } 3642 3643 err_out: 3644 mutex_unlock(&bp->hwrm_cmd_lock); 3645 return rc; 3646 } 3647 3648 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 3649 { 3650 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 3651 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3652 3653 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 3654 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3655 3656 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 3657 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 3658 req.mask = cpu_to_le32(vnic->rx_mask); 3659 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3660 } 3661 3662 #ifdef CONFIG_RFS_ACCEL 3663 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 3664 struct bnxt_ntuple_filter *fltr) 3665 { 3666 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 3667 3668 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 3669 req.ntuple_filter_id = fltr->filter_id; 3670 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3671 } 3672 3673 #define BNXT_NTP_FLTR_FLAGS \ 3674 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 3675 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 3676 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 3677 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 3678 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 3679 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 3680 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 3681 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 3682 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 3683 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 3684 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 3685 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 3686 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 3687 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 3688 3689 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 3690 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 3691 3692 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 3693 struct bnxt_ntuple_filter *fltr) 3694 { 3695 int rc = 0; 3696 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 3697 struct hwrm_cfa_ntuple_filter_alloc_output *resp = 3698 bp->hwrm_cmd_resp_addr; 3699 struct flow_keys *keys = &fltr->fkeys; 3700 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; 3701 3702 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 3703 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 
3704 3705 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 3706 3707 req.ethertype = htons(ETH_P_IP); 3708 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 3709 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 3710 req.ip_protocol = keys->basic.ip_proto; 3711 3712 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 3713 int i; 3714 3715 req.ethertype = htons(ETH_P_IPV6); 3716 req.ip_addr_type = 3717 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 3718 *(struct in6_addr *)&req.src_ipaddr[0] = 3719 keys->addrs.v6addrs.src; 3720 *(struct in6_addr *)&req.dst_ipaddr[0] = 3721 keys->addrs.v6addrs.dst; 3722 for (i = 0; i < 4; i++) { 3723 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3724 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3725 } 3726 } else { 3727 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 3728 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3729 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 3730 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3731 } 3732 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 3733 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 3734 req.tunnel_type = 3735 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 3736 } 3737 3738 req.src_port = keys->ports.src; 3739 req.src_port_mask = cpu_to_be16(0xffff); 3740 req.dst_port = keys->ports.dst; 3741 req.dst_port_mask = cpu_to_be16(0xffff); 3742 3743 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 3744 mutex_lock(&bp->hwrm_cmd_lock); 3745 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3746 if (!rc) 3747 fltr->filter_id = resp->ntuple_filter_id; 3748 mutex_unlock(&bp->hwrm_cmd_lock); 3749 return rc; 3750 } 3751 #endif 3752 3753 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 3754 u8 *mac_addr) 3755 { 3756 u32 rc = 0; 3757 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 3758 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3759 3760 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 3761 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 3762 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 3763 req.flags |= 3764 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 3765 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 3766 req.enables = 3767 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 3768 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 3769 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 3770 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 3771 req.l2_addr_mask[0] = 0xff; 3772 req.l2_addr_mask[1] = 0xff; 3773 req.l2_addr_mask[2] = 0xff; 3774 req.l2_addr_mask[3] = 0xff; 3775 req.l2_addr_mask[4] = 0xff; 3776 req.l2_addr_mask[5] = 0xff; 3777 3778 mutex_lock(&bp->hwrm_cmd_lock); 3779 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3780 if (!rc) 3781 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 3782 resp->l2_filter_id; 3783 mutex_unlock(&bp->hwrm_cmd_lock); 3784 return rc; 3785 } 3786 3787 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 3788 { 3789 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 3790 int rc = 0; 3791 3792 /* Any associated ntuple filters will also be cleared by firmware. 
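	 * Only the L2 (MAC) filters need to be freed explicitly below.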
*/ 3793 mutex_lock(&bp->hwrm_cmd_lock); 3794 for (i = 0; i < num_of_vnics; i++) { 3795 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3796 3797 for (j = 0; j < vnic->uc_filter_count; j++) { 3798 struct hwrm_cfa_l2_filter_free_input req = {0}; 3799 3800 bnxt_hwrm_cmd_hdr_init(bp, &req, 3801 HWRM_CFA_L2_FILTER_FREE, -1, -1); 3802 3803 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 3804 3805 rc = _hwrm_send_message(bp, &req, sizeof(req), 3806 HWRM_CMD_TIMEOUT); 3807 } 3808 vnic->uc_filter_count = 0; 3809 } 3810 mutex_unlock(&bp->hwrm_cmd_lock); 3811 3812 return rc; 3813 } 3814 3815 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 3816 { 3817 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3818 struct hwrm_vnic_tpa_cfg_input req = {0}; 3819 3820 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3821 3822 if (tpa_flags) { 3823 u16 mss = bp->dev->mtu - 40; 3824 u32 nsegs, n, segs = 0, flags; 3825 3826 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 3827 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 3828 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 3829 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 3830 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 3831 if (tpa_flags & BNXT_FLAG_GRO) 3832 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 3833 3834 req.flags = cpu_to_le32(flags); 3835 3836 req.enables = 3837 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 3838 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 3839 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 3840 3841 /* Number of segs are log2 units, and first packet is not 3842 * included as part of this units. 3843 */ 3844 if (mss <= BNXT_RX_PAGE_SIZE) { 3845 n = BNXT_RX_PAGE_SIZE / mss; 3846 nsegs = (MAX_SKB_FRAGS - 1) * n; 3847 } else { 3848 n = mss / BNXT_RX_PAGE_SIZE; 3849 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 3850 n++; 3851 nsegs = (MAX_SKB_FRAGS - n) / n; 3852 } 3853 3854 segs = ilog2(nsegs); 3855 req.max_agg_segs = cpu_to_le16(segs); 3856 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); 3857 3858 req.min_agg_len = cpu_to_le32(512); 3859 } 3860 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3861 3862 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3863 } 3864 3865 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 3866 { 3867 u32 i, j, max_rings; 3868 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3869 struct hwrm_vnic_rss_cfg_input req = {0}; 3870 3871 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 3872 return 0; 3873 3874 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 3875 if (set_rss) { 3876 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 3877 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 3878 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3879 max_rings = bp->rx_nr_rings - 1; 3880 else 3881 max_rings = bp->rx_nr_rings; 3882 } else { 3883 max_rings = 1; 3884 } 3885 3886 /* Fill the RSS indirection table with ring group ids */ 3887 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 3888 if (j == max_rings) 3889 j = 0; 3890 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 3891 } 3892 3893 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 3894 req.hash_key_tbl_addr = 3895 cpu_to_le64(vnic->rss_hash_key_dma_addr); 3896 } 3897 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3898 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3899 } 3900 3901 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 3902 { 3903 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3904 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 3905 3906 
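	/* This follows the HWRM request pattern used throughout this driver,
	 * e.g.:
	 *
	 *	struct hwrm_vnic_plcmodes_cfg_input req = {0};
	 *
	 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	 *	... fill command-specific fields ...
	 *	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	 *
	 * Here jumbo placement and IPv4/IPv6 header-data split are enabled,
	 * with both thresholds set to the copy-break size (rx_copy_thresh).
	 */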
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 3907 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 3908 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 3909 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 3910 req.enables = 3911 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 3912 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 3913 /* thresholds not implemented in firmware yet */ 3914 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 3915 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 3916 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3917 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3918 } 3919 3920 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 3921 u16 ctx_idx) 3922 { 3923 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 3924 3925 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 3926 req.rss_cos_lb_ctx_id = 3927 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 3928 3929 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3930 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 3931 } 3932 3933 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 3934 { 3935 int i, j; 3936 3937 for (i = 0; i < bp->nr_vnics; i++) { 3938 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3939 3940 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 3941 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 3942 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 3943 } 3944 } 3945 bp->rsscos_nr_ctxs = 0; 3946 } 3947 3948 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 3949 { 3950 int rc; 3951 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 3952 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 3953 bp->hwrm_cmd_resp_addr; 3954 3955 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 3956 -1); 3957 3958 mutex_lock(&bp->hwrm_cmd_lock); 3959 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3960 if (!rc) 3961 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 3962 le16_to_cpu(resp->rss_cos_lb_ctx_id); 3963 mutex_unlock(&bp->hwrm_cmd_lock); 3964 3965 return rc; 3966 } 3967 3968 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 3969 { 3970 unsigned int ring = 0, grp_idx; 3971 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3972 struct hwrm_vnic_cfg_input req = {0}; 3973 u16 def_vlan = 0; 3974 3975 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3976 3977 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 3978 /* Only RSS support for now TBD: COS & LB */ 3979 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 3980 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3981 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3982 VNIC_CFG_REQ_ENABLES_MRU); 3983 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 3984 req.rss_rule = 3985 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 3986 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3987 VNIC_CFG_REQ_ENABLES_MRU); 3988 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 3989 } else { 3990 req.rss_rule = cpu_to_le16(0xffff); 3991 } 3992 3993 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 3994 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 3995 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 3996 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 3997 } else { 3998 req.cos_rule = cpu_to_le16(0xffff); 3999 } 4000 4001 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4002 
ring = 0; 4003 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 4004 ring = vnic_id - 1; 4005 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 4006 ring = bp->rx_nr_rings - 1; 4007 4008 grp_idx = bp->rx_ring[ring].bnapi->index; 4009 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4010 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 4011 4012 req.lb_rule = cpu_to_le16(0xffff); 4013 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 4014 VLAN_HLEN); 4015 4016 #ifdef CONFIG_BNXT_SRIOV 4017 if (BNXT_VF(bp)) 4018 def_vlan = bp->vf.vlan; 4019 #endif 4020 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 4021 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 4022 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 4023 req.flags |= 4024 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); 4025 4026 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4027 } 4028 4029 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 4030 { 4031 u32 rc = 0; 4032 4033 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 4034 struct hwrm_vnic_free_input req = {0}; 4035 4036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 4037 req.vnic_id = 4038 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 4039 4040 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4041 if (rc) 4042 return rc; 4043 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 4044 } 4045 return rc; 4046 } 4047 4048 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 4049 { 4050 u16 i; 4051 4052 for (i = 0; i < bp->nr_vnics; i++) 4053 bnxt_hwrm_vnic_free_one(bp, i); 4054 } 4055 4056 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 4057 unsigned int start_rx_ring_idx, 4058 unsigned int nr_rings) 4059 { 4060 int rc = 0; 4061 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 4062 struct hwrm_vnic_alloc_input req = {0}; 4063 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4064 4065 /* map ring groups to this vnic */ 4066 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 4067 grp_idx = bp->rx_ring[i].bnapi->index; 4068 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 4069 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 4070 j, nr_rings); 4071 break; 4072 } 4073 bp->vnic_info[vnic_id].fw_grp_ids[j] = 4074 bp->grp_info[grp_idx].fw_grp_id; 4075 } 4076 4077 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 4078 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 4079 if (vnic_id == 0) 4080 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 4081 4082 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 4083 4084 mutex_lock(&bp->hwrm_cmd_lock); 4085 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4086 if (!rc) 4087 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); 4088 mutex_unlock(&bp->hwrm_cmd_lock); 4089 return rc; 4090 } 4091 4092 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 4093 { 4094 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4095 struct hwrm_vnic_qcaps_input req = {0}; 4096 int rc; 4097 4098 if (bp->hwrm_spec_code < 0x10600) 4099 return 0; 4100 4101 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 4102 mutex_lock(&bp->hwrm_cmd_lock); 4103 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4104 if (!rc) { 4105 if (resp->flags & 4106 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 4107 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 4108 } 4109 
mutex_unlock(&bp->hwrm_cmd_lock); 4110 return rc; 4111 } 4112 4113 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 4114 { 4115 u16 i; 4116 u32 rc = 0; 4117 4118 mutex_lock(&bp->hwrm_cmd_lock); 4119 for (i = 0; i < bp->rx_nr_rings; i++) { 4120 struct hwrm_ring_grp_alloc_input req = {0}; 4121 struct hwrm_ring_grp_alloc_output *resp = 4122 bp->hwrm_cmd_resp_addr; 4123 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 4124 4125 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 4126 4127 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 4128 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 4129 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 4130 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 4131 4132 rc = _hwrm_send_message(bp, &req, sizeof(req), 4133 HWRM_CMD_TIMEOUT); 4134 if (rc) 4135 break; 4136 4137 bp->grp_info[grp_idx].fw_grp_id = 4138 le32_to_cpu(resp->ring_group_id); 4139 } 4140 mutex_unlock(&bp->hwrm_cmd_lock); 4141 return rc; 4142 } 4143 4144 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 4145 { 4146 u16 i; 4147 u32 rc = 0; 4148 struct hwrm_ring_grp_free_input req = {0}; 4149 4150 if (!bp->grp_info) 4151 return 0; 4152 4153 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 4154 4155 mutex_lock(&bp->hwrm_cmd_lock); 4156 for (i = 0; i < bp->cp_nr_rings; i++) { 4157 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 4158 continue; 4159 req.ring_group_id = 4160 cpu_to_le32(bp->grp_info[i].fw_grp_id); 4161 4162 rc = _hwrm_send_message(bp, &req, sizeof(req), 4163 HWRM_CMD_TIMEOUT); 4164 if (rc) 4165 break; 4166 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4167 } 4168 mutex_unlock(&bp->hwrm_cmd_lock); 4169 return rc; 4170 } 4171 4172 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 4173 struct bnxt_ring_struct *ring, 4174 u32 ring_type, u32 map_index, 4175 u32 stats_ctx_id) 4176 { 4177 int rc = 0, err = 0; 4178 struct hwrm_ring_alloc_input req = {0}; 4179 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4180 u16 ring_id; 4181 4182 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 4183 4184 req.enables = 0; 4185 if (ring->nr_pages > 1) { 4186 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); 4187 /* Page size is in log2 units */ 4188 req.page_size = BNXT_PAGE_SHIFT; 4189 req.page_tbl_depth = 1; 4190 } else { 4191 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); 4192 } 4193 req.fbo = 0; 4194 /* Association of ring index with doorbell index and MSIX number */ 4195 req.logical_id = cpu_to_le16(map_index); 4196 4197 switch (ring_type) { 4198 case HWRM_RING_ALLOC_TX: 4199 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 4200 /* Association of transmit ring with completion ring */ 4201 req.cmpl_ring_id = 4202 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); 4203 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 4204 req.stat_ctx_id = cpu_to_le32(stats_ctx_id); 4205 req.queue_id = cpu_to_le16(ring->queue_id); 4206 break; 4207 case HWRM_RING_ALLOC_RX: 4208 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4209 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 4210 break; 4211 case HWRM_RING_ALLOC_AGG: 4212 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4213 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 4214 break; 4215 case HWRM_RING_ALLOC_CMPL: 4216 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 4217 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 4218 if (bp->flags & BNXT_FLAG_USING_MSIX) 4219 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 4220 break; 4221 default: 4222 netdev_err(bp->dev, 
"hwrm alloc invalid ring type %d\n", 4223 ring_type); 4224 return -1; 4225 } 4226 4227 mutex_lock(&bp->hwrm_cmd_lock); 4228 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4229 err = le16_to_cpu(resp->error_code); 4230 ring_id = le16_to_cpu(resp->ring_id); 4231 mutex_unlock(&bp->hwrm_cmd_lock); 4232 4233 if (rc || err) { 4234 switch (ring_type) { 4235 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4236 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 4237 rc, err); 4238 return -1; 4239 4240 case RING_FREE_REQ_RING_TYPE_RX: 4241 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", 4242 rc, err); 4243 return -1; 4244 4245 case RING_FREE_REQ_RING_TYPE_TX: 4246 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", 4247 rc, err); 4248 return -1; 4249 4250 default: 4251 netdev_err(bp->dev, "Invalid ring\n"); 4252 return -1; 4253 } 4254 } 4255 ring->fw_ring_id = ring_id; 4256 return rc; 4257 } 4258 4259 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 4260 { 4261 int rc; 4262 4263 if (BNXT_PF(bp)) { 4264 struct hwrm_func_cfg_input req = {0}; 4265 4266 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4267 req.fid = cpu_to_le16(0xffff); 4268 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4269 req.async_event_cr = cpu_to_le16(idx); 4270 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4271 } else { 4272 struct hwrm_func_vf_cfg_input req = {0}; 4273 4274 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4275 req.enables = 4276 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4277 req.async_event_cr = cpu_to_le16(idx); 4278 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4279 } 4280 return rc; 4281 } 4282 4283 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 4284 { 4285 int i, rc = 0; 4286 4287 for (i = 0; i < bp->cp_nr_rings; i++) { 4288 struct bnxt_napi *bnapi = bp->bnapi[i]; 4289 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4290 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4291 4292 cpr->cp_doorbell = bp->bar1 + i * 0x80; 4293 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 4294 INVALID_STATS_CTX_ID); 4295 if (rc) 4296 goto err_out; 4297 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4298 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4299 4300 if (!i) { 4301 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 4302 if (rc) 4303 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 4304 } 4305 } 4306 4307 for (i = 0; i < bp->tx_nr_rings; i++) { 4308 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4309 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4310 u32 map_idx = txr->bnapi->index; 4311 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; 4312 4313 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, 4314 map_idx, fw_stats_ctx); 4315 if (rc) 4316 goto err_out; 4317 txr->tx_doorbell = bp->bar1 + map_idx * 0x80; 4318 } 4319 4320 for (i = 0; i < bp->rx_nr_rings; i++) { 4321 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4322 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4323 u32 map_idx = rxr->bnapi->index; 4324 4325 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, 4326 map_idx, INVALID_STATS_CTX_ID); 4327 if (rc) 4328 goto err_out; 4329 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; 4330 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 4331 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 4332 } 4333 4334 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4335 for (i = 0; i < 
bp->rx_nr_rings; i++) { 4336 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4337 struct bnxt_ring_struct *ring = 4338 &rxr->rx_agg_ring_struct; 4339 u32 grp_idx = rxr->bnapi->index; 4340 u32 map_idx = grp_idx + bp->rx_nr_rings; 4341 4342 rc = hwrm_ring_alloc_send_msg(bp, ring, 4343 HWRM_RING_ALLOC_AGG, 4344 map_idx, 4345 INVALID_STATS_CTX_ID); 4346 if (rc) 4347 goto err_out; 4348 4349 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; 4350 writel(DB_KEY_RX | rxr->rx_agg_prod, 4351 rxr->rx_agg_doorbell); 4352 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 4353 } 4354 } 4355 err_out: 4356 return rc; 4357 } 4358 4359 static int hwrm_ring_free_send_msg(struct bnxt *bp, 4360 struct bnxt_ring_struct *ring, 4361 u32 ring_type, int cmpl_ring_id) 4362 { 4363 int rc; 4364 struct hwrm_ring_free_input req = {0}; 4365 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 4366 u16 error_code; 4367 4368 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 4369 req.ring_type = ring_type; 4370 req.ring_id = cpu_to_le16(ring->fw_ring_id); 4371 4372 mutex_lock(&bp->hwrm_cmd_lock); 4373 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4374 error_code = le16_to_cpu(resp->error_code); 4375 mutex_unlock(&bp->hwrm_cmd_lock); 4376 4377 if (rc || error_code) { 4378 switch (ring_type) { 4379 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4380 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 4381 rc); 4382 return rc; 4383 case RING_FREE_REQ_RING_TYPE_RX: 4384 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", 4385 rc); 4386 return rc; 4387 case RING_FREE_REQ_RING_TYPE_TX: 4388 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", 4389 rc); 4390 return rc; 4391 default: 4392 netdev_err(bp->dev, "Invalid ring\n"); 4393 return -1; 4394 } 4395 } 4396 return 0; 4397 } 4398 4399 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 4400 { 4401 int i; 4402 4403 if (!bp->bnapi) 4404 return; 4405 4406 for (i = 0; i < bp->tx_nr_rings; i++) { 4407 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4408 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4409 u32 grp_idx = txr->bnapi->index; 4410 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4411 4412 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4413 hwrm_ring_free_send_msg(bp, ring, 4414 RING_FREE_REQ_RING_TYPE_TX, 4415 close_path ? cmpl_ring_id : 4416 INVALID_HW_RING_ID); 4417 ring->fw_ring_id = INVALID_HW_RING_ID; 4418 } 4419 } 4420 4421 for (i = 0; i < bp->rx_nr_rings; i++) { 4422 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4423 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4424 u32 grp_idx = rxr->bnapi->index; 4425 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4426 4427 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4428 hwrm_ring_free_send_msg(bp, ring, 4429 RING_FREE_REQ_RING_TYPE_RX, 4430 close_path ? cmpl_ring_id : 4431 INVALID_HW_RING_ID); 4432 ring->fw_ring_id = INVALID_HW_RING_ID; 4433 bp->grp_info[grp_idx].rx_fw_ring_id = 4434 INVALID_HW_RING_ID; 4435 } 4436 } 4437 4438 for (i = 0; i < bp->rx_nr_rings; i++) { 4439 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4440 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 4441 u32 grp_idx = rxr->bnapi->index; 4442 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4443 4444 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4445 hwrm_ring_free_send_msg(bp, ring, 4446 RING_FREE_REQ_RING_TYPE_RX, 4447 close_path ? 
cmpl_ring_id : 4448 INVALID_HW_RING_ID); 4449 ring->fw_ring_id = INVALID_HW_RING_ID; 4450 bp->grp_info[grp_idx].agg_fw_ring_id = 4451 INVALID_HW_RING_ID; 4452 } 4453 } 4454 4455 /* The completion rings are about to be freed. After that the 4456 * IRQ doorbell will not work anymore. So we need to disable 4457 * IRQ here. 4458 */ 4459 bnxt_disable_int_sync(bp); 4460 4461 for (i = 0; i < bp->cp_nr_rings; i++) { 4462 struct bnxt_napi *bnapi = bp->bnapi[i]; 4463 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4464 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4465 4466 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4467 hwrm_ring_free_send_msg(bp, ring, 4468 RING_FREE_REQ_RING_TYPE_L2_CMPL, 4469 INVALID_HW_RING_ID); 4470 ring->fw_ring_id = INVALID_HW_RING_ID; 4471 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4472 } 4473 } 4474 } 4475 4476 /* Caller must hold bp->hwrm_cmd_lock */ 4477 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 4478 { 4479 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4480 struct hwrm_func_qcfg_input req = {0}; 4481 int rc; 4482 4483 if (bp->hwrm_spec_code < 0x10601) 4484 return 0; 4485 4486 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4487 req.fid = cpu_to_le16(fid); 4488 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4489 if (!rc) 4490 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 4491 4492 return rc; 4493 } 4494 4495 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) 4496 { 4497 struct hwrm_func_cfg_input req = {0}; 4498 int rc; 4499 4500 if (bp->hwrm_spec_code < 0x10601) 4501 return 0; 4502 4503 if (BNXT_VF(bp)) 4504 return 0; 4505 4506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4507 req.fid = cpu_to_le16(0xffff); 4508 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4509 req.num_tx_rings = cpu_to_le16(*tx_rings); 4510 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4511 if (rc) 4512 return rc; 4513 4514 mutex_lock(&bp->hwrm_cmd_lock); 4515 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); 4516 mutex_unlock(&bp->hwrm_cmd_lock); 4517 if (!rc) 4518 bp->tx_reserved_rings = *tx_rings; 4519 return rc; 4520 } 4521 4522 static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) 4523 { 4524 struct hwrm_func_cfg_input req = {0}; 4525 int rc; 4526 4527 if (bp->hwrm_spec_code < 0x10801) 4528 return 0; 4529 4530 if (BNXT_VF(bp)) 4531 return 0; 4532 4533 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4534 req.fid = cpu_to_le16(0xffff); 4535 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST); 4536 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4537 req.num_tx_rings = cpu_to_le16(tx_rings); 4538 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4539 if (rc) 4540 return -ENOMEM; 4541 return 0; 4542 } 4543 4544 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, 4545 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 4546 { 4547 u16 val, tmr, max, flags; 4548 4549 max = hw_coal->bufs_per_record * 128; 4550 if (hw_coal->budget) 4551 max = hw_coal->bufs_per_record * hw_coal->budget; 4552 4553 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 4554 req->num_cmpl_aggr_int = cpu_to_le16(val); 4555 4556 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ 4557 val = min_t(u16, val, 63); 4558 req->num_cmpl_dma_aggr = cpu_to_le16(val); 4559 4560 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ 4561 val = clamp_t(u16, 
hw_coal->coal_bufs_irq, 1, 63); 4562 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 4563 4564 tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); 4565 tmr = max_t(u16, tmr, 1); 4566 req->int_lat_tmr_max = cpu_to_le16(tmr); 4567 4568 /* min timer set to 1/2 of interrupt timer */ 4569 val = tmr / 2; 4570 req->int_lat_tmr_min = cpu_to_le16(val); 4571 4572 /* buf timer set to 1/4 of interrupt timer */ 4573 val = max_t(u16, tmr / 4, 1); 4574 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 4575 4576 tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); 4577 tmr = max_t(u16, tmr, 1); 4578 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); 4579 4580 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4581 if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 4582 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 4583 req->flags = cpu_to_le16(flags); 4584 } 4585 4586 int bnxt_hwrm_set_coal(struct bnxt *bp) 4587 { 4588 int i, rc = 0; 4589 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 4590 req_tx = {0}, *req; 4591 4592 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 4593 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4594 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 4595 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4596 4597 bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); 4598 bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); 4599 4600 mutex_lock(&bp->hwrm_cmd_lock); 4601 for (i = 0; i < bp->cp_nr_rings; i++) { 4602 struct bnxt_napi *bnapi = bp->bnapi[i]; 4603 4604 req = &req_rx; 4605 if (!bnapi->rx_ring) 4606 req = &req_tx; 4607 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); 4608 4609 rc = _hwrm_send_message(bp, req, sizeof(*req), 4610 HWRM_CMD_TIMEOUT); 4611 if (rc) 4612 break; 4613 } 4614 mutex_unlock(&bp->hwrm_cmd_lock); 4615 return rc; 4616 } 4617 4618 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 4619 { 4620 int rc = 0, i; 4621 struct hwrm_stat_ctx_free_input req = {0}; 4622 4623 if (!bp->bnapi) 4624 return 0; 4625 4626 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4627 return 0; 4628 4629 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 4630 4631 mutex_lock(&bp->hwrm_cmd_lock); 4632 for (i = 0; i < bp->cp_nr_rings; i++) { 4633 struct bnxt_napi *bnapi = bp->bnapi[i]; 4634 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4635 4636 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 4637 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 4638 4639 rc = _hwrm_send_message(bp, &req, sizeof(req), 4640 HWRM_CMD_TIMEOUT); 4641 if (rc) 4642 break; 4643 4644 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4645 } 4646 } 4647 mutex_unlock(&bp->hwrm_cmd_lock); 4648 return rc; 4649 } 4650 4651 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 4652 { 4653 int rc = 0, i; 4654 struct hwrm_stat_ctx_alloc_input req = {0}; 4655 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4656 4657 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4658 return 0; 4659 4660 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 4661 4662 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 4663 4664 mutex_lock(&bp->hwrm_cmd_lock); 4665 for (i = 0; i < bp->cp_nr_rings; i++) { 4666 struct bnxt_napi *bnapi = bp->bnapi[i]; 4667 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4668 4669 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 4670 4671 rc = _hwrm_send_message(bp, &req, sizeof(req), 4672 HWRM_CMD_TIMEOUT); 4673 if (rc) 4674 break; 4675 4676 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 4677 4678 
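		/* Cache the stats context ID in the ring group info; it is
		 * passed back to firmware later in HWRM_RING_GRP_ALLOC and
		 * in the TX HWRM_RING_ALLOC request.
		 */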
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 4679 } 4680 mutex_unlock(&bp->hwrm_cmd_lock); 4681 return rc; 4682 } 4683 4684 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 4685 { 4686 struct hwrm_func_qcfg_input req = {0}; 4687 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4688 u16 flags; 4689 int rc; 4690 4691 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4692 req.fid = cpu_to_le16(0xffff); 4693 mutex_lock(&bp->hwrm_cmd_lock); 4694 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4695 if (rc) 4696 goto func_qcfg_exit; 4697 4698 #ifdef CONFIG_BNXT_SRIOV 4699 if (BNXT_VF(bp)) { 4700 struct bnxt_vf_info *vf = &bp->vf; 4701 4702 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4703 } 4704 #endif 4705 flags = le16_to_cpu(resp->flags); 4706 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 4707 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 4708 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; 4709 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 4710 bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; 4711 } 4712 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 4713 bp->flags |= BNXT_FLAG_MULTI_HOST; 4714 4715 switch (resp->port_partition_type) { 4716 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4717 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4718 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 4719 bp->port_partition_type = resp->port_partition_type; 4720 break; 4721 } 4722 if (bp->hwrm_spec_code < 0x10707 || 4723 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 4724 bp->br_mode = BRIDGE_MODE_VEB; 4725 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 4726 bp->br_mode = BRIDGE_MODE_VEPA; 4727 else 4728 bp->br_mode = BRIDGE_MODE_UNDEF; 4729 4730 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 4731 if (!bp->max_mtu) 4732 bp->max_mtu = BNXT_MAX_MTU; 4733 4734 func_qcfg_exit: 4735 mutex_unlock(&bp->hwrm_cmd_lock); 4736 return rc; 4737 } 4738 4739 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4740 { 4741 int rc = 0; 4742 struct hwrm_func_qcaps_input req = {0}; 4743 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4744 4745 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 4746 req.fid = cpu_to_le16(0xffff); 4747 4748 mutex_lock(&bp->hwrm_cmd_lock); 4749 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4750 if (rc) 4751 goto hwrm_func_qcaps_exit; 4752 4753 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) 4754 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 4755 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) 4756 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 4757 4758 bp->tx_push_thresh = 0; 4759 if (resp->flags & 4760 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) 4761 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 4762 4763 if (BNXT_PF(bp)) { 4764 struct bnxt_pf_info *pf = &bp->pf; 4765 4766 pf->fw_fid = le16_to_cpu(resp->fid); 4767 pf->port_id = le16_to_cpu(resp->port_id); 4768 bp->dev->dev_port = pf->port_id; 4769 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4770 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4771 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4772 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4773 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4774 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4775 if (!pf->max_hw_ring_grps) 4776 pf->max_hw_ring_grps = pf->max_tx_rings; 4777 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4778 pf->max_vnics = le16_to_cpu(resp->max_vnics); 4779 
pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4780 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 4781 pf->max_vfs = le16_to_cpu(resp->max_vfs); 4782 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 4783 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 4784 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 4785 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 4786 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 4787 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 4788 if (resp->flags & 4789 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)) 4790 bp->flags |= BNXT_FLAG_WOL_CAP; 4791 } else { 4792 #ifdef CONFIG_BNXT_SRIOV 4793 struct bnxt_vf_info *vf = &bp->vf; 4794 4795 vf->fw_fid = le16_to_cpu(resp->fid); 4796 4797 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4798 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4799 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4800 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4801 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4802 if (!vf->max_hw_ring_grps) 4803 vf->max_hw_ring_grps = vf->max_tx_rings; 4804 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4805 vf->max_vnics = le16_to_cpu(resp->max_vnics); 4806 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4807 4808 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4809 #endif 4810 } 4811 4812 hwrm_func_qcaps_exit: 4813 mutex_unlock(&bp->hwrm_cmd_lock); 4814 return rc; 4815 } 4816 4817 static int bnxt_hwrm_func_reset(struct bnxt *bp) 4818 { 4819 struct hwrm_func_reset_input req = {0}; 4820 4821 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 4822 req.enables = 0; 4823 4824 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 4825 } 4826 4827 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 4828 { 4829 int rc = 0; 4830 struct hwrm_queue_qportcfg_input req = {0}; 4831 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 4832 u8 i, *qptr; 4833 4834 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 4835 4836 mutex_lock(&bp->hwrm_cmd_lock); 4837 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4838 if (rc) 4839 goto qportcfg_exit; 4840 4841 if (!resp->max_configurable_queues) { 4842 rc = -EINVAL; 4843 goto qportcfg_exit; 4844 } 4845 bp->max_tc = resp->max_configurable_queues; 4846 bp->max_lltc = resp->max_configurable_lossless_queues; 4847 if (bp->max_tc > BNXT_MAX_QUEUE) 4848 bp->max_tc = BNXT_MAX_QUEUE; 4849 4850 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 4851 bp->max_tc = 1; 4852 4853 if (bp->max_lltc > bp->max_tc) 4854 bp->max_lltc = bp->max_tc; 4855 4856 qptr = &resp->queue_id0; 4857 for (i = 0; i < bp->max_tc; i++) { 4858 bp->q_info[i].queue_id = *qptr++; 4859 bp->q_info[i].queue_profile = *qptr++; 4860 } 4861 4862 qportcfg_exit: 4863 mutex_unlock(&bp->hwrm_cmd_lock); 4864 return rc; 4865 } 4866 4867 static int bnxt_hwrm_ver_get(struct bnxt *bp) 4868 { 4869 int rc; 4870 struct hwrm_ver_get_input req = {0}; 4871 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 4872 u32 dev_caps_cfg; 4873 4874 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 4875 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 4876 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 4877 req.hwrm_intf_min = HWRM_VERSION_MINOR; 4878 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 4879 mutex_lock(&bp->hwrm_cmd_lock); 4880 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4881 if (rc) 4882 goto 
hwrm_ver_get_exit; 4883 4884 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 4885 4886 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | 4887 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; 4888 if (resp->hwrm_intf_maj < 1) { 4889 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 4890 resp->hwrm_intf_maj, resp->hwrm_intf_min, 4891 resp->hwrm_intf_upd); 4892 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 4893 } 4894 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 4895 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, 4896 resp->hwrm_fw_rsvd); 4897 4898 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 4899 if (!bp->hwrm_cmd_timeout) 4900 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 4901 4902 if (resp->hwrm_intf_maj >= 1) 4903 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4904 4905 bp->chip_num = le16_to_cpu(resp->chip_num); 4906 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 4907 !resp->chip_metal) 4908 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 4909 4910 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 4911 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 4912 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 4913 bp->flags |= BNXT_FLAG_SHORT_CMD; 4914 4915 hwrm_ver_get_exit: 4916 mutex_unlock(&bp->hwrm_cmd_lock); 4917 return rc; 4918 } 4919 4920 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 4921 { 4922 struct hwrm_fw_set_time_input req = {0}; 4923 struct tm tm; 4924 time64_t now = ktime_get_real_seconds(); 4925 4926 if (bp->hwrm_spec_code < 0x10400) 4927 return -EOPNOTSUPP; 4928 4929 time64_to_tm(now, 0, &tm); 4930 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 4931 req.year = cpu_to_le16(1900 + tm.tm_year); 4932 req.month = 1 + tm.tm_mon; 4933 req.day = tm.tm_mday; 4934 req.hour = tm.tm_hour; 4935 req.minute = tm.tm_min; 4936 req.second = tm.tm_sec; 4937 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4938 } 4939 4940 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 4941 { 4942 int rc; 4943 struct bnxt_pf_info *pf = &bp->pf; 4944 struct hwrm_port_qstats_input req = {0}; 4945 4946 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 4947 return 0; 4948 4949 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 4950 req.port_id = cpu_to_le16(pf->port_id); 4951 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 4952 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 4953 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4954 return rc; 4955 } 4956 4957 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 4958 { 4959 if (bp->vxlan_port_cnt) { 4960 bnxt_hwrm_tunnel_dst_port_free( 4961 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 4962 } 4963 bp->vxlan_port_cnt = 0; 4964 if (bp->nge_port_cnt) { 4965 bnxt_hwrm_tunnel_dst_port_free( 4966 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 4967 } 4968 bp->nge_port_cnt = 0; 4969 } 4970 4971 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 4972 { 4973 int rc, i; 4974 u32 tpa_flags = 0; 4975 4976 if (set_tpa) 4977 tpa_flags = bp->flags & BNXT_FLAG_TPA; 4978 for (i = 0; i < bp->nr_vnics; i++) { 4979 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4980 if (rc) { 4981 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4982 i, rc); 4983 return rc; 4984 } 4985 } 4986 return 0; 4987 } 4988 4989 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 4990 { 4991 int i; 4992 4993 for (i = 0; i < bp->nr_vnics; i++) 4994 
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic contexts */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* before freeing the vnic, undo the vnic TPA settings */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return
0; 5124 #endif 5125 } 5126 5127 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 5128 static bool bnxt_promisc_ok(struct bnxt *bp) 5129 { 5130 #ifdef CONFIG_BNXT_SRIOV 5131 if (BNXT_VF(bp) && !bp->vf.vlan) 5132 return false; 5133 #endif 5134 return true; 5135 } 5136 5137 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 5138 { 5139 unsigned int rc = 0; 5140 5141 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 5142 if (rc) { 5143 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 5144 rc); 5145 return rc; 5146 } 5147 5148 rc = bnxt_hwrm_vnic_cfg(bp, 1); 5149 if (rc) { 5150 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 5151 rc); 5152 return rc; 5153 } 5154 return rc; 5155 } 5156 5157 static int bnxt_cfg_rx_mode(struct bnxt *); 5158 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 5159 5160 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 5161 { 5162 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5163 int rc = 0; 5164 unsigned int rx_nr_rings = bp->rx_nr_rings; 5165 5166 if (irq_re_init) { 5167 rc = bnxt_hwrm_stat_ctx_alloc(bp); 5168 if (rc) { 5169 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 5170 rc); 5171 goto err_out; 5172 } 5173 if (bp->tx_reserved_rings != bp->tx_nr_rings) { 5174 int tx = bp->tx_nr_rings; 5175 5176 if (bnxt_hwrm_reserve_tx_rings(bp, &tx) || 5177 tx < bp->tx_nr_rings) { 5178 rc = -ENOMEM; 5179 goto err_out; 5180 } 5181 } 5182 } 5183 5184 rc = bnxt_hwrm_ring_alloc(bp); 5185 if (rc) { 5186 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 5187 goto err_out; 5188 } 5189 5190 rc = bnxt_hwrm_ring_grp_alloc(bp); 5191 if (rc) { 5192 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 5193 goto err_out; 5194 } 5195 5196 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5197 rx_nr_rings--; 5198 5199 /* default vnic 0 */ 5200 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 5201 if (rc) { 5202 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 5203 goto err_out; 5204 } 5205 5206 rc = bnxt_setup_vnic(bp, 0); 5207 if (rc) 5208 goto err_out; 5209 5210 if (bp->flags & BNXT_FLAG_RFS) { 5211 rc = bnxt_alloc_rfs_vnics(bp); 5212 if (rc) 5213 goto err_out; 5214 } 5215 5216 if (bp->flags & BNXT_FLAG_TPA) { 5217 rc = bnxt_set_tpa(bp, true); 5218 if (rc) 5219 goto err_out; 5220 } 5221 5222 if (BNXT_VF(bp)) 5223 bnxt_update_vf_mac(bp); 5224 5225 /* Filter for default vnic 0 */ 5226 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 5227 if (rc) { 5228 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 5229 goto err_out; 5230 } 5231 vnic->uc_filter_count = 1; 5232 5233 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 5234 5235 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5236 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5237 5238 if (bp->dev->flags & IFF_ALLMULTI) { 5239 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5240 vnic->mc_list_count = 0; 5241 } else { 5242 u32 mask = 0; 5243 5244 bnxt_mc_list_updated(bp, &mask); 5245 vnic->rx_mask |= mask; 5246 } 5247 5248 rc = bnxt_cfg_rx_mode(bp); 5249 if (rc) 5250 goto err_out; 5251 5252 rc = bnxt_hwrm_set_coal(bp); 5253 if (rc) 5254 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 5255 rc); 5256 5257 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5258 rc = bnxt_setup_nitroa0_vnic(bp); 5259 if (rc) 5260 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 5261 rc); 5262 } 5263 5264 if (BNXT_VF(bp)) { 5265 bnxt_hwrm_func_qcfg(bp); 5266 
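		/* bnxt_hwrm_func_qcfg() above refreshes VF settings assigned
		 * by the PF (such as the default VLAN); re-evaluate netdev
		 * features that may depend on them.
		 */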
netdev_update_features(bp->dev); 5267 } 5268 5269 return 0; 5270 5271 err_out: 5272 bnxt_hwrm_resource_free(bp, 0, true); 5273 5274 return rc; 5275 } 5276 5277 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 5278 { 5279 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 5280 return 0; 5281 } 5282 5283 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5284 { 5285 bnxt_init_cp_rings(bp); 5286 bnxt_init_rx_rings(bp); 5287 bnxt_init_tx_rings(bp); 5288 bnxt_init_ring_grps(bp, irq_re_init); 5289 bnxt_init_vnics(bp); 5290 5291 return bnxt_init_chip(bp, irq_re_init); 5292 } 5293 5294 static int bnxt_set_real_num_queues(struct bnxt *bp) 5295 { 5296 int rc; 5297 struct net_device *dev = bp->dev; 5298 5299 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 5300 bp->tx_nr_rings_xdp); 5301 if (rc) 5302 return rc; 5303 5304 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 5305 if (rc) 5306 return rc; 5307 5308 #ifdef CONFIG_RFS_ACCEL 5309 if (bp->flags & BNXT_FLAG_RFS) 5310 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 5311 #endif 5312 5313 return rc; 5314 } 5315 5316 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5317 bool shared) 5318 { 5319 int _rx = *rx, _tx = *tx; 5320 5321 if (shared) { 5322 *rx = min_t(int, _rx, max); 5323 *tx = min_t(int, _tx, max); 5324 } else { 5325 if (max < 2) 5326 return -ENOMEM; 5327 5328 while (_rx + _tx > max) { 5329 if (_rx > _tx && _rx > 1) 5330 _rx--; 5331 else if (_tx > 1) 5332 _tx--; 5333 } 5334 *rx = _rx; 5335 *tx = _tx; 5336 } 5337 return 0; 5338 } 5339 5340 static void bnxt_setup_msix(struct bnxt *bp) 5341 { 5342 const int len = sizeof(bp->irq_tbl[0].name); 5343 struct net_device *dev = bp->dev; 5344 int tcs, i; 5345 5346 tcs = netdev_get_num_tc(dev); 5347 if (tcs > 1) { 5348 int i, off, count; 5349 5350 for (i = 0; i < tcs; i++) { 5351 count = bp->tx_nr_rings_per_tc; 5352 off = i * count; 5353 netdev_set_tc_queue(dev, i, count, off); 5354 } 5355 } 5356 5357 for (i = 0; i < bp->cp_nr_rings; i++) { 5358 char *attr; 5359 5360 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5361 attr = "TxRx"; 5362 else if (i < bp->rx_nr_rings) 5363 attr = "rx"; 5364 else 5365 attr = "tx"; 5366 5367 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, 5368 i); 5369 bp->irq_tbl[i].handler = bnxt_msix; 5370 } 5371 } 5372 5373 static void bnxt_setup_inta(struct bnxt *bp) 5374 { 5375 const int len = sizeof(bp->irq_tbl[0].name); 5376 5377 if (netdev_get_num_tc(bp->dev)) 5378 netdev_reset_tc(bp->dev); 5379 5380 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 5381 0); 5382 bp->irq_tbl[0].handler = bnxt_inta; 5383 } 5384 5385 static int bnxt_setup_int_mode(struct bnxt *bp) 5386 { 5387 int rc; 5388 5389 if (bp->flags & BNXT_FLAG_USING_MSIX) 5390 bnxt_setup_msix(bp); 5391 else 5392 bnxt_setup_inta(bp); 5393 5394 rc = bnxt_set_real_num_queues(bp); 5395 return rc; 5396 } 5397 5398 #ifdef CONFIG_RFS_ACCEL 5399 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 5400 { 5401 #if defined(CONFIG_BNXT_SRIOV) 5402 if (BNXT_VF(bp)) 5403 return bp->vf.max_rsscos_ctxs; 5404 #endif 5405 return bp->pf.max_rsscos_ctxs; 5406 } 5407 5408 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 5409 { 5410 #if defined(CONFIG_BNXT_SRIOV) 5411 if (BNXT_VF(bp)) 5412 return bp->vf.max_vnics; 5413 #endif 5414 return bp->pf.max_vnics; 5415 } 5416 #endif 5417 5418 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 5419 { 5420 #if defined(CONFIG_BNXT_SRIOV) 5421 if (BNXT_VF(bp)) 5422 return 
bp->vf.max_stat_ctxs; 5423 #endif 5424 return bp->pf.max_stat_ctxs; 5425 } 5426 5427 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 5428 { 5429 #if defined(CONFIG_BNXT_SRIOV) 5430 if (BNXT_VF(bp)) 5431 bp->vf.max_stat_ctxs = max; 5432 else 5433 #endif 5434 bp->pf.max_stat_ctxs = max; 5435 } 5436 5437 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 5438 { 5439 #if defined(CONFIG_BNXT_SRIOV) 5440 if (BNXT_VF(bp)) 5441 return bp->vf.max_cp_rings; 5442 #endif 5443 return bp->pf.max_cp_rings; 5444 } 5445 5446 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5447 { 5448 #if defined(CONFIG_BNXT_SRIOV) 5449 if (BNXT_VF(bp)) 5450 bp->vf.max_cp_rings = max; 5451 else 5452 #endif 5453 bp->pf.max_cp_rings = max; 5454 } 5455 5456 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5457 { 5458 #if defined(CONFIG_BNXT_SRIOV) 5459 if (BNXT_VF(bp)) 5460 return min_t(unsigned int, bp->vf.max_irqs, 5461 bp->vf.max_cp_rings); 5462 #endif 5463 return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings); 5464 } 5465 5466 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5467 { 5468 #if defined(CONFIG_BNXT_SRIOV) 5469 if (BNXT_VF(bp)) 5470 bp->vf.max_irqs = max_irqs; 5471 else 5472 #endif 5473 bp->pf.max_irqs = max_irqs; 5474 } 5475 5476 static int bnxt_init_msix(struct bnxt *bp) 5477 { 5478 int i, total_vecs, rc = 0, min = 1; 5479 struct msix_entry *msix_ent; 5480 5481 total_vecs = bnxt_get_max_func_irqs(bp); 5482 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 5483 if (!msix_ent) 5484 return -ENOMEM; 5485 5486 for (i = 0; i < total_vecs; i++) { 5487 msix_ent[i].entry = i; 5488 msix_ent[i].vector = 0; 5489 } 5490 5491 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 5492 min = 2; 5493 5494 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 5495 if (total_vecs < 0) { 5496 rc = -ENODEV; 5497 goto msix_setup_exit; 5498 } 5499 5500 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 5501 if (bp->irq_tbl) { 5502 for (i = 0; i < total_vecs; i++) 5503 bp->irq_tbl[i].vector = msix_ent[i].vector; 5504 5505 bp->total_irqs = total_vecs; 5506 /* Trim rings based upon num of vectors allocated */ 5507 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 5508 total_vecs, min == 1); 5509 if (rc) 5510 goto msix_setup_exit; 5511 5512 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5513 bp->cp_nr_rings = (min == 1) ? 
5514 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5515 bp->tx_nr_rings + bp->rx_nr_rings; 5516 5517 } else { 5518 rc = -ENOMEM; 5519 goto msix_setup_exit; 5520 } 5521 bp->flags |= BNXT_FLAG_USING_MSIX; 5522 kfree(msix_ent); 5523 return 0; 5524 5525 msix_setup_exit: 5526 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 5527 kfree(bp->irq_tbl); 5528 bp->irq_tbl = NULL; 5529 pci_disable_msix(bp->pdev); 5530 kfree(msix_ent); 5531 return rc; 5532 } 5533 5534 static int bnxt_init_inta(struct bnxt *bp) 5535 { 5536 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 5537 if (!bp->irq_tbl) 5538 return -ENOMEM; 5539 5540 bp->total_irqs = 1; 5541 bp->rx_nr_rings = 1; 5542 bp->tx_nr_rings = 1; 5543 bp->cp_nr_rings = 1; 5544 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5545 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5546 bp->irq_tbl[0].vector = bp->pdev->irq; 5547 return 0; 5548 } 5549 5550 static int bnxt_init_int_mode(struct bnxt *bp) 5551 { 5552 int rc = 0; 5553 5554 if (bp->flags & BNXT_FLAG_MSIX_CAP) 5555 rc = bnxt_init_msix(bp); 5556 5557 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 5558 /* fallback to INTA */ 5559 rc = bnxt_init_inta(bp); 5560 } 5561 return rc; 5562 } 5563 5564 static void bnxt_clear_int_mode(struct bnxt *bp) 5565 { 5566 if (bp->flags & BNXT_FLAG_USING_MSIX) 5567 pci_disable_msix(bp->pdev); 5568 5569 kfree(bp->irq_tbl); 5570 bp->irq_tbl = NULL; 5571 bp->flags &= ~BNXT_FLAG_USING_MSIX; 5572 } 5573 5574 static void bnxt_free_irq(struct bnxt *bp) 5575 { 5576 struct bnxt_irq *irq; 5577 int i; 5578 5579 #ifdef CONFIG_RFS_ACCEL 5580 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 5581 bp->dev->rx_cpu_rmap = NULL; 5582 #endif 5583 if (!bp->irq_tbl) 5584 return; 5585 5586 for (i = 0; i < bp->cp_nr_rings; i++) { 5587 irq = &bp->irq_tbl[i]; 5588 if (irq->requested) { 5589 if (irq->have_cpumask) { 5590 irq_set_affinity_hint(irq->vector, NULL); 5591 free_cpumask_var(irq->cpu_mask); 5592 irq->have_cpumask = 0; 5593 } 5594 free_irq(irq->vector, bp->bnapi[i]); 5595 } 5596 5597 irq->requested = 0; 5598 } 5599 } 5600 5601 static int bnxt_request_irq(struct bnxt *bp) 5602 { 5603 int i, j, rc = 0; 5604 unsigned long flags = 0; 5605 #ifdef CONFIG_RFS_ACCEL 5606 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 5607 #endif 5608 5609 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 5610 flags = IRQF_SHARED; 5611 5612 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 5613 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5614 #ifdef CONFIG_RFS_ACCEL 5615 if (rmap && bp->bnapi[i]->rx_ring) { 5616 rc = irq_cpu_rmap_add(rmap, irq->vector); 5617 if (rc) 5618 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 5619 j); 5620 j++; 5621 } 5622 #endif 5623 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5624 bp->bnapi[i]); 5625 if (rc) 5626 break; 5627 5628 irq->requested = 1; 5629 5630 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 5631 int numa_node = dev_to_node(&bp->pdev->dev); 5632 5633 irq->have_cpumask = 1; 5634 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 5635 irq->cpu_mask); 5636 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 5637 if (rc) { 5638 netdev_warn(bp->dev, 5639 "Set affinity failed, IRQ = %d\n", 5640 irq->vector); 5641 break; 5642 } 5643 } 5644 } 5645 return rc; 5646 } 5647 5648 static void bnxt_del_napi(struct bnxt *bp) 5649 { 5650 int i; 5651 5652 if (!bp->bnapi) 5653 return; 5654 5655 for (i = 0; i < bp->cp_nr_rings; i++) { 5656 struct bnxt_napi *bnapi = bp->bnapi[i]; 5657 5658 napi_hash_del(&bnapi->napi); 5659 netif_napi_del(&bnapi->napi); 5660 } 5661 
/* We called napi_hash_del() before netif_napi_del(), we need 5662 * to respect an RCU grace period before freeing napi structures. 5663 */ 5664 synchronize_net(); 5665 } 5666 5667 static void bnxt_init_napi(struct bnxt *bp) 5668 { 5669 int i; 5670 unsigned int cp_nr_rings = bp->cp_nr_rings; 5671 struct bnxt_napi *bnapi; 5672 5673 if (bp->flags & BNXT_FLAG_USING_MSIX) { 5674 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5675 cp_nr_rings--; 5676 for (i = 0; i < cp_nr_rings; i++) { 5677 bnapi = bp->bnapi[i]; 5678 netif_napi_add(bp->dev, &bnapi->napi, 5679 bnxt_poll, 64); 5680 } 5681 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5682 bnapi = bp->bnapi[cp_nr_rings]; 5683 netif_napi_add(bp->dev, &bnapi->napi, 5684 bnxt_poll_nitroa0, 64); 5685 } 5686 } else { 5687 bnapi = bp->bnapi[0]; 5688 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 5689 } 5690 } 5691 5692 static void bnxt_disable_napi(struct bnxt *bp) 5693 { 5694 int i; 5695 5696 if (!bp->bnapi) 5697 return; 5698 5699 for (i = 0; i < bp->cp_nr_rings; i++) 5700 napi_disable(&bp->bnapi[i]->napi); 5701 } 5702 5703 static void bnxt_enable_napi(struct bnxt *bp) 5704 { 5705 int i; 5706 5707 for (i = 0; i < bp->cp_nr_rings; i++) { 5708 bp->bnapi[i]->in_reset = false; 5709 napi_enable(&bp->bnapi[i]->napi); 5710 } 5711 } 5712 5713 void bnxt_tx_disable(struct bnxt *bp) 5714 { 5715 int i; 5716 struct bnxt_tx_ring_info *txr; 5717 5718 if (bp->tx_ring) { 5719 for (i = 0; i < bp->tx_nr_rings; i++) { 5720 txr = &bp->tx_ring[i]; 5721 txr->dev_state = BNXT_DEV_STATE_CLOSING; 5722 } 5723 } 5724 /* Stop all TX queues */ 5725 netif_tx_disable(bp->dev); 5726 netif_carrier_off(bp->dev); 5727 } 5728 5729 void bnxt_tx_enable(struct bnxt *bp) 5730 { 5731 int i; 5732 struct bnxt_tx_ring_info *txr; 5733 5734 for (i = 0; i < bp->tx_nr_rings; i++) { 5735 txr = &bp->tx_ring[i]; 5736 txr->dev_state = 0; 5737 } 5738 netif_tx_wake_all_queues(bp->dev); 5739 if (bp->link_info.link_up) 5740 netif_carrier_on(bp->dev); 5741 } 5742 5743 static void bnxt_report_link(struct bnxt *bp) 5744 { 5745 if (bp->link_info.link_up) { 5746 const char *duplex; 5747 const char *flow_ctrl; 5748 u32 speed; 5749 u16 fec; 5750 5751 netif_carrier_on(bp->dev); 5752 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 5753 duplex = "full"; 5754 else 5755 duplex = "half"; 5756 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 5757 flow_ctrl = "ON - receive & transmit"; 5758 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 5759 flow_ctrl = "ON - transmit"; 5760 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 5761 flow_ctrl = "ON - receive"; 5762 else 5763 flow_ctrl = "none"; 5764 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 5765 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 5766 speed, duplex, flow_ctrl); 5767 if (bp->flags & BNXT_FLAG_EEE_CAP) 5768 netdev_info(bp->dev, "EEE is %s\n", 5769 bp->eee.eee_active ? "active" : 5770 "not active"); 5771 fec = bp->link_info.fec_cfg; 5772 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 5773 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 5774 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 5775 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 5776 (fec & BNXT_FEC_ENC_RS) ? 
"RS" : "None"); 5777 } else { 5778 netif_carrier_off(bp->dev); 5779 netdev_err(bp->dev, "NIC Link is Down\n"); 5780 } 5781 } 5782 5783 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 5784 { 5785 int rc = 0; 5786 struct hwrm_port_phy_qcaps_input req = {0}; 5787 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5788 struct bnxt_link_info *link_info = &bp->link_info; 5789 5790 if (bp->hwrm_spec_code < 0x10201) 5791 return 0; 5792 5793 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 5794 5795 mutex_lock(&bp->hwrm_cmd_lock); 5796 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5797 if (rc) 5798 goto hwrm_phy_qcaps_exit; 5799 5800 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 5801 struct ethtool_eee *eee = &bp->eee; 5802 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 5803 5804 bp->flags |= BNXT_FLAG_EEE_CAP; 5805 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5806 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 5807 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 5808 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5809 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5810 } 5811 if (resp->supported_speeds_auto_mode) 5812 link_info->support_auto_speeds = 5813 le16_to_cpu(resp->supported_speeds_auto_mode); 5814 5815 bp->port_count = resp->port_cnt; 5816 5817 hwrm_phy_qcaps_exit: 5818 mutex_unlock(&bp->hwrm_cmd_lock); 5819 return rc; 5820 } 5821 5822 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 5823 { 5824 int rc = 0; 5825 struct bnxt_link_info *link_info = &bp->link_info; 5826 struct hwrm_port_phy_qcfg_input req = {0}; 5827 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5828 u8 link_up = link_info->link_up; 5829 u16 diff; 5830 5831 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 5832 5833 mutex_lock(&bp->hwrm_cmd_lock); 5834 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5835 if (rc) { 5836 mutex_unlock(&bp->hwrm_cmd_lock); 5837 return rc; 5838 } 5839 5840 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 5841 link_info->phy_link_status = resp->link; 5842 link_info->duplex = resp->duplex_cfg; 5843 if (bp->hwrm_spec_code >= 0x10800) 5844 link_info->duplex = resp->duplex_state; 5845 link_info->pause = resp->pause; 5846 link_info->auto_mode = resp->auto_mode; 5847 link_info->auto_pause_setting = resp->auto_pause; 5848 link_info->lp_pause = resp->link_partner_adv_pause; 5849 link_info->force_pause_setting = resp->force_pause; 5850 link_info->duplex_setting = resp->duplex_cfg; 5851 if (link_info->phy_link_status == BNXT_LINK_LINK) 5852 link_info->link_speed = le16_to_cpu(resp->link_speed); 5853 else 5854 link_info->link_speed = 0; 5855 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 5856 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 5857 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 5858 link_info->lp_auto_link_speeds = 5859 le16_to_cpu(resp->link_partner_adv_speeds); 5860 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 5861 link_info->phy_ver[0] = resp->phy_maj; 5862 link_info->phy_ver[1] = resp->phy_min; 5863 link_info->phy_ver[2] = resp->phy_bld; 5864 link_info->media_type = resp->media_type; 5865 link_info->phy_type = resp->phy_type; 5866 link_info->transceiver = resp->xcvr_pkg_type; 5867 link_info->phy_addr = resp->eee_config_phy_addr & 5868 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 5869 link_info->module_status = resp->module_status; 5870 5871 if 
(bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always report link down if not required to update link state */
		link_info->link_up = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	diff = link_info->support_auto_speeds ^ link_info->advertising;
	if ((link_info->support_auto_speeds | diff) !=
	    link_info->support_auto_speeds) {
		/* An advertised speed is no longer supported, so we need to
		 * update the advertisement settings. Caller holds RTNL
		 * so we can modify link settings.
5931 */ 5932 link_info->advertising = link_info->support_auto_speeds; 5933 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 5934 bnxt_hwrm_set_link_setting(bp, true, false); 5935 } 5936 return 0; 5937 } 5938 5939 static void bnxt_get_port_module_status(struct bnxt *bp) 5940 { 5941 struct bnxt_link_info *link_info = &bp->link_info; 5942 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 5943 u8 module_status; 5944 5945 if (bnxt_update_link(bp, true)) 5946 return; 5947 5948 module_status = link_info->module_status; 5949 switch (module_status) { 5950 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 5951 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 5952 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 5953 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 5954 bp->pf.port_id); 5955 if (bp->hwrm_spec_code >= 0x10201) { 5956 netdev_warn(bp->dev, "Module part number %s\n", 5957 resp->phy_vendor_partnumber); 5958 } 5959 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 5960 netdev_warn(bp->dev, "TX is disabled\n"); 5961 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 5962 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 5963 } 5964 } 5965 5966 static void 5967 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 5968 { 5969 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 5970 if (bp->hwrm_spec_code >= 0x10201) 5971 req->auto_pause = 5972 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 5973 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5974 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 5975 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5976 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 5977 req->enables |= 5978 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5979 } else { 5980 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5981 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 5982 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5983 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 5984 req->enables |= 5985 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 5986 if (bp->hwrm_spec_code >= 0x10201) { 5987 req->auto_pause = req->force_pause; 5988 req->enables |= cpu_to_le32( 5989 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5990 } 5991 } 5992 } 5993 5994 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 5995 struct hwrm_port_phy_cfg_input *req) 5996 { 5997 u8 autoneg = bp->link_info.autoneg; 5998 u16 fw_link_speed = bp->link_info.req_link_speed; 5999 u16 advertising = bp->link_info.advertising; 6000 6001 if (autoneg & BNXT_AUTONEG_SPEED) { 6002 req->auto_mode |= 6003 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 6004 6005 req->enables |= cpu_to_le32( 6006 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 6007 req->auto_link_speed_mask = cpu_to_le16(advertising); 6008 6009 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 6010 req->flags |= 6011 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 6012 } else { 6013 req->force_link_speed = cpu_to_le16(fw_link_speed); 6014 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 6015 } 6016 6017 /* tell chimp that the setting takes effect immediately */ 6018 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 6019 } 6020 6021 int bnxt_hwrm_set_pause(struct bnxt *bp) 6022 { 6023 struct hwrm_port_phy_cfg_input req = {0}; 6024 int rc; 6025 6026 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6027 bnxt_hwrm_set_pause_common(bp, &req); 6028 6029 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 6030 bp->link_info.force_link_chng) 6031 bnxt_hwrm_set_link_common(bp, &req); 6032 6033 mutex_lock(&bp->hwrm_cmd_lock); 6034 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6035 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 6036 /* since changing of pause setting doesn't trigger any link 6037 * change event, the driver needs to update the current pause 6038 * result upon successfully return of the phy_cfg command 6039 */ 6040 bp->link_info.pause = 6041 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 6042 bp->link_info.auto_pause_setting = 0; 6043 if (!bp->link_info.force_link_chng) 6044 bnxt_report_link(bp); 6045 } 6046 bp->link_info.force_link_chng = false; 6047 mutex_unlock(&bp->hwrm_cmd_lock); 6048 return rc; 6049 } 6050 6051 static void bnxt_hwrm_set_eee(struct bnxt *bp, 6052 struct hwrm_port_phy_cfg_input *req) 6053 { 6054 struct ethtool_eee *eee = &bp->eee; 6055 6056 if (eee->eee_enabled) { 6057 u16 eee_speeds; 6058 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 6059 6060 if (eee->tx_lpi_enabled) 6061 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 6062 else 6063 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 6064 6065 req->flags |= cpu_to_le32(flags); 6066 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 6067 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 6068 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 6069 } else { 6070 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 6071 } 6072 } 6073 6074 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 6075 { 6076 struct hwrm_port_phy_cfg_input req = {0}; 6077 6078 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6079 if (set_pause) 6080 bnxt_hwrm_set_pause_common(bp, &req); 6081 6082 bnxt_hwrm_set_link_common(bp, &req); 6083 6084 if (set_eee) 6085 bnxt_hwrm_set_eee(bp, &req); 6086 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6087 } 6088 6089 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 6090 { 6091 struct hwrm_port_phy_cfg_input req = {0}; 6092 6093 if (!BNXT_SINGLE_PF(bp)) 6094 return 0; 6095 6096 if (pci_num_vf(bp->pdev)) 6097 return 0; 6098 6099 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6100 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 6101 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6102 } 6103 6104 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 6105 { 6106 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6107 struct hwrm_port_led_qcaps_input req = {0}; 6108 struct bnxt_pf_info *pf = &bp->pf; 6109 int rc; 6110 6111 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 6112 return 0; 6113 6114 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 6115 req.port_id = cpu_to_le16(pf->port_id); 6116 mutex_lock(&bp->hwrm_cmd_lock); 6117 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6118 if (rc) { 6119 mutex_unlock(&bp->hwrm_cmd_lock); 6120 return rc; 6121 } 6122 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 6123 int i; 6124 6125 bp->num_leds = resp->num_leds; 6126 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 6127 bp->num_leds); 6128 for (i = 0; i < bp->num_leds; i++) { 6129 struct bnxt_led_info *led = &bp->leds[i]; 6130 __le16 caps = led->led_state_caps; 6131 6132 if (!led->led_group_id || 6133 !BNXT_LED_ALT_BLINK_CAP(caps)) { 6134 bp->num_leds = 0; 6135 break; 6136 } 6137 } 6138 } 6139 mutex_unlock(&bp->hwrm_cmd_lock); 6140 
return 0; 6141 } 6142 6143 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 6144 { 6145 struct hwrm_wol_filter_alloc_input req = {0}; 6146 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6147 int rc; 6148 6149 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 6150 req.port_id = cpu_to_le16(bp->pf.port_id); 6151 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 6152 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 6153 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 6154 mutex_lock(&bp->hwrm_cmd_lock); 6155 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6156 if (!rc) 6157 bp->wol_filter_id = resp->wol_filter_id; 6158 mutex_unlock(&bp->hwrm_cmd_lock); 6159 return rc; 6160 } 6161 6162 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 6163 { 6164 struct hwrm_wol_filter_free_input req = {0}; 6165 int rc; 6166 6167 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 6168 req.port_id = cpu_to_le16(bp->pf.port_id); 6169 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 6170 req.wol_filter_id = bp->wol_filter_id; 6171 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6172 return rc; 6173 } 6174 6175 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 6176 { 6177 struct hwrm_wol_filter_qcfg_input req = {0}; 6178 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6179 u16 next_handle = 0; 6180 int rc; 6181 6182 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 6183 req.port_id = cpu_to_le16(bp->pf.port_id); 6184 req.handle = cpu_to_le16(handle); 6185 mutex_lock(&bp->hwrm_cmd_lock); 6186 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6187 if (!rc) { 6188 next_handle = le16_to_cpu(resp->next_handle); 6189 if (next_handle != 0) { 6190 if (resp->wol_type == 6191 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 6192 bp->wol = 1; 6193 bp->wol_filter_id = resp->wol_filter_id; 6194 } 6195 } 6196 } 6197 mutex_unlock(&bp->hwrm_cmd_lock); 6198 return next_handle; 6199 } 6200 6201 static void bnxt_get_wol_settings(struct bnxt *bp) 6202 { 6203 u16 handle = 0; 6204 6205 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 6206 return; 6207 6208 do { 6209 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 6210 } while (handle && handle != 0xffff); 6211 } 6212 6213 static bool bnxt_eee_config_ok(struct bnxt *bp) 6214 { 6215 struct ethtool_eee *eee = &bp->eee; 6216 struct bnxt_link_info *link_info = &bp->link_info; 6217 6218 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 6219 return true; 6220 6221 if (eee->eee_enabled) { 6222 u32 advertising = 6223 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 6224 6225 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6226 eee->eee_enabled = 0; 6227 return false; 6228 } 6229 if (eee->advertised & ~advertising) { 6230 eee->advertised = advertising & eee->supported; 6231 return false; 6232 } 6233 } 6234 return true; 6235 } 6236 6237 static int bnxt_update_phy_setting(struct bnxt *bp) 6238 { 6239 int rc; 6240 bool update_link = false; 6241 bool update_pause = false; 6242 bool update_eee = false; 6243 struct bnxt_link_info *link_info = &bp->link_info; 6244 6245 rc = bnxt_update_link(bp, true); 6246 if (rc) { 6247 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 6248 rc); 6249 return rc; 6250 } 6251 if (!BNXT_SINGLE_PF(bp)) 6252 return 0; 6253 6254 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6255 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 6256 link_info->req_flow_ctrl) 6257 
update_pause = true; 6258 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6259 link_info->force_pause_setting != link_info->req_flow_ctrl) 6260 update_pause = true; 6261 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6262 if (BNXT_AUTO_MODE(link_info->auto_mode)) 6263 update_link = true; 6264 if (link_info->req_link_speed != link_info->force_link_speed) 6265 update_link = true; 6266 if (link_info->req_duplex != link_info->duplex_setting) 6267 update_link = true; 6268 } else { 6269 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 6270 update_link = true; 6271 if (link_info->advertising != link_info->auto_link_speeds) 6272 update_link = true; 6273 } 6274 6275 /* The last close may have shutdown the link, so need to call 6276 * PHY_CFG to bring it back up. 6277 */ 6278 if (!netif_carrier_ok(bp->dev)) 6279 update_link = true; 6280 6281 if (!bnxt_eee_config_ok(bp)) 6282 update_eee = true; 6283 6284 if (update_link) 6285 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 6286 else if (update_pause) 6287 rc = bnxt_hwrm_set_pause(bp); 6288 if (rc) { 6289 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 6290 rc); 6291 return rc; 6292 } 6293 6294 return rc; 6295 } 6296 6297 /* Common routine to pre-map certain register block to different GRC window. 6298 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 6299 * in PF and 3 windows in VF that can be customized to map in different 6300 * register blocks. 6301 */ 6302 static void bnxt_preset_reg_win(struct bnxt *bp) 6303 { 6304 if (BNXT_PF(bp)) { 6305 /* CAG registers map to GRC window #4 */ 6306 writel(BNXT_CAG_REG_BASE, 6307 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 6308 } 6309 } 6310 6311 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6312 { 6313 int rc = 0; 6314 6315 bnxt_preset_reg_win(bp); 6316 netif_carrier_off(bp->dev); 6317 if (irq_re_init) { 6318 rc = bnxt_setup_int_mode(bp); 6319 if (rc) { 6320 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 6321 rc); 6322 return rc; 6323 } 6324 } 6325 if ((bp->flags & BNXT_FLAG_RFS) && 6326 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 6327 /* disable RFS if falling back to INTA */ 6328 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 6329 bp->flags &= ~BNXT_FLAG_RFS; 6330 } 6331 6332 rc = bnxt_alloc_mem(bp, irq_re_init); 6333 if (rc) { 6334 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6335 goto open_err_free_mem; 6336 } 6337 6338 if (irq_re_init) { 6339 bnxt_init_napi(bp); 6340 rc = bnxt_request_irq(bp); 6341 if (rc) { 6342 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 6343 goto open_err; 6344 } 6345 } 6346 6347 bnxt_enable_napi(bp); 6348 6349 rc = bnxt_init_nic(bp, irq_re_init); 6350 if (rc) { 6351 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6352 goto open_err; 6353 } 6354 6355 if (link_re_init) { 6356 mutex_lock(&bp->link_lock); 6357 rc = bnxt_update_phy_setting(bp); 6358 mutex_unlock(&bp->link_lock); 6359 if (rc) 6360 netdev_warn(bp->dev, "failed to update phy settings\n"); 6361 } 6362 6363 if (irq_re_init) 6364 udp_tunnel_get_rx_info(bp->dev); 6365 6366 set_bit(BNXT_STATE_OPEN, &bp->state); 6367 bnxt_enable_int(bp); 6368 /* Enable TX queues */ 6369 bnxt_tx_enable(bp); 6370 mod_timer(&bp->timer, jiffies + bp->current_interval); 6371 /* Poll link status and check for SFP+ module status */ 6372 bnxt_get_port_module_status(bp); 6373 6374 /* VF-reps may need to be re-opened after the PF is re-opened */ 6375 if (BNXT_PF(bp)) 6376 bnxt_vf_reps_open(bp); 6377 return 0; 6378 6379 open_err: 6380 
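/* Error unwind for __bnxt_open_nic(): stop and delete NAPI, then fall through to open_err_free_mem to free SKBs, IRQs and ring memory. */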
bnxt_disable_napi(bp); 6381 bnxt_del_napi(bp); 6382 6383 open_err_free_mem: 6384 bnxt_free_skbs(bp); 6385 bnxt_free_irq(bp); 6386 bnxt_free_mem(bp, true); 6387 return rc; 6388 } 6389 6390 /* rtnl_lock held */ 6391 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6392 { 6393 int rc = 0; 6394 6395 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 6396 if (rc) { 6397 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 6398 dev_close(bp->dev); 6399 } 6400 return rc; 6401 } 6402 6403 /* rtnl_lock held, open the NIC halfway by allocating all resources, but 6404 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 6405 * self tests. 6406 */ 6407 int bnxt_half_open_nic(struct bnxt *bp) 6408 { 6409 int rc = 0; 6410 6411 rc = bnxt_alloc_mem(bp, false); 6412 if (rc) { 6413 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6414 goto half_open_err; 6415 } 6416 rc = bnxt_init_nic(bp, false); 6417 if (rc) { 6418 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6419 goto half_open_err; 6420 } 6421 return 0; 6422 6423 half_open_err: 6424 bnxt_free_skbs(bp); 6425 bnxt_free_mem(bp, false); 6426 dev_close(bp->dev); 6427 return rc; 6428 } 6429 6430 /* rtnl_lock held, this call can only be made after a previous successful 6431 * call to bnxt_half_open_nic(). 6432 */ 6433 void bnxt_half_close_nic(struct bnxt *bp) 6434 { 6435 bnxt_hwrm_resource_free(bp, false, false); 6436 bnxt_free_skbs(bp); 6437 bnxt_free_mem(bp, false); 6438 } 6439 6440 static int bnxt_open(struct net_device *dev) 6441 { 6442 struct bnxt *bp = netdev_priv(dev); 6443 6444 return __bnxt_open_nic(bp, true, true); 6445 } 6446 6447 static bool bnxt_drv_busy(struct bnxt *bp) 6448 { 6449 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 6450 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 6451 } 6452 6453 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6454 { 6455 int rc = 0; 6456 6457 #ifdef CONFIG_BNXT_SRIOV 6458 if (bp->sriov_cfg) { 6459 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 6460 !bp->sriov_cfg, 6461 BNXT_SRIOV_CFG_WAIT_TMO); 6462 if (rc) 6463 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 6464 } 6465 6466 /* Close the VF-reps before closing PF */ 6467 if (BNXT_PF(bp)) 6468 bnxt_vf_reps_close(bp); 6469 #endif 6470 /* Change device state to avoid TX queue wake-ups */ 6471 bnxt_tx_disable(bp); 6472 6473 clear_bit(BNXT_STATE_OPEN, &bp->state); 6474 smp_mb__after_atomic(); 6475 while (bnxt_drv_busy(bp)) 6476 msleep(20); 6477 6478 /* Flush rings and disable interrupts */ 6479 bnxt_shutdown_nic(bp, irq_re_init); 6480 6481 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 6482 6483 bnxt_disable_napi(bp); 6484 del_timer_sync(&bp->timer); 6485 bnxt_free_skbs(bp); 6486 6487 if (irq_re_init) { 6488 bnxt_free_irq(bp); 6489 bnxt_del_napi(bp); 6490 } 6491 bnxt_free_mem(bp, irq_re_init); 6492 return rc; 6493 } 6494 6495 static int bnxt_close(struct net_device *dev) 6496 { 6497 struct bnxt *bp = netdev_priv(dev); 6498 6499 bnxt_close_nic(bp, true, true); 6500 bnxt_hwrm_shutdown_link(bp); 6501 return 0; 6502 } 6503 6504 /* rtnl_lock held */ 6505 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6506 { 6507 switch (cmd) { 6508 case SIOCGMIIPHY: 6509 /* fallthru */ 6510 case SIOCGMIIREG: { 6511 if (!netif_running(dev)) 6512 return -EAGAIN; 6513 6514 return 0; 6515 } 6516 6517 case SIOCSMIIREG: 6518 if (!netif_running(dev)) 6519 return -EAGAIN; 6520 6521 return 0; 6522 6523 default: 6524 /* do
nothing */ 6525 break; 6526 } 6527 return -EOPNOTSUPP; 6528 } 6529 6530 static void 6531 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6532 { 6533 u32 i; 6534 struct bnxt *bp = netdev_priv(dev); 6535 6536 set_bit(BNXT_STATE_READ_STATS, &bp->state); 6537 /* Make sure bnxt_close_nic() sees that we are reading stats before 6538 * we check the BNXT_STATE_OPEN flag. 6539 */ 6540 smp_mb__after_atomic(); 6541 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6542 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6543 return; 6544 } 6545 6546 /* TODO check if we need to synchronize with bnxt_close path */ 6547 for (i = 0; i < bp->cp_nr_rings; i++) { 6548 struct bnxt_napi *bnapi = bp->bnapi[i]; 6549 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6550 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 6551 6552 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 6553 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 6554 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 6555 6556 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 6557 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 6558 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 6559 6560 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 6561 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 6562 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 6563 6564 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 6565 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 6566 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 6567 6568 stats->rx_missed_errors += 6569 le64_to_cpu(hw_stats->rx_discard_pkts); 6570 6571 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 6572 6573 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 6574 } 6575 6576 if (bp->flags & BNXT_FLAG_PORT_STATS) { 6577 struct rx_port_stats *rx = bp->hw_rx_port_stats; 6578 struct tx_port_stats *tx = bp->hw_tx_port_stats; 6579 6580 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 6581 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 6582 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 6583 le64_to_cpu(rx->rx_ovrsz_frames) + 6584 le64_to_cpu(rx->rx_runt_frames); 6585 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 6586 le64_to_cpu(rx->rx_jbr_frames); 6587 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 6588 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 6589 stats->tx_errors = le64_to_cpu(tx->tx_err); 6590 } 6591 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6592 } 6593 6594 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 6595 { 6596 struct net_device *dev = bp->dev; 6597 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6598 struct netdev_hw_addr *ha; 6599 u8 *haddr; 6600 int mc_count = 0; 6601 bool update = false; 6602 int off = 0; 6603 6604 netdev_for_each_mc_addr(ha, dev) { 6605 if (mc_count >= BNXT_MAX_MC_ADDRS) { 6606 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6607 vnic->mc_list_count = 0; 6608 return false; 6609 } 6610 haddr = ha->addr; 6611 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 6612 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 6613 update = true; 6614 } 6615 off += ETH_ALEN; 6616 mc_count++; 6617 } 6618 if (mc_count) 6619 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 6620 6621 if (mc_count != vnic->mc_list_count) { 6622 vnic->mc_list_count = mc_count; 6623 update = true; 6624 } 6625 return update; 6626 } 6627 6628 static bool 
bnxt_uc_list_updated(struct bnxt *bp) 6629 { 6630 struct net_device *dev = bp->dev; 6631 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6632 struct netdev_hw_addr *ha; 6633 int off = 0; 6634 6635 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 6636 return true; 6637 6638 netdev_for_each_uc_addr(ha, dev) { 6639 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 6640 return true; 6641 6642 off += ETH_ALEN; 6643 } 6644 return false; 6645 } 6646 6647 static void bnxt_set_rx_mode(struct net_device *dev) 6648 { 6649 struct bnxt *bp = netdev_priv(dev); 6650 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6651 u32 mask = vnic->rx_mask; 6652 bool mc_update = false; 6653 bool uc_update; 6654 6655 if (!netif_running(dev)) 6656 return; 6657 6658 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 6659 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 6660 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 6661 6662 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 6663 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6664 6665 uc_update = bnxt_uc_list_updated(bp); 6666 6667 if (dev->flags & IFF_ALLMULTI) { 6668 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6669 vnic->mc_list_count = 0; 6670 } else { 6671 mc_update = bnxt_mc_list_updated(bp, &mask); 6672 } 6673 6674 if (mask != vnic->rx_mask || uc_update || mc_update) { 6675 vnic->rx_mask = mask; 6676 6677 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6678 bnxt_queue_sp_work(bp); 6679 } 6680 } 6681 6682 static int bnxt_cfg_rx_mode(struct bnxt *bp) 6683 { 6684 struct net_device *dev = bp->dev; 6685 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6686 struct netdev_hw_addr *ha; 6687 int i, off = 0, rc; 6688 bool uc_update; 6689 6690 netif_addr_lock_bh(dev); 6691 uc_update = bnxt_uc_list_updated(bp); 6692 netif_addr_unlock_bh(dev); 6693 6694 if (!uc_update) 6695 goto skip_uc; 6696 6697 mutex_lock(&bp->hwrm_cmd_lock); 6698 for (i = 1; i < vnic->uc_filter_count; i++) { 6699 struct hwrm_cfa_l2_filter_free_input req = {0}; 6700 6701 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 6702 -1); 6703 6704 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 6705 6706 rc = _hwrm_send_message(bp, &req, sizeof(req), 6707 HWRM_CMD_TIMEOUT); 6708 } 6709 mutex_unlock(&bp->hwrm_cmd_lock); 6710 6711 vnic->uc_filter_count = 1; 6712 6713 netif_addr_lock_bh(dev); 6714 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 6715 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6716 } else { 6717 netdev_for_each_uc_addr(ha, dev) { 6718 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 6719 off += ETH_ALEN; 6720 vnic->uc_filter_count++; 6721 } 6722 } 6723 netif_addr_unlock_bh(dev); 6724 6725 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 6726 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 6727 if (rc) { 6728 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 6729 rc); 6730 vnic->uc_filter_count = i; 6731 return rc; 6732 } 6733 } 6734 6735 skip_uc: 6736 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 6737 if (rc) 6738 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 6739 rc); 6740 6741 return rc; 6742 } 6743 6744 /* If the chip and firmware supports RFS */ 6745 static bool bnxt_rfs_supported(struct bnxt *bp) 6746 { 6747 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6748 return true; 6749 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6750 return true; 6751 return false; 6752 } 6753 6754 /* If runtime conditions support RFS */ 6755 static bool bnxt_rfs_capable(struct bnxt *bp) 6756 { 6757 #ifdef CONFIG_RFS_ACCEL 6758 int 
vnics, max_vnics, max_rss_ctxs; 6759 6760 if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) 6761 return false; 6762 6763 vnics = 1 + bp->rx_nr_rings; 6764 max_vnics = bnxt_get_max_func_vnics(bp); 6765 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 6766 6767 /* RSS contexts not a limiting factor */ 6768 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6769 max_rss_ctxs = max_vnics; 6770 if (vnics > max_vnics || vnics > max_rss_ctxs) { 6771 netdev_warn(bp->dev, 6772 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 6773 min(max_rss_ctxs - 1, max_vnics - 1)); 6774 return false; 6775 } 6776 6777 return true; 6778 #else 6779 return false; 6780 #endif 6781 } 6782 6783 static netdev_features_t bnxt_fix_features(struct net_device *dev, 6784 netdev_features_t features) 6785 { 6786 struct bnxt *bp = netdev_priv(dev); 6787 6788 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 6789 features &= ~NETIF_F_NTUPLE; 6790 6791 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 6792 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 6793 6794 if (!(features & NETIF_F_GRO)) 6795 features &= ~NETIF_F_GRO_HW; 6796 6797 if (features & NETIF_F_GRO_HW) 6798 features &= ~NETIF_F_LRO; 6799 6800 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 6801 * turned on or off together. 6802 */ 6803 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 6804 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 6805 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 6806 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6807 NETIF_F_HW_VLAN_STAG_RX); 6808 else 6809 features |= NETIF_F_HW_VLAN_CTAG_RX | 6810 NETIF_F_HW_VLAN_STAG_RX; 6811 } 6812 #ifdef CONFIG_BNXT_SRIOV 6813 if (BNXT_VF(bp)) { 6814 if (bp->vf.vlan) { 6815 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6816 NETIF_F_HW_VLAN_STAG_RX); 6817 } 6818 } 6819 #endif 6820 return features; 6821 } 6822 6823 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 6824 { 6825 struct bnxt *bp = netdev_priv(dev); 6826 u32 flags = bp->flags; 6827 u32 changes; 6828 int rc = 0; 6829 bool re_init = false; 6830 bool update_tpa = false; 6831 6832 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 6833 if (features & NETIF_F_GRO_HW) 6834 flags |= BNXT_FLAG_GRO; 6835 else if (features & NETIF_F_LRO) 6836 flags |= BNXT_FLAG_LRO; 6837 6838 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 6839 flags &= ~BNXT_FLAG_TPA; 6840 6841 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6842 flags |= BNXT_FLAG_STRIP_VLAN; 6843 6844 if (features & NETIF_F_NTUPLE) 6845 flags |= BNXT_FLAG_RFS; 6846 6847 changes = flags ^ bp->flags; 6848 if (changes & BNXT_FLAG_TPA) { 6849 update_tpa = true; 6850 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 6851 (flags & BNXT_FLAG_TPA) == 0) 6852 re_init = true; 6853 } 6854 6855 if (changes & ~BNXT_FLAG_TPA) 6856 re_init = true; 6857 6858 if (flags != bp->flags) { 6859 u32 old_flags = bp->flags; 6860 6861 bp->flags = flags; 6862 6863 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6864 if (update_tpa) 6865 bnxt_set_ring_params(bp); 6866 return rc; 6867 } 6868 6869 if (re_init) { 6870 bnxt_close_nic(bp, false, false); 6871 if (update_tpa) 6872 bnxt_set_ring_params(bp); 6873 6874 return bnxt_open_nic(bp, false, false); 6875 } 6876 if (update_tpa) { 6877 rc = bnxt_set_tpa(bp, 6878 (flags & BNXT_FLAG_TPA) ?
6879 true : false); 6880 if (rc) 6881 bp->flags = old_flags; 6882 } 6883 } 6884 return rc; 6885 } 6886 6887 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 6888 { 6889 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 6890 int i = bnapi->index; 6891 6892 if (!txr) 6893 return; 6894 6895 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 6896 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 6897 txr->tx_cons); 6898 } 6899 6900 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 6901 { 6902 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 6903 int i = bnapi->index; 6904 6905 if (!rxr) 6906 return; 6907 6908 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 6909 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 6910 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 6911 rxr->rx_sw_agg_prod); 6912 } 6913 6914 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 6915 { 6916 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6917 int i = bnapi->index; 6918 6919 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 6920 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 6921 } 6922 6923 static void bnxt_dbg_dump_states(struct bnxt *bp) 6924 { 6925 int i; 6926 struct bnxt_napi *bnapi; 6927 6928 for (i = 0; i < bp->cp_nr_rings; i++) { 6929 bnapi = bp->bnapi[i]; 6930 if (netif_msg_drv(bp)) { 6931 bnxt_dump_tx_sw_state(bnapi); 6932 bnxt_dump_rx_sw_state(bnapi); 6933 bnxt_dump_cp_sw_state(bnapi); 6934 } 6935 } 6936 } 6937 6938 static void bnxt_reset_task(struct bnxt *bp, bool silent) 6939 { 6940 if (!silent) 6941 bnxt_dbg_dump_states(bp); 6942 if (netif_running(bp->dev)) { 6943 int rc; 6944 6945 if (!silent) 6946 bnxt_ulp_stop(bp); 6947 bnxt_close_nic(bp, false, false); 6948 rc = bnxt_open_nic(bp, false, false); 6949 if (!silent && !rc) 6950 bnxt_ulp_start(bp); 6951 } 6952 } 6953 6954 static void bnxt_tx_timeout(struct net_device *dev) 6955 { 6956 struct bnxt *bp = netdev_priv(dev); 6957 6958 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6959 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6960 bnxt_queue_sp_work(bp); 6961 } 6962 6963 #ifdef CONFIG_NET_POLL_CONTROLLER 6964 static void bnxt_poll_controller(struct net_device *dev) 6965 { 6966 struct bnxt *bp = netdev_priv(dev); 6967 int i; 6968 6969 /* Only process tx rings/combined rings in netpoll mode. */ 6970 for (i = 0; i < bp->tx_nr_rings; i++) { 6971 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6972 6973 napi_schedule(&txr->bnapi->napi); 6974 } 6975 } 6976 #endif 6977 6978 static void bnxt_timer(struct timer_list *t) 6979 { 6980 struct bnxt *bp = from_timer(bp, t, timer); 6981 struct net_device *dev = bp->dev; 6982 6983 if (!netif_running(dev)) 6984 return; 6985 6986 if (atomic_read(&bp->intr_sem) != 0) 6987 goto bnxt_restart_timer; 6988 6989 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6990 bp->stats_coal_ticks) { 6991 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6992 bnxt_queue_sp_work(bp); 6993 } 6994 6995 if (bnxt_tc_flower_enabled(bp)) { 6996 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 6997 bnxt_queue_sp_work(bp); 6998 } 6999 bnxt_restart_timer: 7000 mod_timer(&bp->timer, jiffies + bp->current_interval); 7001 } 7002 7003 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 7004 { 7005 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 7006 * set. 
If the device is being closed, bnxt_close() may be holding 7007 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 7008 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 7009 */ 7010 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7011 rtnl_lock(); 7012 } 7013 7014 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 7015 { 7016 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7017 rtnl_unlock(); 7018 } 7019 7020 /* Only called from bnxt_sp_task() */ 7021 static void bnxt_reset(struct bnxt *bp, bool silent) 7022 { 7023 bnxt_rtnl_lock_sp(bp); 7024 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7025 bnxt_reset_task(bp, silent); 7026 bnxt_rtnl_unlock_sp(bp); 7027 } 7028 7029 static void bnxt_cfg_ntp_filters(struct bnxt *); 7030 7031 static void bnxt_sp_task(struct work_struct *work) 7032 { 7033 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 7034 7035 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7036 smp_mb__after_atomic(); 7037 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 7038 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7039 return; 7040 } 7041 7042 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 7043 bnxt_cfg_rx_mode(bp); 7044 7045 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 7046 bnxt_cfg_ntp_filters(bp); 7047 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 7048 bnxt_hwrm_exec_fwd_req(bp); 7049 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7050 bnxt_hwrm_tunnel_dst_port_alloc( 7051 bp, bp->vxlan_port, 7052 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7053 } 7054 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7055 bnxt_hwrm_tunnel_dst_port_free( 7056 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7057 } 7058 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7059 bnxt_hwrm_tunnel_dst_port_alloc( 7060 bp, bp->nge_port, 7061 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7062 } 7063 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7064 bnxt_hwrm_tunnel_dst_port_free( 7065 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7066 } 7067 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7068 bnxt_hwrm_port_qstats(bp); 7069 7070 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7071 int rc; 7072 7073 mutex_lock(&bp->link_lock); 7074 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7075 &bp->sp_event)) 7076 bnxt_hwrm_phy_qcaps(bp); 7077 7078 rc = bnxt_update_link(bp, true); 7079 mutex_unlock(&bp->link_lock); 7080 if (rc) 7081 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7082 rc); 7083 } 7084 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7085 mutex_lock(&bp->link_lock); 7086 bnxt_get_port_module_status(bp); 7087 mutex_unlock(&bp->link_lock); 7088 } 7089 7090 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 7091 bnxt_tc_flow_stats_work(bp); 7092 7093 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 7094 * must be the last functions to be called before exiting. 
7095 */ 7096 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7097 bnxt_reset(bp, false); 7098 7099 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 7100 bnxt_reset(bp, true); 7101 7102 smp_mb__before_atomic(); 7103 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7104 } 7105 7106 /* Under rtnl_lock */ 7107 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 7108 int tx_xdp) 7109 { 7110 int max_rx, max_tx, tx_sets = 1; 7111 int tx_rings_needed; 7112 int rc; 7113 7114 if (tcs) 7115 tx_sets = tcs; 7116 7117 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 7118 if (rc) 7119 return rc; 7120 7121 if (max_rx < rx) 7122 return -ENOMEM; 7123 7124 tx_rings_needed = tx * tx_sets + tx_xdp; 7125 if (max_tx < tx_rings_needed) 7126 return -ENOMEM; 7127 7128 return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); 7129 } 7130 7131 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7132 { 7133 if (bp->bar2) { 7134 pci_iounmap(pdev, bp->bar2); 7135 bp->bar2 = NULL; 7136 } 7137 7138 if (bp->bar1) { 7139 pci_iounmap(pdev, bp->bar1); 7140 bp->bar1 = NULL; 7141 } 7142 7143 if (bp->bar0) { 7144 pci_iounmap(pdev, bp->bar0); 7145 bp->bar0 = NULL; 7146 } 7147 } 7148 7149 static void bnxt_cleanup_pci(struct bnxt *bp) 7150 { 7151 bnxt_unmap_bars(bp, bp->pdev); 7152 pci_release_regions(bp->pdev); 7153 pci_disable_device(bp->pdev); 7154 } 7155 7156 static void bnxt_init_dflt_coal(struct bnxt *bp) 7157 { 7158 struct bnxt_coal *coal; 7159 7160 /* Tick values in micro seconds. 7161 * 1 coal_buf x bufs_per_record = 1 completion record. 7162 */ 7163 coal = &bp->rx_coal; 7164 coal->coal_ticks = 14; 7165 coal->coal_bufs = 30; 7166 coal->coal_ticks_irq = 1; 7167 coal->coal_bufs_irq = 2; 7168 coal->idle_thresh = 25; 7169 coal->bufs_per_record = 2; 7170 coal->budget = 64; /* NAPI budget */ 7171 7172 coal = &bp->tx_coal; 7173 coal->coal_ticks = 28; 7174 coal->coal_bufs = 30; 7175 coal->coal_ticks_irq = 2; 7176 coal->coal_bufs_irq = 2; 7177 coal->bufs_per_record = 1; 7178 7179 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 7180 } 7181 7182 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 7183 { 7184 int rc; 7185 struct bnxt *bp = netdev_priv(dev); 7186 7187 SET_NETDEV_DEV(dev, &pdev->dev); 7188 7189 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 7190 rc = pci_enable_device(pdev); 7191 if (rc) { 7192 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 7193 goto init_err; 7194 } 7195 7196 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 7197 dev_err(&pdev->dev, 7198 "Cannot find PCI device base address, aborting\n"); 7199 rc = -ENODEV; 7200 goto init_err_disable; 7201 } 7202 7203 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 7204 if (rc) { 7205 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 7206 goto init_err_disable; 7207 } 7208 7209 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 7210 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 7211 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 7212 goto init_err_disable; 7213 } 7214 7215 pci_set_master(pdev); 7216 7217 bp->dev = dev; 7218 bp->pdev = pdev; 7219 7220 bp->bar0 = pci_ioremap_bar(pdev, 0); 7221 if (!bp->bar0) { 7222 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 7223 rc = -ENOMEM; 7224 goto init_err_release; 7225 } 7226 7227 bp->bar1 = pci_ioremap_bar(pdev, 2); 7228 if (!bp->bar1) { 7229 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 7230 rc = -ENOMEM; 7231 goto init_err_release; 7232 } 7233 7234 bp->bar2 = pci_ioremap_bar(pdev, 4); 7235 if (!bp->bar2) { 7236 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 7237 rc = -ENOMEM; 7238 goto init_err_release; 7239 } 7240 7241 pci_enable_pcie_error_reporting(pdev); 7242 7243 INIT_WORK(&bp->sp_task, bnxt_sp_task); 7244 7245 spin_lock_init(&bp->ntp_fltr_lock); 7246 7247 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 7248 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 7249 7250 bnxt_init_dflt_coal(bp); 7251 7252 timer_setup(&bp->timer, bnxt_timer, 0); 7253 bp->current_interval = BNXT_TIMER_INTERVAL; 7254 7255 clear_bit(BNXT_STATE_OPEN, &bp->state); 7256 return 0; 7257 7258 init_err_release: 7259 bnxt_unmap_bars(bp, pdev); 7260 pci_release_regions(pdev); 7261 7262 init_err_disable: 7263 pci_disable_device(pdev); 7264 7265 init_err: 7266 return rc; 7267 } 7268 7269 /* rtnl_lock held */ 7270 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 7271 { 7272 struct sockaddr *addr = p; 7273 struct bnxt *bp = netdev_priv(dev); 7274 int rc = 0; 7275 7276 if (!is_valid_ether_addr(addr->sa_data)) 7277 return -EADDRNOTAVAIL; 7278 7279 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 7280 return 0; 7281 7282 rc = bnxt_approve_mac(bp, addr->sa_data); 7283 if (rc) 7284 return rc; 7285 7286 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 7287 if (netif_running(dev)) { 7288 bnxt_close_nic(bp, false, false); 7289 rc = bnxt_open_nic(bp, false, false); 7290 } 7291 7292 return rc; 7293 } 7294 7295 /* rtnl_lock held */ 7296 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 7297 { 7298 struct bnxt *bp = netdev_priv(dev); 7299 7300 if (netif_running(dev)) 7301 bnxt_close_nic(bp, false, false); 7302 7303 dev->mtu = new_mtu; 7304 bnxt_set_ring_params(bp); 7305 7306 if (netif_running(dev)) 7307 return bnxt_open_nic(bp, false, false); 7308 7309 return 0; 7310 } 7311 7312 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 7313 { 7314 struct bnxt *bp = netdev_priv(dev); 7315 bool sh = false; 7316 int rc; 7317 7318 if (tc > bp->max_tc) { 7319 netdev_err(dev, "Too many traffic classes requested: %d. 
Max supported is %d.\n", 7320 tc, bp->max_tc); 7321 return -EINVAL; 7322 } 7323 7324 if (netdev_get_num_tc(dev) == tc) 7325 return 0; 7326 7327 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7328 sh = true; 7329 7330 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 7331 sh, tc, bp->tx_nr_rings_xdp); 7332 if (rc) 7333 return rc; 7334 7335 /* Needs to close the device and do hw resource re-allocations */ 7336 if (netif_running(bp->dev)) 7337 bnxt_close_nic(bp, true, false); 7338 7339 if (tc) { 7340 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 7341 netdev_set_num_tc(dev, tc); 7342 } else { 7343 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7344 netdev_reset_tc(dev); 7345 } 7346 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 7347 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7348 bp->tx_nr_rings + bp->rx_nr_rings; 7349 bp->num_stat_ctxs = bp->cp_nr_rings; 7350 7351 if (netif_running(bp->dev)) 7352 return bnxt_open_nic(bp, true, false); 7353 7354 return 0; 7355 } 7356 7357 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 7358 void *cb_priv) 7359 { 7360 struct bnxt *bp = cb_priv; 7361 7362 if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev)) 7363 return -EOPNOTSUPP; 7364 7365 switch (type) { 7366 case TC_SETUP_CLSFLOWER: 7367 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 7368 default: 7369 return -EOPNOTSUPP; 7370 } 7371 } 7372 7373 static int bnxt_setup_tc_block(struct net_device *dev, 7374 struct tc_block_offload *f) 7375 { 7376 struct bnxt *bp = netdev_priv(dev); 7377 7378 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 7379 return -EOPNOTSUPP; 7380 7381 switch (f->command) { 7382 case TC_BLOCK_BIND: 7383 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, 7384 bp, bp); 7385 case TC_BLOCK_UNBIND: 7386 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); 7387 return 0; 7388 default: 7389 return -EOPNOTSUPP; 7390 } 7391 } 7392 7393 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 7394 void *type_data) 7395 { 7396 switch (type) { 7397 case TC_SETUP_BLOCK: 7398 return bnxt_setup_tc_block(dev, type_data); 7399 case TC_SETUP_QDISC_MQPRIO: { 7400 struct tc_mqprio_qopt *mqprio = type_data; 7401 7402 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 7403 7404 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 7405 } 7406 default: 7407 return -EOPNOTSUPP; 7408 } 7409 } 7410 7411 #ifdef CONFIG_RFS_ACCEL 7412 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 7413 struct bnxt_ntuple_filter *f2) 7414 { 7415 struct flow_keys *keys1 = &f1->fkeys; 7416 struct flow_keys *keys2 = &f2->fkeys; 7417 7418 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 7419 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 7420 keys1->ports.ports == keys2->ports.ports && 7421 keys1->basic.ip_proto == keys2->basic.ip_proto && 7422 keys1->basic.n_proto == keys2->basic.n_proto && 7423 keys1->control.flags == keys2->control.flags && 7424 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 7425 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 7426 return true; 7427 7428 return false; 7429 } 7430 7431 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 7432 u16 rxq_index, u32 flow_id) 7433 { 7434 struct bnxt *bp = netdev_priv(dev); 7435 struct bnxt_ntuple_filter *fltr, *new_fltr; 7436 struct flow_keys *fkeys; 7437 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 7438 int rc = 0, idx, bit_id, l2_idx = 0; 7439 struct hlist_head *head; 7440 7441 if 
(!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 7442 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7443 int off = 0, j; 7444 7445 netif_addr_lock_bh(dev); 7446 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 7447 if (ether_addr_equal(eth->h_dest, 7448 vnic->uc_list + off)) { 7449 l2_idx = j + 1; 7450 break; 7451 } 7452 } 7453 netif_addr_unlock_bh(dev); 7454 if (!l2_idx) 7455 return -EINVAL; 7456 } 7457 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 7458 if (!new_fltr) 7459 return -ENOMEM; 7460 7461 fkeys = &new_fltr->fkeys; 7462 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 7463 rc = -EPROTONOSUPPORT; 7464 goto err_free; 7465 } 7466 7467 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 7468 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 7469 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 7470 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 7471 rc = -EPROTONOSUPPORT; 7472 goto err_free; 7473 } 7474 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 7475 bp->hwrm_spec_code < 0x10601) { 7476 rc = -EPROTONOSUPPORT; 7477 goto err_free; 7478 } 7479 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 7480 bp->hwrm_spec_code < 0x10601) { 7481 rc = -EPROTONOSUPPORT; 7482 goto err_free; 7483 } 7484 7485 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 7486 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 7487 7488 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 7489 head = &bp->ntp_fltr_hash_tbl[idx]; 7490 rcu_read_lock(); 7491 hlist_for_each_entry_rcu(fltr, head, hash) { 7492 if (bnxt_fltr_match(fltr, new_fltr)) { 7493 rcu_read_unlock(); 7494 rc = 0; 7495 goto err_free; 7496 } 7497 } 7498 rcu_read_unlock(); 7499 7500 spin_lock_bh(&bp->ntp_fltr_lock); 7501 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 7502 BNXT_NTP_FLTR_MAX_FLTR, 0); 7503 if (bit_id < 0) { 7504 spin_unlock_bh(&bp->ntp_fltr_lock); 7505 rc = -ENOMEM; 7506 goto err_free; 7507 } 7508 7509 new_fltr->sw_id = (u16)bit_id; 7510 new_fltr->flow_id = flow_id; 7511 new_fltr->l2_fltr_idx = l2_idx; 7512 new_fltr->rxq = rxq_index; 7513 hlist_add_head_rcu(&new_fltr->hash, head); 7514 bp->ntp_fltr_count++; 7515 spin_unlock_bh(&bp->ntp_fltr_lock); 7516 7517 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7518 bnxt_queue_sp_work(bp); 7519 7520 return new_fltr->sw_id; 7521 7522 err_free: 7523 kfree(new_fltr); 7524 return rc; 7525 } 7526 7527 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7528 { 7529 int i; 7530 7531 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 7532 struct hlist_head *head; 7533 struct hlist_node *tmp; 7534 struct bnxt_ntuple_filter *fltr; 7535 int rc; 7536 7537 head = &bp->ntp_fltr_hash_tbl[i]; 7538 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 7539 bool del = false; 7540 7541 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 7542 if (rps_may_expire_flow(bp->dev, fltr->rxq, 7543 fltr->flow_id, 7544 fltr->sw_id)) { 7545 bnxt_hwrm_cfa_ntuple_filter_free(bp, 7546 fltr); 7547 del = true; 7548 } 7549 } else { 7550 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 7551 fltr); 7552 if (rc) 7553 del = true; 7554 else 7555 set_bit(BNXT_FLTR_VALID, &fltr->state); 7556 } 7557 7558 if (del) { 7559 spin_lock_bh(&bp->ntp_fltr_lock); 7560 hlist_del_rcu(&fltr->hash); 7561 bp->ntp_fltr_count--; 7562 spin_unlock_bh(&bp->ntp_fltr_lock); 7563 synchronize_rcu(); 7564 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 7565 kfree(fltr); 7566 } 7567 } 7568 } 7569 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 7570 netdev_info(bp->dev, "Receive PF driver unload event!"); 7571 } 7572 7573 #else 
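/* CONFIG_RFS_ACCEL is not set: provide an empty bnxt_cfg_ntp_filters() stub so callers need no #ifdefs. */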
7574 7575 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7576 { 7577 } 7578 7579 #endif /* CONFIG_RFS_ACCEL */ 7580 7581 static void bnxt_udp_tunnel_add(struct net_device *dev, 7582 struct udp_tunnel_info *ti) 7583 { 7584 struct bnxt *bp = netdev_priv(dev); 7585 7586 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7587 return; 7588 7589 if (!netif_running(dev)) 7590 return; 7591 7592 switch (ti->type) { 7593 case UDP_TUNNEL_TYPE_VXLAN: 7594 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 7595 return; 7596 7597 bp->vxlan_port_cnt++; 7598 if (bp->vxlan_port_cnt == 1) { 7599 bp->vxlan_port = ti->port; 7600 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7601 bnxt_queue_sp_work(bp); 7602 } 7603 break; 7604 case UDP_TUNNEL_TYPE_GENEVE: 7605 if (bp->nge_port_cnt && bp->nge_port != ti->port) 7606 return; 7607 7608 bp->nge_port_cnt++; 7609 if (bp->nge_port_cnt == 1) { 7610 bp->nge_port = ti->port; 7611 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 7612 } 7613 break; 7614 default: 7615 return; 7616 } 7617 7618 bnxt_queue_sp_work(bp); 7619 } 7620 7621 static void bnxt_udp_tunnel_del(struct net_device *dev, 7622 struct udp_tunnel_info *ti) 7623 { 7624 struct bnxt *bp = netdev_priv(dev); 7625 7626 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7627 return; 7628 7629 if (!netif_running(dev)) 7630 return; 7631 7632 switch (ti->type) { 7633 case UDP_TUNNEL_TYPE_VXLAN: 7634 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 7635 return; 7636 bp->vxlan_port_cnt--; 7637 7638 if (bp->vxlan_port_cnt != 0) 7639 return; 7640 7641 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 7642 break; 7643 case UDP_TUNNEL_TYPE_GENEVE: 7644 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 7645 return; 7646 bp->nge_port_cnt--; 7647 7648 if (bp->nge_port_cnt != 0) 7649 return; 7650 7651 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 7652 break; 7653 default: 7654 return; 7655 } 7656 7657 bnxt_queue_sp_work(bp); 7658 } 7659 7660 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7661 struct net_device *dev, u32 filter_mask, 7662 int nlflags) 7663 { 7664 struct bnxt *bp = netdev_priv(dev); 7665 7666 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 7667 nlflags, filter_mask, NULL); 7668 } 7669 7670 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 7671 u16 flags) 7672 { 7673 struct bnxt *bp = netdev_priv(dev); 7674 struct nlattr *attr, *br_spec; 7675 int rem, rc = 0; 7676 7677 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 7678 return -EOPNOTSUPP; 7679 7680 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7681 if (!br_spec) 7682 return -EINVAL; 7683 7684 nla_for_each_nested(attr, br_spec, rem) { 7685 u16 mode; 7686 7687 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7688 continue; 7689 7690 if (nla_len(attr) < sizeof(mode)) 7691 return -EINVAL; 7692 7693 mode = nla_get_u16(attr); 7694 if (mode == bp->br_mode) 7695 break; 7696 7697 rc = bnxt_hwrm_set_br_mode(bp, mode); 7698 if (!rc) 7699 bp->br_mode = mode; 7700 break; 7701 } 7702 return rc; 7703 } 7704 7705 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, 7706 size_t len) 7707 { 7708 struct bnxt *bp = netdev_priv(dev); 7709 int rc; 7710 7711 /* The PF and it's VF-reps only support the switchdev framework */ 7712 if (!BNXT_PF(bp)) 7713 return -EOPNOTSUPP; 7714 7715 rc = snprintf(buf, len, "p%d", bp->pf.port_id); 7716 7717 if (rc >= len) 7718 return -EOPNOTSUPP; 7719 return 0; 7720 } 7721 7722 int 
bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) 7723 { 7724 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 7725 return -EOPNOTSUPP; 7726 7727 /* The PF and it's VF-reps only support the switchdev framework */ 7728 if (!BNXT_PF(bp)) 7729 return -EOPNOTSUPP; 7730 7731 switch (attr->id) { 7732 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 7733 /* In SRIOV each PF-pool (PF + child VFs) serves as a 7734 * switching domain, the PF's perm mac-addr can be used 7735 * as the unique parent-id 7736 */ 7737 attr->u.ppid.id_len = ETH_ALEN; 7738 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); 7739 break; 7740 default: 7741 return -EOPNOTSUPP; 7742 } 7743 return 0; 7744 } 7745 7746 static int bnxt_swdev_port_attr_get(struct net_device *dev, 7747 struct switchdev_attr *attr) 7748 { 7749 return bnxt_port_attr_get(netdev_priv(dev), attr); 7750 } 7751 7752 static const struct switchdev_ops bnxt_switchdev_ops = { 7753 .switchdev_port_attr_get = bnxt_swdev_port_attr_get 7754 }; 7755 7756 static const struct net_device_ops bnxt_netdev_ops = { 7757 .ndo_open = bnxt_open, 7758 .ndo_start_xmit = bnxt_start_xmit, 7759 .ndo_stop = bnxt_close, 7760 .ndo_get_stats64 = bnxt_get_stats64, 7761 .ndo_set_rx_mode = bnxt_set_rx_mode, 7762 .ndo_do_ioctl = bnxt_ioctl, 7763 .ndo_validate_addr = eth_validate_addr, 7764 .ndo_set_mac_address = bnxt_change_mac_addr, 7765 .ndo_change_mtu = bnxt_change_mtu, 7766 .ndo_fix_features = bnxt_fix_features, 7767 .ndo_set_features = bnxt_set_features, 7768 .ndo_tx_timeout = bnxt_tx_timeout, 7769 #ifdef CONFIG_BNXT_SRIOV 7770 .ndo_get_vf_config = bnxt_get_vf_config, 7771 .ndo_set_vf_mac = bnxt_set_vf_mac, 7772 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 7773 .ndo_set_vf_rate = bnxt_set_vf_bw, 7774 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 7775 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 7776 #endif 7777 #ifdef CONFIG_NET_POLL_CONTROLLER 7778 .ndo_poll_controller = bnxt_poll_controller, 7779 #endif 7780 .ndo_setup_tc = bnxt_setup_tc, 7781 #ifdef CONFIG_RFS_ACCEL 7782 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 7783 #endif 7784 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 7785 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 7786 .ndo_bpf = bnxt_xdp, 7787 .ndo_bridge_getlink = bnxt_bridge_getlink, 7788 .ndo_bridge_setlink = bnxt_bridge_setlink, 7789 .ndo_get_phys_port_name = bnxt_get_phys_port_name 7790 }; 7791 7792 static void bnxt_remove_one(struct pci_dev *pdev) 7793 { 7794 struct net_device *dev = pci_get_drvdata(pdev); 7795 struct bnxt *bp = netdev_priv(dev); 7796 7797 if (BNXT_PF(bp)) { 7798 bnxt_sriov_disable(bp); 7799 bnxt_dl_unregister(bp); 7800 } 7801 7802 pci_disable_pcie_error_reporting(pdev); 7803 unregister_netdev(dev); 7804 bnxt_shutdown_tc(bp); 7805 bnxt_cancel_sp_work(bp); 7806 bp->sp_event = 0; 7807 7808 bnxt_clear_int_mode(bp); 7809 bnxt_hwrm_func_drv_unrgtr(bp); 7810 bnxt_free_hwrm_resources(bp); 7811 bnxt_free_hwrm_short_cmd_req(bp); 7812 bnxt_ethtool_free(bp); 7813 bnxt_dcb_free(bp); 7814 kfree(bp->edev); 7815 bp->edev = NULL; 7816 bnxt_cleanup_pci(bp); 7817 free_netdev(dev); 7818 } 7819 7820 static int bnxt_probe_phy(struct bnxt *bp) 7821 { 7822 int rc = 0; 7823 struct bnxt_link_info *link_info = &bp->link_info; 7824 7825 rc = bnxt_hwrm_phy_qcaps(bp); 7826 if (rc) { 7827 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 7828 rc); 7829 return rc; 7830 } 7831 mutex_init(&bp->link_lock); 7832 7833 rc = bnxt_update_link(bp, false); 7834 if (rc) { 7835 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 7836 rc); 7837 return rc; 7838 
} 7839 7840 /* Older firmware does not have supported_auto_speeds, so assume 7841 * that all supported speeds can be autonegotiated. 7842 */ 7843 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 7844 link_info->support_auto_speeds = link_info->support_speeds; 7845 7846 /* Initialize the ethtool settings copy with NVM settings */ 7847 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 7848 link_info->autoneg = BNXT_AUTONEG_SPEED; 7849 if (bp->hwrm_spec_code >= 0x10201) { 7850 if (link_info->auto_pause_setting & 7851 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 7852 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7853 } else { 7854 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7855 } 7856 link_info->advertising = link_info->auto_link_speeds; 7857 } else { 7858 link_info->req_link_speed = link_info->force_link_speed; 7859 link_info->req_duplex = link_info->duplex_setting; 7860 } 7861 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 7862 link_info->req_flow_ctrl = 7863 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 7864 else 7865 link_info->req_flow_ctrl = link_info->force_pause_setting; 7866 return rc; 7867 } 7868 7869 static int bnxt_get_max_irq(struct pci_dev *pdev) 7870 { 7871 u16 ctrl; 7872 7873 if (!pdev->msix_cap) 7874 return 1; 7875 7876 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 7877 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 7878 } 7879 7880 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7881 int *max_cp) 7882 { 7883 int max_ring_grps = 0; 7884 7885 #ifdef CONFIG_BNXT_SRIOV 7886 if (!BNXT_PF(bp)) { 7887 *max_tx = bp->vf.max_tx_rings; 7888 *max_rx = bp->vf.max_rx_rings; 7889 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); 7890 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); 7891 max_ring_grps = bp->vf.max_hw_ring_grps; 7892 } else 7893 #endif 7894 { 7895 *max_tx = bp->pf.max_tx_rings; 7896 *max_rx = bp->pf.max_rx_rings; 7897 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); 7898 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); 7899 max_ring_grps = bp->pf.max_hw_ring_grps; 7900 } 7901 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 7902 *max_cp -= 1; 7903 *max_rx -= 2; 7904 } 7905 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7906 *max_rx >>= 1; 7907 *max_rx = min_t(int, *max_rx, max_ring_grps); 7908 } 7909 7910 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 7911 { 7912 int rx, tx, cp; 7913 7914 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 7915 if (!rx || !tx || !cp) 7916 return -ENOMEM; 7917 7918 *max_rx = rx; 7919 *max_tx = tx; 7920 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 7921 } 7922 7923 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7924 bool shared) 7925 { 7926 int rc; 7927 7928 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7929 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 7930 /* Not enough rings, try disabling agg rings.
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc)
			return rc;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	/* Reduce default rings to reduce memory usage on multi-port cards */
	if (bp->port_count > 1)
		dflt_rings = min_t(int, dflt_rings, 4);
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);

	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			eth_hw_addr_random(bp->dev);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
#endif
	}
	return rc;
}

static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ?
"8.0GT/s" : 8038 "Unknown", width); 8039 } 8040 8041 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8042 { 8043 static int version_printed; 8044 struct net_device *dev; 8045 struct bnxt *bp; 8046 int rc, max_irqs; 8047 8048 if (pci_is_bridge(pdev)) 8049 return -ENODEV; 8050 8051 if (version_printed++ == 0) 8052 pr_info("%s", version); 8053 8054 max_irqs = bnxt_get_max_irq(pdev); 8055 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 8056 if (!dev) 8057 return -ENOMEM; 8058 8059 bp = netdev_priv(dev); 8060 8061 if (bnxt_vf_pciid(ent->driver_data)) 8062 bp->flags |= BNXT_FLAG_VF; 8063 8064 if (pdev->msix_cap) 8065 bp->flags |= BNXT_FLAG_MSIX_CAP; 8066 8067 rc = bnxt_init_board(pdev, dev); 8068 if (rc < 0) 8069 goto init_err_free; 8070 8071 dev->netdev_ops = &bnxt_netdev_ops; 8072 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 8073 dev->ethtool_ops = &bnxt_ethtool_ops; 8074 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); 8075 pci_set_drvdata(pdev, dev); 8076 8077 rc = bnxt_alloc_hwrm_resources(bp); 8078 if (rc) 8079 goto init_err_pci_clean; 8080 8081 mutex_init(&bp->hwrm_cmd_lock); 8082 rc = bnxt_hwrm_ver_get(bp); 8083 if (rc) 8084 goto init_err_pci_clean; 8085 8086 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 8087 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 8088 if (rc) 8089 goto init_err_pci_clean; 8090 } 8091 8092 rc = bnxt_hwrm_func_reset(bp); 8093 if (rc) 8094 goto init_err_pci_clean; 8095 8096 bnxt_hwrm_fw_set_time(bp); 8097 8098 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 8099 NETIF_F_TSO | NETIF_F_TSO6 | 8100 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 8101 NETIF_F_GSO_IPXIP4 | 8102 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 8103 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 8104 NETIF_F_RXCSUM | NETIF_F_GRO; 8105 8106 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 8107 dev->hw_features |= NETIF_F_LRO; 8108 8109 dev->hw_enc_features = 8110 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 8111 NETIF_F_TSO | NETIF_F_TSO6 | 8112 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 8113 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 8114 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 8115 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 8116 NETIF_F_GSO_GRE_CSUM; 8117 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 8118 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 8119 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; 8120 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 8121 dev->hw_features |= NETIF_F_GRO_HW; 8122 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 8123 if (dev->features & NETIF_F_GRO_HW) 8124 dev->features &= ~NETIF_F_LRO; 8125 dev->priv_flags |= IFF_UNICAST_FLT; 8126 8127 #ifdef CONFIG_BNXT_SRIOV 8128 init_waitqueue_head(&bp->sriov_cfg_wait); 8129 mutex_init(&bp->sriov_lock); 8130 #endif 8131 bp->gro_func = bnxt_gro_func_5730x; 8132 if (BNXT_CHIP_P4_PLUS(bp)) 8133 bp->gro_func = bnxt_gro_func_5731x; 8134 else 8135 bp->flags |= BNXT_FLAG_DOUBLE_DB; 8136 8137 rc = bnxt_hwrm_func_drv_rgtr(bp); 8138 if (rc) 8139 goto init_err_pci_clean; 8140 8141 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); 8142 if (rc) 8143 goto init_err_pci_clean; 8144 8145 bp->ulp_probe = bnxt_ulp_probe; 8146 8147 /* Get the MAX capabilities for this function */ 8148 rc = bnxt_hwrm_func_qcaps(bp); 8149 if (rc) { 8150 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 8151 rc); 8152 rc = -1; 8153 goto init_err_pci_clean; 8154 } 8155 rc = bnxt_init_mac_addr(bp); 8156 if (rc) { 8157 dev_err(&pdev->dev, "Unable to initialize mac 
address.\n"); 8158 rc = -EADDRNOTAVAIL; 8159 goto init_err_pci_clean; 8160 } 8161 rc = bnxt_hwrm_queue_qportcfg(bp); 8162 if (rc) { 8163 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", 8164 rc); 8165 rc = -1; 8166 goto init_err_pci_clean; 8167 } 8168 8169 bnxt_hwrm_func_qcfg(bp); 8170 bnxt_hwrm_port_led_qcaps(bp); 8171 bnxt_ethtool_init(bp); 8172 bnxt_dcb_init(bp); 8173 8174 /* MTU range: 60 - FW defined max */ 8175 dev->min_mtu = ETH_ZLEN; 8176 dev->max_mtu = bp->max_mtu; 8177 8178 rc = bnxt_probe_phy(bp); 8179 if (rc) 8180 goto init_err_pci_clean; 8181 8182 bnxt_set_rx_skb_mode(bp, false); 8183 bnxt_set_tpa_flags(bp); 8184 bnxt_set_ring_params(bp); 8185 bnxt_set_max_func_irqs(bp, max_irqs); 8186 rc = bnxt_set_dflt_rings(bp, true); 8187 if (rc) { 8188 netdev_err(bp->dev, "Not enough rings available.\n"); 8189 rc = -ENOMEM; 8190 goto init_err_pci_clean; 8191 } 8192 8193 /* Default RSS hash cfg. */ 8194 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 8195 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 8196 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 8197 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 8198 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 8199 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 8200 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 8201 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 8202 } 8203 8204 bnxt_hwrm_vnic_qcaps(bp); 8205 if (bnxt_rfs_supported(bp)) { 8206 dev->hw_features |= NETIF_F_NTUPLE; 8207 if (bnxt_rfs_capable(bp)) { 8208 bp->flags |= BNXT_FLAG_RFS; 8209 dev->features |= NETIF_F_NTUPLE; 8210 } 8211 } 8212 8213 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) 8214 bp->flags |= BNXT_FLAG_STRIP_VLAN; 8215 8216 rc = bnxt_init_int_mode(bp); 8217 if (rc) 8218 goto init_err_pci_clean; 8219 8220 bnxt_get_wol_settings(bp); 8221 if (bp->flags & BNXT_FLAG_WOL_CAP) 8222 device_set_wakeup_enable(&pdev->dev, bp->wol); 8223 else 8224 device_set_wakeup_capable(&pdev->dev, false); 8225 8226 if (BNXT_PF(bp)) { 8227 if (!bnxt_pf_wq) { 8228 bnxt_pf_wq = 8229 create_singlethread_workqueue("bnxt_pf_wq"); 8230 if (!bnxt_pf_wq) { 8231 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 8232 goto init_err_pci_clean; 8233 } 8234 } 8235 bnxt_init_tc(bp); 8236 } 8237 8238 rc = register_netdev(dev); 8239 if (rc) 8240 goto init_err_cleanup_tc; 8241 8242 if (BNXT_PF(bp)) 8243 bnxt_dl_register(bp); 8244 8245 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 8246 board_info[ent->driver_data].name, 8247 (long)pci_resource_start(pdev, 0), dev->dev_addr); 8248 8249 bnxt_parse_log_pcie_link(bp); 8250 8251 return 0; 8252 8253 init_err_cleanup_tc: 8254 bnxt_shutdown_tc(bp); 8255 bnxt_clear_int_mode(bp); 8256 8257 init_err_pci_clean: 8258 bnxt_cleanup_pci(bp); 8259 8260 init_err_free: 8261 free_netdev(dev); 8262 return rc; 8263 } 8264 8265 static void bnxt_shutdown(struct pci_dev *pdev) 8266 { 8267 struct net_device *dev = pci_get_drvdata(pdev); 8268 struct bnxt *bp; 8269 8270 if (!dev) 8271 return; 8272 8273 rtnl_lock(); 8274 bp = netdev_priv(dev); 8275 if (!bp) 8276 goto shutdown_exit; 8277 8278 if (netif_running(dev)) 8279 dev_close(dev); 8280 8281 bnxt_ulp_shutdown(bp); 8282 8283 if (system_state == SYSTEM_POWER_OFF) { 8284 bnxt_clear_int_mode(bp); 8285 pci_wake_from_d3(pdev, bp->wol); 8286 pci_set_power_state(pdev, PCI_D3hot); 8287 } 8288 8289 shutdown_exit: 8290 rtnl_unlock(); 8291 } 8292 8293 #ifdef CONFIG_PM_SLEEP 8294 static int bnxt_suspend(struct device *device) 8295 { 8296 struct pci_dev *pdev = to_pci_dev(device); 8297 struct net_device *dev = pci_get_drvdata(pdev); 8298 struct 
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
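 *
 * The handler re-enables the PCI device, restores bus mastering,
 * issues a firmware reset, and reopens the netdev (restarting the
 * ULP) if the interface was running when the error occurred.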
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
}

module_init(bnxt_init);
module_exit(bnxt_exit);