1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2005 - 2016 Broadcom
4 * All rights reserved.
5 *
6 * Contact Information:
7 * linux-drivers@emulex.com
8 *
9 * Emulex
10 * 3333 Susan Street
11 * Costa Mesa, CA 92626
12 */
13
14 #include <linux/prefetch.h>
15 #include <linux/module.h>
16 #include "be.h"
17 #include "be_cmds.h"
18 #include <asm/div64.h>
19 #include <linux/if_bridge.h>
20 #include <net/busy_poll.h>
21 #include <net/vxlan.h>
22
23 MODULE_DESCRIPTION(DRV_DESC);
24 MODULE_AUTHOR("Emulex Corporation");
25 MODULE_LICENSE("GPL");
26
27 /* num_vfs module param is obsolete.
28 * Use sysfs method to enable/disable VFs.
29 */
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, 0444);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, 0444);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 /* Per-module error detection/recovery workq shared across all functions.
39 * Each function schedules its own work request on this shared workq.
40 */
41 static struct workqueue_struct *be_err_recovery_workq;
42
43 static const struct pci_device_id be_dev_ids[] = {
44 #ifdef CONFIG_BE2NET_BE2
45 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
47 #endif /* CONFIG_BE2NET_BE2 */
48 #ifdef CONFIG_BE2NET_BE3
49 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
50 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
51 #endif /* CONFIG_BE2NET_BE3 */
52 #ifdef CONFIG_BE2NET_LANCER
53 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
54 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
55 #endif /* CONFIG_BE2NET_LANCER */
56 #ifdef CONFIG_BE2NET_SKYHAWK
57 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
58 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
59 #endif /* CONFIG_BE2NET_SKYHAWK */
60 { 0 }
61 };
62 MODULE_DEVICE_TABLE(pci, be_dev_ids);
63
64 /* Workqueue used by all functions for deferring cmd calls to the adapter */
65 static struct workqueue_struct *be_wq;
66
67 /* UE Status Low CSR */
68 static const char * const ue_status_low_desc[] = {
69 "CEV",
70 "CTX",
71 "DBUF",
72 "ERX",
73 "Host",
74 "MPU",
75 "NDMA",
76 "PTC ",
77 "RDMA ",
78 "RXF ",
79 "RXIPS ",
80 "RXULP0 ",
81 "RXULP1 ",
82 "RXULP2 ",
83 "TIM ",
84 "TPOST ",
85 "TPRE ",
86 "TXIPS ",
87 "TXULP0 ",
88 "TXULP1 ",
89 "UC ",
90 "WDMA ",
91 "TXULP2 ",
92 "HOST1 ",
93 "P0_OB_LINK ",
94 "P1_OB_LINK ",
95 "HOST_GPIO ",
96 "MBOX ",
97 "ERX2 ",
98 "SPARE ",
99 "JTAG ",
100 "MPU_INTPEND "
101 };
102
103 /* UE Status High CSR */
104 static const char * const ue_status_hi_desc[] = {
105 "LPCMEMHOST",
106 "MGMT_MAC",
107 "PCS0ONLINE",
108 "MPU_IRAM",
109 "PCS1ONLINE",
110 "PCTL0",
111 "PCTL1",
112 "PMEM",
113 "RR",
114 "TXPB",
115 "RXPP",
116 "XAUI",
117 "TXP",
118 "ARM",
119 "IPC",
120 "HOST2",
121 "HOST3",
122 "HOST4",
123 "HOST5",
124 "HOST6",
125 "HOST7",
126 "ECRC",
127 "Poison TLP",
128 "NETC",
129 "PERIPH",
130 "LLTXULP",
131 "D2P",
132 "RCON",
133 "LDMA",
134 "LLTXP",
135 "LLTXPB",
136 "Unknown"
137 };
138
139 #define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
140 BE_IF_FLAGS_BROADCAST | \
141 BE_IF_FLAGS_MULTICAST | \
142 BE_IF_FLAGS_PASS_L3L4_ERRORS)
143
144 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
145 {
146 struct be_dma_mem *mem = &q->dma_mem;
147
148 if (mem->va) {
149 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
150 mem->dma);
151 mem->va = NULL;
152 }
153 }
154
155 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
156 u16 len, u16 entry_size)
157 {
158 struct be_dma_mem *mem = &q->dma_mem;
159
160 memset(q, 0, sizeof(*q));
161 q->len = len;
162 q->entry_size = entry_size;
163 mem->size = len * entry_size;
164 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
165 &mem->dma, GFP_KERNEL);
166 if (!mem->va)
167 return -ENOMEM;
168 return 0;
169 }
170
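/* Enable/disable host interrupts by toggling the HOSTINTR bit in the PCI config-space MEMBAR interrupt-control register */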
171 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
172 {
173 u32 reg, enabled;
174
175 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
176 &reg);
177 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
178
179 if (!enabled && enable)
180 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
181 else if (enabled && !enable)
182 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
183 else
184 return;
185
186 pci_write_config_dword(adapter->pdev,
187 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
188 }
189
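/* Enable/disable interrupts via a FW cmd; fall back to the config-space register if the cmd fails */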
190 static void be_intr_set(struct be_adapter *adapter, bool enable)
191 {
192 int status = 0;
193
194 /* On lancer interrupts can't be controlled via this register */
195 if (lancer_chip(adapter))
196 return;
197
198 if (be_check_error(adapter, BE_ERROR_EEH))
199 return;
200
201 status = be_cmd_intr_set(adapter, enable);
202 if (status)
203 be_reg_intr_set(adapter, enable);
204 }
205
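/* Ring the RX doorbell to tell HW how many rx-frag buffers were posted on the given queue */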
206 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
207 {
208 u32 val = 0;
209
210 if (be_check_error(adapter, BE_ERROR_HW))
211 return;
212
213 val |= qid & DB_RQ_RING_ID_MASK;
214 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
215
216 wmb();
217 iowrite32(val, adapter->db + DB_RQ_OFFSET);
218 }
219
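/* Ring the TX doorbell to notify HW of the number of WRBs posted on the tx queue */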
220 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
221 u16 posted)
222 {
223 u32 val = 0;
224
225 if (be_check_error(adapter, BE_ERROR_HW))
226 return;
227
228 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
229 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
230
231 wmb();
232 iowrite32(val, adapter->db + txo->db_offset);
233 }
234
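/* Ring the EQ doorbell: ack num_popped events and optionally re-arm the EQ, clear the interrupt and program the delay multiplier */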
235 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
236 bool arm, bool clear_int, u16 num_popped,
237 u32 eq_delay_mult_enc)
238 {
239 u32 val = 0;
240
241 val |= qid & DB_EQ_RING_ID_MASK;
242 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
243
244 if (be_check_error(adapter, BE_ERROR_HW))
245 return;
246
247 if (arm)
248 val |= 1 << DB_EQ_REARM_SHIFT;
249 if (clear_int)
250 val |= 1 << DB_EQ_CLR_SHIFT;
251 val |= 1 << DB_EQ_EVNT_SHIFT;
252 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
253 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
254 iowrite32(val, adapter->db + DB_EQ_OFFSET);
255 }
256
257 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
258 {
259 u32 val = 0;
260
261 val |= qid & DB_CQ_RING_ID_MASK;
262 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
263 DB_CQ_RING_ID_EXT_MASK_SHIFT);
264
265 if (be_check_error(adapter, BE_ERROR_HW))
266 return;
267
268 if (arm)
269 val |= 1 << DB_CQ_REARM_SHIFT;
270 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
271 iowrite32(val, adapter->db + DB_CQ_OFFSET);
272 }
273
274 static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
275 {
276 int i;
277
278 /* Check if mac has already been added as part of uc-list */
279 for (i = 0; i < adapter->uc_macs; i++) {
280 if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
281 /* mac already added, skip addition */
282 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
283 return 0;
284 }
285 }
286
287 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
288 &adapter->pmac_id[0], 0);
289 }
290
291 static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
292 {
293 int i;
294
295 /* Skip deletion if the programmed mac is
296 * being used in uc-list
297 */
298 for (i = 0; i < adapter->uc_macs; i++) {
299 if (adapter->pmac_id[i + 1] == pmac_id)
300 return;
301 }
302 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
303 }
304
305 static int be_mac_addr_set(struct net_device *netdev, void *p)
306 {
307 struct be_adapter *adapter = netdev_priv(netdev);
308 struct device *dev = &adapter->pdev->dev;
309 struct sockaddr *addr = p;
310 int status;
311 u8 mac[ETH_ALEN];
312 u32 old_pmac_id = adapter->pmac_id[0];
313
314 if (!is_valid_ether_addr(addr->sa_data))
315 return -EADDRNOTAVAIL;
316
317 /* Proceed further only if the user-provided MAC is different
318 * from the active MAC
319 */
320 if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
321 return 0;
322
323 /* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
324 * address
325 */
326 if (BEx_chip(adapter) && be_virtfn(adapter) &&
327 !check_privilege(adapter, BE_PRIV_FILTMGMT))
328 return -EPERM;
329
330 /* if device is not running, copy MAC to netdev->dev_addr */
331 if (!netif_running(netdev))
332 goto done;
333
334 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
335 * privilege or if PF did not provision the new MAC address.
336 * On BE3, this cmd will always fail if the VF doesn't have the
337 * FILTMGMT privilege. This failure is OK, only if the PF programmed
338 * the MAC for the VF.
339 */
340 mutex_lock(&adapter->rx_filter_lock);
341 status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
342 if (!status) {
344 /* Delete the old programmed MAC. This call may fail if the
345 * old MAC was already deleted by the PF driver.
346 */
347 if (adapter->pmac_id[0] != old_pmac_id)
348 be_dev_mac_del(adapter, old_pmac_id);
349 }
350
351 mutex_unlock(&adapter->rx_filter_lock);
352 /* Decide if the new MAC is successfully activated only after
353 * querying the FW
354 */
355 status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
356 adapter->if_handle, true, 0);
357 if (status)
358 goto err;
359
360 /* The MAC change did not happen, either due to lack of privilege
361 * or because the PF didn't pre-provision it.
362 */
363 if (!ether_addr_equal(addr->sa_data, mac)) {
364 status = -EPERM;
365 goto err;
366 }
367
368 /* Remember currently programmed MAC */
369 ether_addr_copy(adapter->dev_mac, addr->sa_data);
370 done:
371 eth_hw_addr_set(netdev, addr->sa_data);
372 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
373 return 0;
374 err:
375 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
376 return status;
377 }
378
379 /* BE2 supports only v0 cmd */
380 static void *hw_stats_from_cmd(struct be_adapter *adapter)
381 {
382 if (BE2_chip(adapter)) {
383 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
384
385 return &cmd->hw_stats;
386 } else if (BE3_chip(adapter)) {
387 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
388
389 return &cmd->hw_stats;
390 } else {
391 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
392
393 return &cmd->hw_stats;
394 }
395 }
396
397 /* BE2 supports only v0 cmd */
398 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
399 {
400 if (BE2_chip(adapter)) {
401 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
402
403 return &hw_stats->erx;
404 } else if (BE3_chip(adapter)) {
405 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
406
407 return &hw_stats->erx;
408 } else {
409 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
410
411 return &hw_stats->erx;
412 }
413 }
414
415 static void populate_be_v0_stats(struct be_adapter *adapter)
416 {
417 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
418 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
419 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
420 struct be_port_rxf_stats_v0 *port_stats =
421 &rxf_stats->port[adapter->port_num];
422 struct be_drv_stats *drvs = &adapter->drv_stats;
423
424 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
425 drvs->rx_pause_frames = port_stats->rx_pause_frames;
426 drvs->rx_crc_errors = port_stats->rx_crc_errors;
427 drvs->rx_control_frames = port_stats->rx_control_frames;
428 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
429 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
430 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
431 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
432 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
433 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
434 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
435 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
436 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
437 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
438 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
439 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
440 drvs->rx_dropped_header_too_small =
441 port_stats->rx_dropped_header_too_small;
442 drvs->rx_address_filtered =
443 port_stats->rx_address_filtered +
444 port_stats->rx_vlan_filtered;
445 drvs->rx_alignment_symbol_errors =
446 port_stats->rx_alignment_symbol_errors;
447
448 drvs->tx_pauseframes = port_stats->tx_pauseframes;
449 drvs->tx_controlframes = port_stats->tx_controlframes;
450
451 if (adapter->port_num)
452 drvs->jabber_events = rxf_stats->port1_jabber_events;
453 else
454 drvs->jabber_events = rxf_stats->port0_jabber_events;
455 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
456 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
457 drvs->forwarded_packets = rxf_stats->forwarded_packets;
458 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
459 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
460 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
461 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
462 }
463
464 static void populate_be_v1_stats(struct be_adapter *adapter)
465 {
466 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
467 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
468 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
469 struct be_port_rxf_stats_v1 *port_stats =
470 &rxf_stats->port[adapter->port_num];
471 struct be_drv_stats *drvs = &adapter->drv_stats;
472
473 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
474 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
475 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
476 drvs->rx_pause_frames = port_stats->rx_pause_frames;
477 drvs->rx_crc_errors = port_stats->rx_crc_errors;
478 drvs->rx_control_frames = port_stats->rx_control_frames;
479 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
480 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
481 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
482 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
483 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
484 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
485 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
486 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
487 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
488 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
489 drvs->rx_dropped_header_too_small =
490 port_stats->rx_dropped_header_too_small;
491 drvs->rx_input_fifo_overflow_drop =
492 port_stats->rx_input_fifo_overflow_drop;
493 drvs->rx_address_filtered = port_stats->rx_address_filtered;
494 drvs->rx_alignment_symbol_errors =
495 port_stats->rx_alignment_symbol_errors;
496 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
497 drvs->tx_pauseframes = port_stats->tx_pauseframes;
498 drvs->tx_controlframes = port_stats->tx_controlframes;
499 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
500 drvs->jabber_events = port_stats->jabber_events;
501 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
502 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
503 drvs->forwarded_packets = rxf_stats->forwarded_packets;
504 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
505 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
506 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
507 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
508 }
509
510 static void populate_be_v2_stats(struct be_adapter *adapter)
511 {
512 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
513 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
514 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
515 struct be_port_rxf_stats_v2 *port_stats =
516 &rxf_stats->port[adapter->port_num];
517 struct be_drv_stats *drvs = &adapter->drv_stats;
518
519 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
520 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
521 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
522 drvs->rx_pause_frames = port_stats->rx_pause_frames;
523 drvs->rx_crc_errors = port_stats->rx_crc_errors;
524 drvs->rx_control_frames = port_stats->rx_control_frames;
525 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
526 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
527 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
528 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
529 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
530 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
531 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
532 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
533 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
534 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
535 drvs->rx_dropped_header_too_small =
536 port_stats->rx_dropped_header_too_small;
537 drvs->rx_input_fifo_overflow_drop =
538 port_stats->rx_input_fifo_overflow_drop;
539 drvs->rx_address_filtered = port_stats->rx_address_filtered;
540 drvs->rx_alignment_symbol_errors =
541 port_stats->rx_alignment_symbol_errors;
542 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
543 drvs->tx_pauseframes = port_stats->tx_pauseframes;
544 drvs->tx_controlframes = port_stats->tx_controlframes;
545 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
546 drvs->jabber_events = port_stats->jabber_events;
547 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
548 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
549 drvs->forwarded_packets = rxf_stats->forwarded_packets;
550 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
551 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
552 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
553 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
554 if (be_roce_supported(adapter)) {
555 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
556 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
557 drvs->rx_roce_frames = port_stats->roce_frames_received;
558 drvs->roce_drops_crc = port_stats->roce_drops_crc;
559 drvs->roce_drops_payload_len =
560 port_stats->roce_drops_payload_len;
561 }
562 }
563
564 static void populate_lancer_stats(struct be_adapter *adapter)
565 {
566 struct be_drv_stats *drvs = &adapter->drv_stats;
567 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
568
569 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
570 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
571 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
572 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
573 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
574 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
575 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
576 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
577 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
578 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
579 drvs->rx_dropped_tcp_length =
580 pport_stats->rx_dropped_invalid_tcp_length;
581 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
582 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
583 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
584 drvs->rx_dropped_header_too_small =
585 pport_stats->rx_dropped_header_too_small;
586 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
587 drvs->rx_address_filtered =
588 pport_stats->rx_address_filtered +
589 pport_stats->rx_vlan_filtered;
590 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
591 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
592 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
593 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
594 drvs->jabber_events = pport_stats->rx_jabbers;
595 drvs->forwarded_packets = pport_stats->num_forwards_lo;
596 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
597 drvs->rx_drops_too_many_frags =
598 pport_stats->rx_drops_too_many_frags_lo;
599 }
600
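/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit driver accumulator */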
601 static void accumulate_16bit_val(u32 *acc, u16 val)
602 {
603 #define lo(x) (x & 0xFFFF)
604 #define hi(x) (x & 0xFFFF0000)
605 bool wrapped = val < lo(*acc);
606 u32 newacc = hi(*acc) + val;
607
608 if (wrapped)
609 newacc += 65536;
610 WRITE_ONCE(*acc, newacc);
611 }
612
613 static void populate_erx_stats(struct be_adapter *adapter,
614 struct be_rx_obj *rxo, u32 erx_stat)
615 {
616 if (!BEx_chip(adapter))
617 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
618 else
619 /* below erx HW counter can actually wrap around after
620 * 65535. Driver accumulates a 32-bit value
621 */
622 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
623 (u16)erx_stat);
624 }
625
626 void be_parse_stats(struct be_adapter *adapter)
627 {
628 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
629 struct be_rx_obj *rxo;
630 int i;
631 u32 erx_stat;
632
633 if (lancer_chip(adapter)) {
634 populate_lancer_stats(adapter);
635 } else {
636 if (BE2_chip(adapter))
637 populate_be_v0_stats(adapter);
638 else if (BE3_chip(adapter))
639 /* for BE3 */
640 populate_be_v1_stats(adapter);
641 else
642 populate_be_v2_stats(adapter);
643
644 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
645 for_all_rx_queues(adapter, rxo, i) {
646 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
647 populate_erx_stats(adapter, rxo, erx_stat);
648 }
649 }
650 }
651
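/* Aggregate per-queue SW counters and FW-derived error stats into rtnl_link_stats64 */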
652 static void be_get_stats64(struct net_device *netdev,
653 struct rtnl_link_stats64 *stats)
654 {
655 struct be_adapter *adapter = netdev_priv(netdev);
656 struct be_drv_stats *drvs = &adapter->drv_stats;
657 struct be_rx_obj *rxo;
658 struct be_tx_obj *txo;
659 u64 pkts, bytes;
660 unsigned int start;
661 int i;
662
663 for_all_rx_queues(adapter, rxo, i) {
664 const struct be_rx_stats *rx_stats = rx_stats(rxo);
665
666 do {
667 start = u64_stats_fetch_begin(&rx_stats->sync);
668 pkts = rx_stats(rxo)->rx_pkts;
669 bytes = rx_stats(rxo)->rx_bytes;
670 } while (u64_stats_fetch_retry(&rx_stats->sync, start));
671 stats->rx_packets += pkts;
672 stats->rx_bytes += bytes;
673 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
674 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
675 rx_stats(rxo)->rx_drops_no_frags;
676 }
677
678 for_all_tx_queues(adapter, txo, i) {
679 const struct be_tx_stats *tx_stats = tx_stats(txo);
680
681 do {
682 start = u64_stats_fetch_begin(&tx_stats->sync);
683 pkts = tx_stats(txo)->tx_pkts;
684 bytes = tx_stats(txo)->tx_bytes;
685 } while (u64_stats_fetch_retry(&tx_stats->sync, start));
686 stats->tx_packets += pkts;
687 stats->tx_bytes += bytes;
688 }
689
690 /* bad pkts received */
691 stats->rx_errors = drvs->rx_crc_errors +
692 drvs->rx_alignment_symbol_errors +
693 drvs->rx_in_range_errors +
694 drvs->rx_out_range_errors +
695 drvs->rx_frame_too_long +
696 drvs->rx_dropped_too_small +
697 drvs->rx_dropped_too_short +
698 drvs->rx_dropped_header_too_small +
699 drvs->rx_dropped_tcp_length +
700 drvs->rx_dropped_runt;
701
702 /* detailed rx errors */
703 stats->rx_length_errors = drvs->rx_in_range_errors +
704 drvs->rx_out_range_errors +
705 drvs->rx_frame_too_long;
706
707 stats->rx_crc_errors = drvs->rx_crc_errors;
708
709 /* frame alignment errors */
710 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
711
712 /* receiver fifo overrun */
713 /* drops_no_pbuf is not per i/f, it's per BE card */
714 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
715 drvs->rx_input_fifo_overflow_drop +
716 drvs->rx_drops_no_pbuf;
717 }
718
719 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
720 {
721 struct net_device *netdev = adapter->netdev;
722
723 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
724 netif_carrier_off(netdev);
725 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
726 }
727
728 if (link_status)
729 netif_carrier_on(netdev);
730 else
731 netif_carrier_off(netdev);
732
733 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
734 }
735
736 static int be_gso_hdr_len(struct sk_buff *skb)
737 {
738 if (skb->encapsulation)
739 return skb_inner_tcp_all_headers(skb);
740
741 return skb_tcp_all_headers(skb);
742 }
743
744 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
745 {
746 struct be_tx_stats *stats = tx_stats(txo);
747 u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
748 /* Account for headers which get duplicated in TSO pkt */
749 u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
750
751 u64_stats_update_begin(&stats->sync);
752 stats->tx_reqs++;
753 stats->tx_bytes += skb->len + dup_hdr_len;
754 stats->tx_pkts += tx_pkts;
755 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
756 stats->tx_vxlan_offload_pkts += tx_pkts;
757 u64_stats_update_end(&stats->sync);
758 }
759
760 /* Returns number of WRBs needed for the skb */
761 static u32 skb_wrb_cnt(struct sk_buff *skb)
762 {
763 /* +1 for the header wrb */
764 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
765 }
766
767 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
768 {
769 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
770 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
771 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
772 wrb->rsvd0 = 0;
773 }
774
775 /* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
776 * to avoid the swap and shift/mask operations in wrb_fill().
777 */
778 static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
779 {
780 wrb->frag_pa_hi = 0;
781 wrb->frag_pa_lo = 0;
782 wrb->frag_len = 0;
783 wrb->rsvd0 = 0;
784 }
785
786 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
787 struct sk_buff *skb)
788 {
789 u8 vlan_prio;
790 u16 vlan_tag;
791
792 vlan_tag = skb_vlan_tag_get(skb);
793 vlan_prio = skb_vlan_tag_get_prio(skb);
794 /* If vlan priority provided by OS is NOT in available bmap */
795 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
796 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
797 adapter->recommended_prio_bits;
798
799 return vlan_tag;
800 }
801
802 /* Used only for IP tunnel packets */
803 static u16 skb_inner_ip_proto(struct sk_buff *skb)
804 {
805 return (inner_ip_hdr(skb)->version == 4) ?
806 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
807 }
808
809 static u16 skb_ip_proto(struct sk_buff *skb)
810 {
811 return (ip_hdr(skb)->version == 4) ?
812 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
813 }
814
815 static inline bool be_is_txq_full(struct be_tx_obj *txo)
816 {
817 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
818 }
819
820 static inline bool be_can_txq_wake(struct be_tx_obj *txo)
821 {
822 return atomic_read(&txo->q.used) < txo->q.len / 2;
823 }
824
825 static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
826 {
827 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
828 }
829
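/* Derive the TX WRB feature flags (LSO, csum offload, VLAN tagging, CRC) from the skb */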
830 static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
831 struct sk_buff *skb,
832 struct be_wrb_params *wrb_params)
833 {
834 u16 proto;
835
836 if (skb_is_gso(skb)) {
837 BE_WRB_F_SET(wrb_params->features, LSO, 1);
838 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
839 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
840 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
841 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 if (skb->encapsulation) {
843 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
844 proto = skb_inner_ip_proto(skb);
845 } else {
846 proto = skb_ip_proto(skb);
847 }
848 if (proto == IPPROTO_TCP)
849 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
850 else if (proto == IPPROTO_UDP)
851 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
852 }
853
854 if (skb_vlan_tag_present(skb)) {
855 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
856 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
857 }
858
859 BE_WRB_F_SET(wrb_params->features, CRC, 1);
860 }
861
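/* Populate the TX header WRB from the previously computed wrb_params and skb */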
862 static void wrb_fill_hdr(struct be_adapter *adapter,
863 struct be_eth_hdr_wrb *hdr,
864 struct be_wrb_params *wrb_params,
865 struct sk_buff *skb)
866 {
867 memset(hdr, 0, sizeof(*hdr));
868
869 SET_TX_WRB_HDR_BITS(crc, hdr,
870 BE_WRB_F_GET(wrb_params->features, CRC));
871 SET_TX_WRB_HDR_BITS(ipcs, hdr,
872 BE_WRB_F_GET(wrb_params->features, IPCS));
873 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
874 BE_WRB_F_GET(wrb_params->features, TCPCS));
875 SET_TX_WRB_HDR_BITS(udpcs, hdr,
876 BE_WRB_F_GET(wrb_params->features, UDPCS));
877
878 SET_TX_WRB_HDR_BITS(lso, hdr,
879 BE_WRB_F_GET(wrb_params->features, LSO));
880 SET_TX_WRB_HDR_BITS(lso6, hdr,
881 BE_WRB_F_GET(wrb_params->features, LSO6));
882 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
883
884 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
885 * hack is not needed, the evt bit is set while ringing DB.
886 */
887 SET_TX_WRB_HDR_BITS(event, hdr,
888 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
889 SET_TX_WRB_HDR_BITS(vlan, hdr,
890 BE_WRB_F_GET(wrb_params->features, VLAN));
891 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
892
893 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
894 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
895 SET_TX_WRB_HDR_BITS(mgmt, hdr,
896 BE_WRB_F_GET(wrb_params->features, OS2BMC));
897 }
898
899 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
900 bool unmap_single)
901 {
902 dma_addr_t dma;
903 u32 frag_len = le32_to_cpu(wrb->frag_len);
904
906 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
907 (u64)le32_to_cpu(wrb->frag_pa_lo);
908 if (frag_len) {
909 if (unmap_single)
910 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
911 else
912 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
913 }
914 }
915
916 /* Grab a WRB header for xmit */
917 static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
918 {
919 u32 head = txo->q.head;
920
921 queue_head_inc(&txo->q);
922 return head;
923 }
924
925 /* Set up the WRB header for xmit */
926 static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
927 struct be_tx_obj *txo,
928 struct be_wrb_params *wrb_params,
929 struct sk_buff *skb, u16 head)
930 {
931 u32 num_frags = skb_wrb_cnt(skb);
932 struct be_queue_info *txq = &txo->q;
933 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
934
935 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
936 be_dws_cpu_to_le(hdr, sizeof(*hdr));
937
938 BUG_ON(txo->sent_skb_list[head]);
939 txo->sent_skb_list[head] = skb;
940 txo->last_req_hdr = head;
941 atomic_add(num_frags, &txq->used);
942 txo->last_req_wrb_cnt = num_frags;
943 txo->pend_wrb_cnt += num_frags;
944 }
945
946 /* Setup a WRB fragment (buffer descriptor) for xmit */
947 static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
948 int len)
949 {
950 struct be_eth_wrb *wrb;
951 struct be_queue_info *txq = &txo->q;
952
953 wrb = queue_head_node(txq);
954 wrb_fill(wrb, busaddr, len);
955 queue_head_inc(txq);
956 }
957
958 /* Bring the queue back to the state it was in before be_xmit_enqueue() routine
959 * was invoked. The producer index is restored to the previous packet and the
960 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
961 */
962 static void be_xmit_restore(struct be_adapter *adapter,
963 struct be_tx_obj *txo, u32 head, bool map_single,
964 u32 copied)
965 {
966 struct device *dev;
967 struct be_eth_wrb *wrb;
968 struct be_queue_info *txq = &txo->q;
969
970 dev = &adapter->pdev->dev;
971 txq->head = head;
972
973 /* skip the first wrb (hdr); it's not mapped */
974 queue_head_inc(txq);
975 while (copied) {
976 wrb = queue_head_node(txq);
977 unmap_tx_frag(dev, wrb, map_single);
978 map_single = false;
979 copied -= le32_to_cpu(wrb->frag_len);
980 queue_head_inc(txq);
981 }
982
983 txq->head = head;
984 }
985
986 /* Enqueue the given packet for transmit. This routine allocates WRBs for the
987 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
988 * of WRBs used up by the packet.
989 */
990 static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
991 struct sk_buff *skb,
992 struct be_wrb_params *wrb_params)
993 {
994 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
995 struct device *dev = &adapter->pdev->dev;
996 bool map_single = false;
997 u32 head;
998 dma_addr_t busaddr;
999 int len;
1000
1001 head = be_tx_get_wrb_hdr(txo);
1002
1003 if (skb->len > skb->data_len) {
1004 len = skb_headlen(skb);
1005
1006 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1007 if (dma_mapping_error(dev, busaddr))
1008 goto dma_err;
1009 map_single = true;
1010 be_tx_setup_wrb_frag(txo, busaddr, len);
1011 copied += len;
1012 }
1013
1014 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1015 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1016 len = skb_frag_size(frag);
1017
1018 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1019 if (dma_mapping_error(dev, busaddr))
1020 goto dma_err;
1021 be_tx_setup_wrb_frag(txo, busaddr, len);
1022 copied += len;
1023 }
1024
1025 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1026
1027 be_tx_stats_update(txo, skb);
1028 return wrb_cnt;
1029
1030 dma_err:
1031 adapter->drv_stats.dma_map_errors++;
1032 be_xmit_restore(adapter, txo, head, map_single, copied);
1033 return 0;
1034 }
1035
1036 static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
1037 {
1038 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
1039 }
1040
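/* SW-insert the VLAN tag (and the QnQ outer tag, if configured) into the pkt; used when HW VLAN tagging must be skipped */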
1041 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
1042 struct sk_buff *skb,
1043 struct be_wrb_params
1044 *wrb_params)
1045 {
1046 bool insert_vlan = false;
1047 u16 vlan_tag = 0;
1048
1049 skb = skb_share_check(skb, GFP_ATOMIC);
1050 if (unlikely(!skb))
1051 return skb;
1052
1053 if (skb_vlan_tag_present(skb)) {
1054 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
1055 insert_vlan = true;
1056 }
1057
1058 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1059 if (!insert_vlan) {
1060 vlan_tag = adapter->pvid;
1061 insert_vlan = true;
1062 }
1063 /* F/W workaround: setting skip_hw_vlan = 1 informs the F/W to
1064 * skip VLAN insertion
1065 */
1066 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1067 }
1068
1069 if (insert_vlan) {
1070 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1071 vlan_tag);
1072 if (unlikely(!skb))
1073 return skb;
1074 __vlan_hwaccel_clear_tag(skb);
1075 }
1076
1077 /* Insert the outer VLAN, if any */
1078 if (adapter->qnq_vid) {
1079 vlan_tag = adapter->qnq_vid;
1080 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1081 vlan_tag);
1082 if (unlikely(!skb))
1083 return skb;
1084 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1085 }
1086
1087 return skb;
1088 }
1089
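/* Detect the "offending" IPv6 pkts (extension hdr with hdrlen 0xff) that can trigger the TX stall HW issue */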
1090 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1091 {
1092 struct ethhdr *eh = (struct ethhdr *)skb->data;
1093 u16 offset = ETH_HLEN;
1094
1095 if (eh->h_proto == htons(ETH_P_IPV6)) {
1096 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1097
1098 offset += sizeof(struct ipv6hdr);
1099 if (ip6h->nexthdr != NEXTHDR_TCP &&
1100 ip6h->nexthdr != NEXTHDR_UDP) {
1101 struct ipv6_opt_hdr *ehdr =
1102 (struct ipv6_opt_hdr *)(skb->data + offset);
1103
1104 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1105 if (ehdr->hdrlen == 0xff)
1106 return true;
1107 }
1108 }
1109 return false;
1110 }
1111
1112 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1113 {
1114 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
1115 }
1116
1117 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
1118 {
1119 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
1120 }
1121
1122 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb,
1124 struct be_wrb_params
1125 *wrb_params)
1126 {
1127 struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
1128 unsigned int eth_hdr_len;
1129 struct iphdr *ip;
1130
1131 /* For padded packets, BE HW modifies tot_len field in IP header
1132 * incorrectly when VLAN tag is inserted by HW.
1133 * For padded packets, Lancer computes incorrect checksum.
1134 */
1135 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1136 VLAN_ETH_HLEN : ETH_HLEN;
1137 if (skb->len <= 60 &&
1138 (lancer_chip(adapter) || BE3_chip(adapter) ||
1139 skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
1140 ip = (struct iphdr *)ip_hdr(skb);
1141 if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
1142 goto tx_drop;
1143 }
1144
1145 /* If vlan tag is already inlined in the packet, skip HW VLAN
1146 * tagging in pvid-tagging mode
1147 */
1148 if (be_pvid_tagging_enabled(adapter) &&
1149 veh->h_vlan_proto == htons(ETH_P_8021Q))
1150 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1151
1152 /* HW has a bug wherein it will calculate CSUM for VLAN
1153 * pkts even when csum offload is disabled.
1154 * Manually insert the VLAN tag in the pkt.
1155 */
1156 if (skb->ip_summed != CHECKSUM_PARTIAL &&
1157 skb_vlan_tag_present(skb)) {
1158 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1159 if (unlikely(!skb))
1160 goto err;
1161 }
1162
1163 /* HW may lockup when VLAN HW tagging is requested on
1164 * certain ipv6 packets. Drop such pkts if the HW workaround to
1165 * skip HW tagging is not enabled by FW.
1166 */
1167 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
1168 (adapter->pvid || adapter->qnq_vid) &&
1169 !qnq_async_evt_rcvd(adapter)))
1170 goto tx_drop;
1171
1172 /* Manual VLAN tag insertion to prevent:
1173 * ASIC lockup when the ASIC inserts VLAN tag into
1174 * certain ipv6 packets. Insert VLAN tags in driver,
1175 * and set event, completion, vlan bits accordingly
1176 * in the Tx WRB.
1177 */
1178 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1179 be_vlan_tag_tx_chk(adapter, skb)) {
1180 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1181 if (unlikely(!skb))
1182 goto err;
1183 }
1184
1185 return skb;
1186 tx_drop:
1187 dev_kfree_skb_any(skb);
1188 err:
1189 return NULL;
1190 }
1191
1192 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1193 struct sk_buff *skb,
1194 struct be_wrb_params *wrb_params)
1195 {
1196 int err;
1197
1198 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1199 * packets that are 32 bytes or less may cause a transmit stall
1200 * on that port. The workaround is to pad such packets
1201 * (len <= 32 bytes) to a minimum length of 36 bytes.
1202 */
1203 if (skb->len <= 32) {
1204 if (skb_put_padto(skb, 36))
1205 return NULL;
1206 }
1207
1208 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1209 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
1210 if (!skb)
1211 return NULL;
1212 }
1213
1214 /* The stack can send us skbs with length greater than
1215 * what the HW can handle. Trim the extra bytes.
1216 */
1217 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1218 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1219 WARN_ON(err);
1220
1221 return skb;
1222 }
1223
1224 static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1225 {
1226 struct be_queue_info *txq = &txo->q;
1227 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1228
1229 /* Mark the last request eventable if it hasn't been marked already */
1230 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1231 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1232
1233 /* compose a dummy wrb if there are odd set of wrbs to notify */
1234 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1235 wrb_fill_dummy(queue_head_node(txq));
1236 queue_head_inc(txq);
1237 atomic_inc(&txq->used);
1238 txo->pend_wrb_cnt++;
1239 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1240 TX_HDR_WRB_NUM_SHIFT);
1241 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1242 TX_HDR_WRB_NUM_SHIFT);
1243 }
1244 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1245 txo->pend_wrb_cnt = 0;
1246 }
1247
1248 /* OS2BMC related */
1249
1250 #define DHCP_CLIENT_PORT 68
1251 #define DHCP_SERVER_PORT 67
1252 #define NET_BIOS_PORT1 137
1253 #define NET_BIOS_PORT2 138
1254 #define DHCPV6_RAS_PORT 547
1255
1256 #define is_mc_allowed_on_bmc(adapter, eh) \
1257 (!is_multicast_filt_enabled(adapter) && \
1258 is_multicast_ether_addr(eh->h_dest) && \
1259 !is_broadcast_ether_addr(eh->h_dest))
1260
1261 #define is_bc_allowed_on_bmc(adapter, eh) \
1262 (!is_broadcast_filt_enabled(adapter) && \
1263 is_broadcast_ether_addr(eh->h_dest))
1264
1265 #define is_arp_allowed_on_bmc(adapter, skb) \
1266 (is_arp(skb) && is_arp_filt_enabled(adapter))
1267
1268 #define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1269
1270 #define is_arp_filt_enabled(adapter) \
1271 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1272
1273 #define is_dhcp_client_filt_enabled(adapter) \
1274 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1275
1276 #define is_dhcp_srvr_filt_enabled(adapter) \
1277 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1278
1279 #define is_nbios_filt_enabled(adapter) \
1280 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1281
1282 #define is_ipv6_na_filt_enabled(adapter) \
1283 (adapter->bmc_filt_mask & \
1284 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1285
1286 #define is_ipv6_ra_filt_enabled(adapter) \
1287 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1288
1289 #define is_ipv6_ras_filt_enabled(adapter) \
1290 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1291
1292 #define is_broadcast_filt_enabled(adapter) \
1293 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1294
1295 #define is_multicast_filt_enabled(adapter) \
1296 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1297
1298 static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1299 struct sk_buff **skb,
1300 struct be_wrb_params *wrb_params)
1301 {
1302 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1303 bool os2bmc = false;
1304
1305 if (!be_is_os2bmc_enabled(adapter))
1306 goto done;
1307
1308 if (!is_multicast_ether_addr(eh->h_dest))
1309 goto done;
1310
1311 if (is_mc_allowed_on_bmc(adapter, eh) ||
1312 is_bc_allowed_on_bmc(adapter, eh) ||
1313 is_arp_allowed_on_bmc(adapter, (*skb))) {
1314 os2bmc = true;
1315 goto done;
1316 }
1317
1318 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1319 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1320 u8 nexthdr = hdr->nexthdr;
1321
1322 if (nexthdr == IPPROTO_ICMPV6) {
1323 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1324
1325 switch (icmp6->icmp6_type) {
1326 case NDISC_ROUTER_ADVERTISEMENT:
1327 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1328 goto done;
1329 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1330 os2bmc = is_ipv6_na_filt_enabled(adapter);
1331 goto done;
1332 default:
1333 break;
1334 }
1335 }
1336 }
1337
1338 if (is_udp_pkt((*skb))) {
1339 struct udphdr *udp = udp_hdr((*skb));
1340
1341 switch (ntohs(udp->dest)) {
1342 case DHCP_CLIENT_PORT:
1343 os2bmc = is_dhcp_client_filt_enabled(adapter);
1344 goto done;
1345 case DHCP_SERVER_PORT:
1346 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1347 goto done;
1348 case NET_BIOS_PORT1:
1349 case NET_BIOS_PORT2:
1350 os2bmc = is_nbios_filt_enabled(adapter);
1351 goto done;
1352 case DHCPV6_RAS_PORT:
1353 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1354 goto done;
1355 default:
1356 break;
1357 }
1358 }
1359 done:
1360 /* For VLAN packets destined to the BMC, the ASIC expects
1361 * the VLAN tag to be inline in the packet.
1362 */
1363 if (os2bmc)
1364 *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
1365
1366 return os2bmc;
1367 }
1368
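/* Transmit handler: apply HW workarounds, map and enqueue the skb, and ring the TX doorbell when the stack has no more pkts pending */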
1369 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1370 {
1371 struct be_adapter *adapter = netdev_priv(netdev);
1372 u16 q_idx = skb_get_queue_mapping(skb);
1373 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1374 struct be_wrb_params wrb_params = { 0 };
1375 bool flush = !netdev_xmit_more();
1376 u16 wrb_cnt;
1377
1378 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
1379 if (unlikely(!skb))
1380 goto drop;
1381
1382 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1383
1384 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1385 if (unlikely(!wrb_cnt))
1386 goto drop_skb;
1387
1388 /* if os2bmc is enabled and if the pkt is destined to bmc,
1389 * enqueue the pkt a 2nd time with mgmt bit set.
1390 */
1391 if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
1392 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1393 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1394 if (unlikely(!wrb_cnt))
1395 goto drop_skb;
1396 else
1397 skb_get(skb);
1398 }
1399
1400 if (be_is_txq_full(txo)) {
1401 netif_stop_subqueue(netdev, q_idx);
1402 tx_stats(txo)->tx_stops++;
1403 }
1404
1405 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1406 be_xmit_flush(adapter, txo);
1407
1408 return NETDEV_TX_OK;
1409 drop_skb:
1410 dev_kfree_skb_any(skb);
1411 drop:
1412 tx_stats(txo)->tx_drv_drops++;
1413 /* Flush the already enqueued tx requests */
1414 if (flush && txo->pend_wrb_cnt)
1415 be_xmit_flush(adapter, txo);
1416
1417 return NETDEV_TX_OK;
1418 }
1419
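/* TX timeout handler: dump TX queue/CQ contents and pending skbs for debugging; on Lancer, also initiate a FW reset */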
1420 static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1421 {
1422 struct be_adapter *adapter = netdev_priv(netdev);
1423 struct device *dev = &adapter->pdev->dev;
1424 struct be_tx_obj *txo;
1425 struct sk_buff *skb;
1426 struct tcphdr *tcphdr;
1427 struct udphdr *udphdr;
1428 u32 *entry;
1429 int status;
1430 int i, j;
1431
1432 for_all_tx_queues(adapter, txo, i) {
1433 dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
1434 i, txo->q.head, txo->q.tail,
1435 atomic_read(&txo->q.used), txo->q.id);
1436
1437 entry = txo->q.dma_mem.va;
1438 for (j = 0; j < TX_Q_LEN * 4; j += 4) {
1439 if (entry[j] != 0 || entry[j + 1] != 0 ||
1440 entry[j + 2] != 0 || entry[j + 3] != 0) {
1441 dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1442 j, entry[j], entry[j + 1],
1443 entry[j + 2], entry[j + 3]);
1444 }
1445 }
1446
1447 entry = txo->cq.dma_mem.va;
1448 dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
1449 i, txo->cq.head, txo->cq.tail,
1450 atomic_read(&txo->cq.used));
1451 for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
1452 if (entry[j] != 0 || entry[j + 1] != 0 ||
1453 entry[j + 2] != 0 || entry[j + 3] != 0) {
1454 dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1455 j, entry[j], entry[j + 1],
1456 entry[j + 2], entry[j + 3]);
1457 }
1458 }
1459
1460 for (j = 0; j < TX_Q_LEN; j++) {
1461 if (txo->sent_skb_list[j]) {
1462 skb = txo->sent_skb_list[j];
1463 if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1464 tcphdr = tcp_hdr(skb);
1465 dev_info(dev, "TCP source port %d\n",
1466 ntohs(tcphdr->source));
1467 dev_info(dev, "TCP dest port %d\n",
1468 ntohs(tcphdr->dest));
1469 dev_info(dev, "TCP sequence num %u\n",
1470 ntohl(tcphdr->seq));
1471 dev_info(dev, "TCP ack_seq %u\n",
1472 ntohl(tcphdr->ack_seq));
1473 } else if (ip_hdr(skb)->protocol ==
1474 IPPROTO_UDP) {
1475 udphdr = udp_hdr(skb);
1476 dev_info(dev, "UDP source port %d\n",
1477 ntohs(udphdr->source));
1478 dev_info(dev, "UDP dest port %d\n",
1479 ntohs(udphdr->dest));
1480 }
1481 dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
1482 j, skb, skb->len, skb->protocol);
1483 }
1484 }
1485 }
1486
1487 if (lancer_chip(adapter)) {
1488 dev_info(dev, "Initiating reset due to tx timeout\n");
1489 dev_info(dev, "Resetting adapter\n");
1490 status = lancer_physdev_ctrl(adapter,
1491 PHYSDEV_CONTROL_FW_RESET_MASK);
1492 if (status)
1493 dev_err(dev, "Reset failed .. Reboot server\n");
1494 }
1495 }
1496
1497 static inline bool be_in_all_promisc(struct be_adapter *adapter)
1498 {
1499 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1500 BE_IF_FLAGS_ALL_PROMISCUOUS;
1501 }
1502
1503 static int be_set_vlan_promisc(struct be_adapter *adapter)
1504 {
1505 struct device *dev = &adapter->pdev->dev;
1506 int status;
1507
1508 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1509 return 0;
1510
1511 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1512 if (!status) {
1513 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1514 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1515 } else {
1516 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1517 }
1518 return status;
1519 }
1520
1521 static int be_clear_vlan_promisc(struct be_adapter *adapter)
1522 {
1523 struct device *dev = &adapter->pdev->dev;
1524 int status;
1525
1526 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1527 if (!status) {
1528 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1529 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1530 }
1531 return status;
1532 }
1533
1534 /*
1535 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1536 * If the user configures more, place BE in vlan promiscuous mode.
1537 */
1538 static int be_vid_config(struct be_adapter *adapter)
1539 {
1540 struct device *dev = &adapter->pdev->dev;
1541 u16 vids[BE_NUM_VLANS_SUPPORTED];
1542 u16 num = 0, i = 0;
1543 int status = 0;
1544
1545 /* No need to change the VLAN state if the I/F is in promiscuous */
1546 if (adapter->netdev->flags & IFF_PROMISC)
1547 return 0;
1548
1549 if (adapter->vlans_added > be_max_vlans(adapter))
1550 return be_set_vlan_promisc(adapter);
1551
1552 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1553 status = be_clear_vlan_promisc(adapter);
1554 if (status)
1555 return status;
1556 }
1557 /* Construct VLAN Table to give to HW */
1558 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1559 vids[num++] = cpu_to_le16(i);
1560
1561 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1562 if (status) {
1563 dev_err(dev, "Setting HW VLAN filtering failed\n");
1564 /* Set to VLAN promisc mode as setting VLAN filter failed */
1565 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1566 addl_status(status) ==
1567 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1568 return be_set_vlan_promisc(adapter);
1569 }
1570 return status;
1571 }
1572
1573 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1574 {
1575 struct be_adapter *adapter = netdev_priv(netdev);
1576 int status = 0;
1577
1578 mutex_lock(&adapter->rx_filter_lock);
1579
1580 /* Packets with VID 0 are always received by Lancer by default */
1581 if (lancer_chip(adapter) && vid == 0)
1582 goto done;
1583
1584 if (test_bit(vid, adapter->vids))
1585 goto done;
1586
1587 set_bit(vid, adapter->vids);
1588 adapter->vlans_added++;
1589
1590 status = be_vid_config(adapter);
1591 done:
1592 mutex_unlock(&adapter->rx_filter_lock);
1593 return status;
1594 }
1595
1596 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1597 {
1598 struct be_adapter *adapter = netdev_priv(netdev);
1599 int status = 0;
1600
1601 mutex_lock(&adapter->rx_filter_lock);
1602
1603 /* Packets with VID 0 are always received by Lancer by default */
1604 if (lancer_chip(adapter) && vid == 0)
1605 goto done;
1606
1607 if (!test_bit(vid, adapter->vids))
1608 goto done;
1609
1610 clear_bit(vid, adapter->vids);
1611 adapter->vlans_added--;
1612
1613 status = be_vid_config(adapter);
1614 done:
1615 mutex_unlock(&adapter->rx_filter_lock);
1616 return status;
1617 }
1618
1619 static void be_set_all_promisc(struct be_adapter *adapter)
1620 {
1621 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1622 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1623 }
1624
1625 static void be_set_mc_promisc(struct be_adapter *adapter)
1626 {
1627 int status;
1628
1629 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1630 return;
1631
1632 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1633 if (!status)
1634 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1635 }
1636
1637 static void be_set_uc_promisc(struct be_adapter *adapter)
1638 {
1639 int status;
1640
1641 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1642 return;
1643
1644 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
1645 if (!status)
1646 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1647 }
1648
1649 static void be_clear_uc_promisc(struct be_adapter *adapter)
1650 {
1651 int status;
1652
1653 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1654 return;
1655
1656 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1657 if (!status)
1658 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1659 }
1660
1661 /* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1662 * We use a single callback function for both sync and unsync. We really don't
1663 * add/remove addresses through this callback. But, we use it to detect changes
1664 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1665 */
1666 static int be_uc_list_update(struct net_device *netdev,
1667 const unsigned char *addr)
1668 {
1669 struct be_adapter *adapter = netdev_priv(netdev);
1670
1671 adapter->update_uc_list = true;
1672 return 0;
1673 }
1674
1675 static int be_mc_list_update(struct net_device *netdev,
1676 const unsigned char *addr)
1677 {
1678 struct be_adapter *adapter = netdev_priv(netdev);
1679
1680 adapter->update_mc_list = true;
1681 return 0;
1682 }
1683
1684 static void be_set_mc_list(struct be_adapter *adapter)
1685 {
1686 struct net_device *netdev = adapter->netdev;
1687 struct netdev_hw_addr *ha;
1688 bool mc_promisc = false;
1689 int status;
1690
1691 netif_addr_lock_bh(netdev);
1692 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1693
1694 if (netdev->flags & IFF_PROMISC) {
1695 adapter->update_mc_list = false;
1696 } else if (netdev->flags & IFF_ALLMULTI ||
1697 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1698 /* Enable multicast promisc if num configured exceeds
1699 * what we support
1700 */
1701 mc_promisc = true;
1702 adapter->update_mc_list = false;
1703 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1704 /* Update mc-list unconditionally if the iface was previously
1705 * in mc-promisc mode and now is out of that mode.
1706 */
1707 adapter->update_mc_list = true;
1708 }
1709
1710 if (adapter->update_mc_list) {
1711 int i = 0;
1712
1713 /* cache the mc-list in adapter */
1714 netdev_for_each_mc_addr(ha, netdev) {
1715 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1716 i++;
1717 }
1718 adapter->mc_count = netdev_mc_count(netdev);
1719 }
1720 netif_addr_unlock_bh(netdev);
1721
1722 if (mc_promisc) {
1723 be_set_mc_promisc(adapter);
1724 } else if (adapter->update_mc_list) {
1725 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1726 if (!status)
1727 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1728 else
1729 be_set_mc_promisc(adapter);
1730
1731 adapter->update_mc_list = false;
1732 }
1733 }
1734
1735 static void be_clear_mc_list(struct be_adapter *adapter)
1736 {
1737 struct net_device *netdev = adapter->netdev;
1738
1739 __dev_mc_unsync(netdev, NULL);
1740 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
1741 adapter->mc_count = 0;
1742 }
1743
1744 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1745 {
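	/* Descriptive note: if the requested UC address is the primary MAC
	 * already programmed at pmac_id[0], reuse that filter entry rather
	 * than consuming another MAC filter slot on the interface.
	 */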
1746 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1747 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1748 return 0;
1749 }
1750
1751 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1752 adapter->if_handle,
1753 &adapter->pmac_id[uc_idx + 1], 0);
1754 }
1755
1756 static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1757 {
1758 if (pmac_id == adapter->pmac_id[0])
1759 return;
1760
1761 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1762 }
1763
1764 static void be_set_uc_list(struct be_adapter *adapter)
1765 {
1766 struct net_device *netdev = adapter->netdev;
1767 struct netdev_hw_addr *ha;
1768 bool uc_promisc = false;
1769 int curr_uc_macs = 0, i;
1770
1771 netif_addr_lock_bh(netdev);
1772 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
1773
1774 if (netdev->flags & IFF_PROMISC) {
1775 adapter->update_uc_list = false;
1776 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1777 uc_promisc = true;
1778 adapter->update_uc_list = false;
1779 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1780 /* Update uc-list unconditionally if the iface was previously
1781 * in uc-promisc mode and now is out of that mode.
1782 */
1783 adapter->update_uc_list = true;
1784 }
1785
1786 if (adapter->update_uc_list) {
1787 /* cache the uc-list in adapter array */
1788 i = 0;
1789 netdev_for_each_uc_addr(ha, netdev) {
1790 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1791 i++;
1792 }
1793 curr_uc_macs = netdev_uc_count(netdev);
1794 }
1795 netif_addr_unlock_bh(netdev);
1796
1797 if (uc_promisc) {
1798 be_set_uc_promisc(adapter);
1799 } else if (adapter->update_uc_list) {
1800 be_clear_uc_promisc(adapter);
1801
1802 for (i = 0; i < adapter->uc_macs; i++)
1803 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1804
1805 for (i = 0; i < curr_uc_macs; i++)
1806 be_uc_mac_add(adapter, i);
1807 adapter->uc_macs = curr_uc_macs;
1808 adapter->update_uc_list = false;
1809 }
1810 }
1811
1812 static void be_clear_uc_list(struct be_adapter *adapter)
1813 {
1814 struct net_device *netdev = adapter->netdev;
1815 int i;
1816
1817 __dev_uc_unsync(netdev, NULL);
1818 for (i = 0; i < adapter->uc_macs; i++)
1819 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1820
1821 adapter->uc_macs = 0;
1822 }
1823
1824 static void __be_set_rx_mode(struct be_adapter *adapter)
1825 {
1826 struct net_device *netdev = adapter->netdev;
1827
1828 mutex_lock(&adapter->rx_filter_lock);
1829
1830 if (netdev->flags & IFF_PROMISC) {
1831 if (!be_in_all_promisc(adapter))
1832 be_set_all_promisc(adapter);
1833 } else if (be_in_all_promisc(adapter)) {
1834 /* We need to re-program the vlan-list or clear
1835 * vlan-promisc mode (if needed) when the interface
1836 * comes out of promisc mode.
1837 */
1838 be_vid_config(adapter);
1839 }
1840
1841 be_set_uc_list(adapter);
1842 be_set_mc_list(adapter);
1843
1844 mutex_unlock(&adapter->rx_filter_lock);
1845 }
1846
1847 static void be_work_set_rx_mode(struct work_struct *work)
1848 {
1849 struct be_cmd_work *cmd_work =
1850 container_of(work, struct be_cmd_work, work);
1851
1852 __be_set_rx_mode(cmd_work->adapter);
1853 kfree(cmd_work);
1854 }
1855
1856 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1857 {
1858 struct be_adapter *adapter = netdev_priv(netdev);
1859 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1860 int status;
1861
1862 if (!sriov_enabled(adapter))
1863 return -EPERM;
1864
1865 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1866 return -EINVAL;
1867
1868 /* Proceed further only if user provided MAC is different
1869 * from active MAC
1870 */
1871 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1872 return 0;
1873
1874 if (BEx_chip(adapter)) {
1875 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1876 vf + 1);
1877
1878 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1879 &vf_cfg->pmac_id, vf + 1);
1880 } else {
1881 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1882 vf + 1);
1883 }
1884
1885 if (status) {
1886 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1887 mac, vf, status);
1888 return be_cmd_status(status);
1889 }
1890
1891 ether_addr_copy(vf_cfg->mac_addr, mac);
1892
1893 return 0;
1894 }
1895
1896 static int be_get_vf_config(struct net_device *netdev, int vf,
1897 struct ifla_vf_info *vi)
1898 {
1899 struct be_adapter *adapter = netdev_priv(netdev);
1900 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1901
1902 if (!sriov_enabled(adapter))
1903 return -EPERM;
1904
1905 if (vf >= adapter->num_vfs)
1906 return -EINVAL;
1907
1908 vi->vf = vf;
1909 vi->max_tx_rate = vf_cfg->tx_rate;
1910 vi->min_tx_rate = 0;
1911 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1912 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1913 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1914 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1915 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1916
1917 return 0;
1918 }
1919
1920 static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1921 {
1922 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1923 u16 vids[BE_NUM_VLANS_SUPPORTED];
1924 int vf_if_id = vf_cfg->if_handle;
1925 int status;
1926
1927 /* Enable Transparent VLAN Tagging */
1928 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1929 if (status)
1930 return status;
1931
1932 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1933 vids[0] = 0;
1934 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1935 if (!status)
1936 dev_info(&adapter->pdev->dev,
1937 "Cleared guest VLANs on VF%d", vf);
1938
1939 /* After TVT is enabled, disallow VFs to program VLAN filters */
1940 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1941 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1942 ~BE_PRIV_FILTMGMT, vf + 1);
1943 if (!status)
1944 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1945 }
1946 return 0;
1947 }
1948
1949 static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1950 {
1951 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1952 struct device *dev = &adapter->pdev->dev;
1953 int status;
1954
1955 /* Reset Transparent VLAN Tagging. */
1956 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1957 vf_cfg->if_handle, 0, 0);
1958 if (status)
1959 return status;
1960
1961 /* Allow VFs to program VLAN filtering */
1962 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1963 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1964 BE_PRIV_FILTMGMT, vf + 1);
1965 if (!status) {
1966 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1967 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1968 }
1969 }
1970
1971 dev_info(dev,
1972 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1973 return 0;
1974 }
1975
1976 static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1977 __be16 vlan_proto)
1978 {
1979 struct be_adapter *adapter = netdev_priv(netdev);
1980 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1981 int status;
1982
1983 if (!sriov_enabled(adapter))
1984 return -EPERM;
1985
1986 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1987 return -EINVAL;
1988
1989 if (vlan_proto != htons(ETH_P_8021Q))
1990 return -EPROTONOSUPPORT;
1991
1992 if (vlan || qos) {
1993 vlan |= qos << VLAN_PRIO_SHIFT;
1994 status = be_set_vf_tvt(adapter, vf, vlan);
1995 } else {
1996 status = be_clear_vf_tvt(adapter, vf);
1997 }
1998
1999 if (status) {
2000 dev_err(&adapter->pdev->dev,
2001 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
2002 status);
2003 return be_cmd_status(status);
2004 }
2005
2006 vf_cfg->vlan_tag = vlan;
2007 return 0;
2008 }
2009
2010 static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
2011 int min_tx_rate, int max_tx_rate)
2012 {
2013 struct be_adapter *adapter = netdev_priv(netdev);
2014 struct device *dev = &adapter->pdev->dev;
2015 int percent_rate, status = 0;
2016 u16 link_speed = 0;
2017 u8 link_status;
2018
2019 if (!sriov_enabled(adapter))
2020 return -EPERM;
2021
2022 if (vf >= adapter->num_vfs)
2023 return -EINVAL;
2024
2025 if (min_tx_rate)
2026 return -EINVAL;
2027
2028 if (!max_tx_rate)
2029 goto config_qos;
2030
2031 status = be_cmd_link_status_query(adapter, &link_speed,
2032 &link_status, 0);
2033 if (status)
2034 goto err;
2035
2036 if (!link_status) {
2037 dev_err(dev, "TX-rate setting not allowed when link is down\n");
2038 status = -ENETDOWN;
2039 goto err;
2040 }
2041
2042 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
2043 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
2044 link_speed);
2045 status = -EINVAL;
2046 goto err;
2047 }
2048
2049 /* On Skyhawk the QOS setting must be done only as a % value */
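	/* Illustrative example: on a 10Gbps link, link_speed is 10000 so
	 * percent_rate works out to 100, and Skyhawk accepts only TX rates
	 * that are multiples of 100 Mbps (i.e. whole percentages of the
	 * link speed).
	 */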
2050 percent_rate = link_speed / 100;
2051 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
2052 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
2053 percent_rate);
2054 status = -EINVAL;
2055 goto err;
2056 }
2057
2058 config_qos:
2059 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
2060 if (status)
2061 goto err;
2062
2063 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2064 return 0;
2065
2066 err:
2067 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2068 max_tx_rate, vf);
2069 return be_cmd_status(status);
2070 }
2071
2072 static int be_set_vf_link_state(struct net_device *netdev, int vf,
2073 int link_state)
2074 {
2075 struct be_adapter *adapter = netdev_priv(netdev);
2076 int status;
2077
2078 if (!sriov_enabled(adapter))
2079 return -EPERM;
2080
2081 if (vf >= adapter->num_vfs)
2082 return -EINVAL;
2083
2084 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
2085 if (status) {
2086 dev_err(&adapter->pdev->dev,
2087 "Link state change on VF %d failed: %#x\n", vf, status);
2088 return be_cmd_status(status);
2089 }
2090
2091 adapter->vf_cfg[vf].plink_tracking = link_state;
2092
2093 return 0;
2094 }
2095
2096 static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2097 {
2098 struct be_adapter *adapter = netdev_priv(netdev);
2099 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2100 u8 spoofchk;
2101 int status;
2102
2103 if (!sriov_enabled(adapter))
2104 return -EPERM;
2105
2106 if (vf >= adapter->num_vfs)
2107 return -EINVAL;
2108
2109 if (BEx_chip(adapter))
2110 return -EOPNOTSUPP;
2111
2112 if (enable == vf_cfg->spoofchk)
2113 return 0;
2114
2115 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2116
2117 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2118 0, spoofchk);
2119 if (status) {
2120 dev_err(&adapter->pdev->dev,
2121 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2122 return be_cmd_status(status);
2123 }
2124
2125 vf_cfg->spoofchk = enable;
2126 return 0;
2127 }
2128
2129 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2130 ulong now)
2131 {
2132 aic->rx_pkts_prev = rx_pkts;
2133 aic->tx_reqs_prev = tx_pkts;
2134 aic->jiffies = now;
2135 }
2136
2137 static int be_get_new_eqd(struct be_eq_obj *eqo)
2138 {
2139 struct be_adapter *adapter = eqo->adapter;
2140 int eqd, start;
2141 struct be_aic_obj *aic;
2142 struct be_rx_obj *rxo;
2143 struct be_tx_obj *txo;
2144 u64 rx_pkts = 0, tx_pkts = 0;
2145 ulong now;
2146 u32 pps, delta;
2147 int i;
2148
2149 aic = &adapter->aic_obj[eqo->idx];
2150 if (!adapter->aic_enabled) {
2151 if (aic->jiffies)
2152 aic->jiffies = 0;
2153 eqd = aic->et_eqd;
2154 return eqd;
2155 }
2156
2157 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2158 do {
2159 start = u64_stats_fetch_begin(&rxo->stats.sync);
2160 rx_pkts += rxo->stats.rx_pkts;
2161 } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
2162 }
2163
2164 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2165 do {
2166 start = u64_stats_fetch_begin(&txo->stats.sync);
2167 tx_pkts += txo->stats.tx_reqs;
2168 } while (u64_stats_fetch_retry(&txo->stats.sync, start));
2169 }
2170
2171 	/* Skip if the counters wrapped around or this is the first calculation */
2172 now = jiffies;
2173 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2174 rx_pkts < aic->rx_pkts_prev ||
2175 tx_pkts < aic->tx_reqs_prev) {
2176 be_aic_update(aic, rx_pkts, tx_pkts, now);
2177 return aic->prev_eqd;
2178 }
2179
2180 delta = jiffies_to_msecs(now - aic->jiffies);
2181 if (delta == 0)
2182 return aic->prev_eqd;
2183
2184 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2185 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2186 eqd = (pps / 15000) << 2;
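	/* Rough illustration: at a combined 120,000 pkts/sec the line above
	 * yields eqd = (120000 / 15000) << 2 = 32; below ~30,000 pkts/sec the
	 * result falls under 8 and is forced to 0 below, i.e. interrupt
	 * moderation is effectively disabled at low packet rates.
	 */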
2187
2188 if (eqd < 8)
2189 eqd = 0;
2190 eqd = min_t(u32, eqd, aic->max_eqd);
2191 eqd = max_t(u32, eqd, aic->min_eqd);
2192
2193 be_aic_update(aic, rx_pkts, tx_pkts, now);
2194
2195 return eqd;
2196 }
2197
2198 /* For Skyhawk-R only */
2199 static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2200 {
2201 struct be_adapter *adapter = eqo->adapter;
2202 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2203 ulong now = jiffies;
2204 int eqd;
2205 u32 mult_enc;
2206
2207 if (!adapter->aic_enabled)
2208 return 0;
2209
2210 if (jiffies_to_msecs(now - aic->jiffies) < 1)
2211 eqd = aic->prev_eqd;
2212 else
2213 eqd = be_get_new_eqd(eqo);
2214
2215 if (eqd > 100)
2216 mult_enc = R2I_DLY_ENC_1;
2217 else if (eqd > 60)
2218 mult_enc = R2I_DLY_ENC_2;
2219 else if (eqd > 20)
2220 mult_enc = R2I_DLY_ENC_3;
2221 else
2222 mult_enc = R2I_DLY_ENC_0;
2223
2224 aic->prev_eqd = eqd;
2225
2226 return mult_enc;
2227 }
2228
2229 void be_eqd_update(struct be_adapter *adapter, bool force_update)
2230 {
2231 struct be_set_eqd set_eqd[MAX_EVT_QS];
2232 struct be_aic_obj *aic;
2233 struct be_eq_obj *eqo;
2234 int i, num = 0, eqd;
2235
2236 for_all_evt_queues(adapter, eqo, i) {
2237 aic = &adapter->aic_obj[eqo->idx];
2238 eqd = be_get_new_eqd(eqo);
2239 if (force_update || eqd != aic->prev_eqd) {
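			/* The eqd value is scaled by 65/100 before being handed
			 * to the FW; as a rough example, eqd = 32 becomes a
			 * delay_multiplier of (32 * 65) / 100 = 20.
			 */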
2240 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2241 set_eqd[num].eq_id = eqo->q.id;
2242 aic->prev_eqd = eqd;
2243 num++;
2244 }
2245 }
2246
2247 if (num)
2248 be_cmd_modify_eqd(adapter, set_eqd, num);
2249 }
2250
2251 static void be_rx_stats_update(struct be_rx_obj *rxo,
2252 struct be_rx_compl_info *rxcp)
2253 {
2254 struct be_rx_stats *stats = rx_stats(rxo);
2255
2256 u64_stats_update_begin(&stats->sync);
2257 stats->rx_compl++;
2258 stats->rx_bytes += rxcp->pkt_size;
2259 stats->rx_pkts++;
2260 if (rxcp->tunneled)
2261 stats->rx_vxlan_offload_pkts++;
2262 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
2263 stats->rx_mcast_pkts++;
2264 if (rxcp->err)
2265 stats->rx_compl_err++;
2266 u64_stats_update_end(&stats->sync);
2267 }
2268
2269 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
2270 {
2271 /* L4 checksum is not reliable for non TCP/UDP packets.
2272 * Also ignore ipcksm for ipv6 pkts
2273 */
2274 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
2275 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
2276 }
2277
2278 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
2279 {
2280 struct be_adapter *adapter = rxo->adapter;
2281 struct be_rx_page_info *rx_page_info;
2282 struct be_queue_info *rxq = &rxo->q;
2283 u32 frag_idx = rxq->tail;
2284
2285 rx_page_info = &rxo->page_info_tbl[frag_idx];
2286 BUG_ON(!rx_page_info->page);
2287
2288 if (rx_page_info->last_frag) {
2289 dma_unmap_page(&adapter->pdev->dev,
2290 dma_unmap_addr(rx_page_info, bus),
2291 adapter->big_page_size, DMA_FROM_DEVICE);
2292 rx_page_info->last_frag = false;
2293 } else {
2294 dma_sync_single_for_cpu(&adapter->pdev->dev,
2295 dma_unmap_addr(rx_page_info, bus),
2296 rx_frag_size, DMA_FROM_DEVICE);
2297 }
2298
2299 queue_tail_inc(rxq);
2300 atomic_dec(&rxq->used);
2301 return rx_page_info;
2302 }
2303
2304 /* Throw away the data in the Rx completion */
2305 static void be_rx_compl_discard(struct be_rx_obj *rxo,
2306 struct be_rx_compl_info *rxcp)
2307 {
2308 struct be_rx_page_info *page_info;
2309 u16 i, num_rcvd = rxcp->num_rcvd;
2310
2311 for (i = 0; i < num_rcvd; i++) {
2312 page_info = get_rx_page_info(rxo);
2313 put_page(page_info->page);
2314 memset(page_info, 0, sizeof(*page_info));
2315 }
2316 }
2317
2318 /*
2319 * skb_fill_rx_data forms a complete skb for an ether frame
2320 * indicated by rxcp.
2321 */
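/* Descriptive note: a tiny frame (first fragment <= BE_HDR_LEN) is copied
 * entirely into the skb linear area; larger frames keep only the Ethernet
 * header linear and attach the remaining received data as page frags,
 * coalescing fragments that come from the same physical page into one slot.
 */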
2322 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2323 struct be_rx_compl_info *rxcp)
2324 {
2325 struct be_rx_page_info *page_info;
2326 u16 i, j;
2327 u16 hdr_len, curr_frag_len, remaining;
2328 u8 *start;
2329
2330 page_info = get_rx_page_info(rxo);
2331 start = page_address(page_info->page) + page_info->page_offset;
2332 prefetch(start);
2333
2334 /* Copy data in the first descriptor of this completion */
2335 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2336
2337 skb->len = curr_frag_len;
2338 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
2339 memcpy(skb->data, start, curr_frag_len);
2340 /* Complete packet has now been moved to data */
2341 put_page(page_info->page);
2342 skb->data_len = 0;
2343 skb->tail += curr_frag_len;
2344 } else {
2345 hdr_len = ETH_HLEN;
2346 memcpy(skb->data, start, hdr_len);
2347 skb_shinfo(skb)->nr_frags = 1;
2348 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
2349 page_info->page,
2350 page_info->page_offset + hdr_len,
2351 curr_frag_len - hdr_len);
2352 skb->data_len = curr_frag_len - hdr_len;
2353 skb->truesize += rx_frag_size;
2354 skb->tail += hdr_len;
2355 }
2356 page_info->page = NULL;
2357
2358 if (rxcp->pkt_size <= rx_frag_size) {
2359 BUG_ON(rxcp->num_rcvd != 1);
2360 return;
2361 }
2362
2363 /* More frags present for this completion */
2364 remaining = rxcp->pkt_size - curr_frag_len;
2365 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2366 page_info = get_rx_page_info(rxo);
2367 curr_frag_len = min(remaining, rx_frag_size);
2368
2369 /* Coalesce all frags from the same physical page in one slot */
2370 if (page_info->page_offset == 0) {
2371 /* Fresh page */
2372 j++;
2373 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
2374 page_info->page,
2375 page_info->page_offset,
2376 curr_frag_len);
2377 skb_shinfo(skb)->nr_frags++;
2378 } else {
2379 put_page(page_info->page);
2380 skb_frag_size_add(&skb_shinfo(skb)->frags[j],
2381 curr_frag_len);
2382 }
2383
2384 skb->len += curr_frag_len;
2385 skb->data_len += curr_frag_len;
2386 skb->truesize += rx_frag_size;
2387 remaining -= curr_frag_len;
2388 page_info->page = NULL;
2389 }
2390 BUG_ON(j > MAX_SKB_FRAGS);
2391 }
2392
2393 /* Process the RX completion indicated by rxcp when GRO is disabled */
2394 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2395 struct be_rx_compl_info *rxcp)
2396 {
2397 struct be_adapter *adapter = rxo->adapter;
2398 struct net_device *netdev = adapter->netdev;
2399 struct sk_buff *skb;
2400
2401 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2402 if (unlikely(!skb)) {
2403 rx_stats(rxo)->rx_drops_no_skbs++;
2404 be_rx_compl_discard(rxo, rxcp);
2405 return;
2406 }
2407
2408 skb_fill_rx_data(rxo, skb, rxcp);
2409
2410 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2411 skb->ip_summed = CHECKSUM_UNNECESSARY;
2412 else
2413 skb_checksum_none_assert(skb);
2414
2415 skb->protocol = eth_type_trans(skb, netdev);
2416 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2417 if (netdev->features & NETIF_F_RXHASH)
2418 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2419
2420 skb->csum_level = rxcp->tunneled;
2421 skb_mark_napi_id(skb, napi);
2422
2423 if (rxcp->vlanf)
2424 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2425
2426 netif_receive_skb(skb);
2427 }
2428
2429 /* Process the RX completion indicated by rxcp when GRO is enabled */
2430 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2431 struct napi_struct *napi,
2432 struct be_rx_compl_info *rxcp)
2433 {
2434 struct be_adapter *adapter = rxo->adapter;
2435 struct be_rx_page_info *page_info;
2436 struct sk_buff *skb = NULL;
2437 u16 remaining, curr_frag_len;
2438 u16 i, j;
2439
2440 skb = napi_get_frags(napi);
2441 if (!skb) {
2442 be_rx_compl_discard(rxo, rxcp);
2443 return;
2444 }
2445
2446 remaining = rxcp->pkt_size;
2447 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2448 page_info = get_rx_page_info(rxo);
2449
2450 curr_frag_len = min(remaining, rx_frag_size);
2451
2452 /* Coalesce all frags from the same physical page in one slot */
2453 if (i == 0 || page_info->page_offset == 0) {
2454 /* First frag or Fresh page */
2455 j++;
2456 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
2457 page_info->page,
2458 page_info->page_offset,
2459 curr_frag_len);
2460 } else {
2461 put_page(page_info->page);
2462 skb_frag_size_add(&skb_shinfo(skb)->frags[j],
2463 curr_frag_len);
2464 }
2465
2466 skb->truesize += rx_frag_size;
2467 remaining -= curr_frag_len;
2468 memset(page_info, 0, sizeof(*page_info));
2469 }
2470 BUG_ON(j > MAX_SKB_FRAGS);
2471
2472 skb_shinfo(skb)->nr_frags = j + 1;
2473 skb->len = rxcp->pkt_size;
2474 skb->data_len = rxcp->pkt_size;
2475 skb->ip_summed = CHECKSUM_UNNECESSARY;
2476 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2477 if (adapter->netdev->features & NETIF_F_RXHASH)
2478 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2479
2480 skb->csum_level = rxcp->tunneled;
2481
2482 if (rxcp->vlanf)
2483 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2484
2485 napi_gro_frags(napi);
2486 }
2487
2488 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2489 struct be_rx_compl_info *rxcp)
2490 {
2491 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2492 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2493 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2494 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2495 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2496 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2497 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2498 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2499 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2500 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2501 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2502 if (rxcp->vlanf) {
2503 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2504 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2505 }
2506 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2507 rxcp->tunneled =
2508 GET_RX_COMPL_V1_BITS(tunneled, compl);
2509 }
2510
2511 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2512 struct be_rx_compl_info *rxcp)
2513 {
2514 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2515 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2516 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2517 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2518 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2519 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2520 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2521 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2522 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2523 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2524 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2525 if (rxcp->vlanf) {
2526 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2527 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2528 }
2529 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2530 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2531 }
2532
2533 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2534 {
2535 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2536 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2537 struct be_adapter *adapter = rxo->adapter;
2538
2539 /* For checking the valid bit it is Ok to use either definition as the
2540 * valid bit is at the same position in both v0 and v1 Rx compl */
2541 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2542 return NULL;
2543
2544 rmb();
2545 be_dws_le_to_cpu(compl, sizeof(*compl));
2546
2547 if (adapter->be3_native)
2548 be_parse_rx_compl_v1(compl, rxcp);
2549 else
2550 be_parse_rx_compl_v0(compl, rxcp);
2551
2552 if (rxcp->ip_frag)
2553 rxcp->l4_csum = 0;
2554
2555 if (rxcp->vlanf) {
2556 /* In QNQ modes, if qnq bit is not set, then the packet was
2557 * tagged only with the transparent outer vlan-tag and must
2558 * not be treated as a vlan packet by host
2559 */
2560 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2561 rxcp->vlanf = 0;
2562
2563 if (!lancer_chip(adapter))
2564 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2565
2566 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2567 !test_bit(rxcp->vlan_tag, adapter->vids))
2568 rxcp->vlanf = 0;
2569 }
2570
2571 	/* As the compl has been parsed, reset it; we won't touch it again */
2572 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2573
2574 queue_tail_inc(&rxo->cq);
2575 return rxcp;
2576 }
2577
2578 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2579 {
2580 u32 order = get_order(size);
2581
2582 if (order > 0)
2583 gfp |= __GFP_COMP;
2584 return alloc_pages(gfp, order);
2585 }
2586
2587 /*
2588  * Allocate a page, split it into fragments of size rx_frag_size and post them as
2589 * receive buffers to BE
2590 */
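/* Descriptive note: each big page yields big_page_size / rx_frag_size
 * fragments; the first fragment uses the page's original reference and each
 * additional fragment takes an extra reference via get_page(), so every
 * posted buffer can be released independently with put_page() when its
 * completion is processed.
 */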
2591 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2592 {
2593 struct be_adapter *adapter = rxo->adapter;
2594 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2595 struct be_queue_info *rxq = &rxo->q;
2596 struct page *pagep = NULL;
2597 struct device *dev = &adapter->pdev->dev;
2598 struct be_eth_rx_d *rxd;
2599 u64 page_dmaaddr = 0, frag_dmaaddr;
2600 u32 posted, page_offset = 0, notify = 0;
2601
2602 page_info = &rxo->page_info_tbl[rxq->head];
2603 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2604 if (!pagep) {
2605 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2606 if (unlikely(!pagep)) {
2607 rx_stats(rxo)->rx_post_fail++;
2608 break;
2609 }
2610 page_dmaaddr = dma_map_page(dev, pagep, 0,
2611 adapter->big_page_size,
2612 DMA_FROM_DEVICE);
2613 if (dma_mapping_error(dev, page_dmaaddr)) {
2614 put_page(pagep);
2615 pagep = NULL;
2616 adapter->drv_stats.dma_map_errors++;
2617 break;
2618 }
2619 page_offset = 0;
2620 } else {
2621 get_page(pagep);
2622 page_offset += rx_frag_size;
2623 }
2624 page_info->page_offset = page_offset;
2625 page_info->page = pagep;
2626
2627 rxd = queue_head_node(rxq);
2628 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2629 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2630 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2631
2632 /* Any space left in the current big page for another frag? */
2633 if ((page_offset + rx_frag_size + rx_frag_size) >
2634 adapter->big_page_size) {
2635 pagep = NULL;
2636 page_info->last_frag = true;
2637 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2638 } else {
2639 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2640 }
2641
2642 prev_page_info = page_info;
2643 queue_head_inc(rxq);
2644 page_info = &rxo->page_info_tbl[rxq->head];
2645 }
2646
2647 /* Mark the last frag of a page when we break out of the above loop
2648 * with no more slots available in the RXQ
2649 */
2650 if (pagep) {
2651 prev_page_info->last_frag = true;
2652 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2653 }
2654
2655 if (posted) {
2656 atomic_add(posted, &rxq->used);
2657 if (rxo->rx_post_starved)
2658 rxo->rx_post_starved = false;
2659 do {
2660 notify = min(MAX_NUM_POST_ERX_DB, posted);
2661 be_rxq_notify(adapter, rxq->id, notify);
2662 posted -= notify;
2663 } while (posted);
2664 } else if (atomic_read(&rxq->used) == 0) {
2665 /* Let be_worker replenish when memory is available */
2666 rxo->rx_post_starved = true;
2667 }
2668 }
2669
2670 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2671 {
2672 switch (status) {
2673 case BE_TX_COMP_HDR_PARSE_ERR:
2674 tx_stats(txo)->tx_hdr_parse_err++;
2675 break;
2676 case BE_TX_COMP_NDMA_ERR:
2677 tx_stats(txo)->tx_dma_err++;
2678 break;
2679 case BE_TX_COMP_ACL_ERR:
2680 tx_stats(txo)->tx_spoof_check_err++;
2681 break;
2682 }
2683 }
2684
2685 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2686 {
2687 switch (status) {
2688 case LANCER_TX_COMP_LSO_ERR:
2689 tx_stats(txo)->tx_tso_err++;
2690 break;
2691 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2692 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2693 tx_stats(txo)->tx_spoof_check_err++;
2694 break;
2695 case LANCER_TX_COMP_QINQ_ERR:
2696 tx_stats(txo)->tx_qinq_err++;
2697 break;
2698 case LANCER_TX_COMP_PARITY_ERR:
2699 tx_stats(txo)->tx_internal_parity_err++;
2700 break;
2701 case LANCER_TX_COMP_DMA_ERR:
2702 tx_stats(txo)->tx_dma_err++;
2703 break;
2704 case LANCER_TX_COMP_SGE_ERR:
2705 tx_stats(txo)->tx_sge_err++;
2706 break;
2707 }
2708 }
2709
2710 static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2711 struct be_tx_obj *txo)
2712 {
2713 struct be_queue_info *tx_cq = &txo->cq;
2714 struct be_tx_compl_info *txcp = &txo->txcp;
2715 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2716
2717 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2718 return NULL;
2719
2720 /* Ensure load ordering of valid bit dword and other dwords below */
2721 rmb();
2722 be_dws_le_to_cpu(compl, sizeof(*compl));
2723
2724 txcp->status = GET_TX_COMPL_BITS(status, compl);
2725 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2726
2727 if (txcp->status) {
2728 if (lancer_chip(adapter)) {
2729 lancer_update_tx_err(txo, txcp->status);
2730 			/* Reset the adapter in case of TSO,
2731 * SGE or Parity error
2732 */
2733 if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2734 txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2735 txcp->status == LANCER_TX_COMP_SGE_ERR)
2736 be_set_error(adapter, BE_ERROR_TX);
2737 } else {
2738 be_update_tx_err(txo, txcp->status);
2739 }
2740 }
2741
2742 if (be_check_error(adapter, BE_ERROR_TX))
2743 return NULL;
2744
2745 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2746 queue_tail_inc(tx_cq);
2747 return txcp;
2748 }
2749
2750 static u16 be_tx_compl_process(struct be_adapter *adapter,
2751 struct be_tx_obj *txo, u16 last_index)
2752 {
2753 struct sk_buff **sent_skbs = txo->sent_skb_list;
2754 struct be_queue_info *txq = &txo->q;
2755 struct sk_buff *skb = NULL;
2756 bool unmap_skb_hdr = false;
2757 struct be_eth_wrb *wrb;
2758 u16 num_wrbs = 0;
2759 u32 frag_index;
2760
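	/* Descriptive note: each TX request occupies a header WRB (its slot in
	 * sent_skb_list holds the skb pointer) followed by one WRB per
	 * fragment. Walk the ring from the tail up to last_index, unmapping
	 * the data WRBs and freeing each skb once the next request's header
	 * WRB is reached.
	 */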
2761 do {
2762 if (sent_skbs[txq->tail]) {
2763 /* Free skb from prev req */
2764 if (skb)
2765 dev_consume_skb_any(skb);
2766 skb = sent_skbs[txq->tail];
2767 sent_skbs[txq->tail] = NULL;
2768 queue_tail_inc(txq); /* skip hdr wrb */
2769 num_wrbs++;
2770 unmap_skb_hdr = true;
2771 }
2772 wrb = queue_tail_node(txq);
2773 frag_index = txq->tail;
2774 unmap_tx_frag(&adapter->pdev->dev, wrb,
2775 (unmap_skb_hdr && skb_headlen(skb)));
2776 unmap_skb_hdr = false;
2777 queue_tail_inc(txq);
2778 num_wrbs++;
2779 } while (frag_index != last_index);
2780 dev_consume_skb_any(skb);
2781
2782 return num_wrbs;
2783 }
2784
2785 /* Return the number of events in the event queue */
2786 static inline int events_get(struct be_eq_obj *eqo)
2787 {
2788 struct be_eq_entry *eqe;
2789 int num = 0;
2790
2791 do {
2792 eqe = queue_tail_node(&eqo->q);
2793 if (eqe->evt == 0)
2794 break;
2795
2796 rmb();
2797 eqe->evt = 0;
2798 num++;
2799 queue_tail_inc(&eqo->q);
2800 } while (true);
2801
2802 return num;
2803 }
2804
2805 /* Leaves the EQ in a disarmed state */
2806 static void be_eq_clean(struct be_eq_obj *eqo)
2807 {
2808 int num = events_get(eqo);
2809
2810 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2811 }
2812
2813 /* Free posted rx buffers that were not used */
2814 static void be_rxq_clean(struct be_rx_obj *rxo)
2815 {
2816 struct be_queue_info *rxq = &rxo->q;
2817 struct be_rx_page_info *page_info;
2818
2819 while (atomic_read(&rxq->used) > 0) {
2820 page_info = get_rx_page_info(rxo);
2821 put_page(page_info->page);
2822 memset(page_info, 0, sizeof(*page_info));
2823 }
2824 BUG_ON(atomic_read(&rxq->used));
2825 rxq->tail = 0;
2826 rxq->head = 0;
2827 }
2828
2829 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2830 {
2831 struct be_queue_info *rx_cq = &rxo->cq;
2832 struct be_rx_compl_info *rxcp;
2833 struct be_adapter *adapter = rxo->adapter;
2834 int flush_wait = 0;
2835
2836 /* Consume pending rx completions.
2837 * Wait for the flush completion (identified by zero num_rcvd)
2838 * to arrive. Notify CQ even when there are no more CQ entries
2839 * for HW to flush partially coalesced CQ entries.
2840 * In Lancer, there is no need to wait for flush compl.
2841 */
2842 for (;;) {
2843 rxcp = be_rx_compl_get(rxo);
2844 if (!rxcp) {
2845 if (lancer_chip(adapter))
2846 break;
2847
2848 if (flush_wait++ > 50 ||
2849 be_check_error(adapter,
2850 BE_ERROR_HW)) {
2851 dev_warn(&adapter->pdev->dev,
2852 "did not receive flush compl\n");
2853 break;
2854 }
2855 be_cq_notify(adapter, rx_cq->id, true, 0);
2856 mdelay(1);
2857 } else {
2858 be_rx_compl_discard(rxo, rxcp);
2859 be_cq_notify(adapter, rx_cq->id, false, 1);
2860 if (rxcp->num_rcvd == 0)
2861 break;
2862 }
2863 }
2864
2865 /* After cleanup, leave the CQ in unarmed state */
2866 be_cq_notify(adapter, rx_cq->id, false, 0);
2867 }
2868
2869 static void be_tx_compl_clean(struct be_adapter *adapter)
2870 {
2871 struct device *dev = &adapter->pdev->dev;
2872 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
2873 struct be_tx_compl_info *txcp;
2874 struct be_queue_info *txq;
2875 u32 end_idx, notified_idx;
2876 struct be_tx_obj *txo;
2877 int i, pending_txqs;
2878
2879 /* Stop polling for compls when HW has been silent for 10ms */
2880 do {
2881 pending_txqs = adapter->num_tx_qs;
2882
2883 for_all_tx_queues(adapter, txo, i) {
2884 cmpl = 0;
2885 num_wrbs = 0;
2886 txq = &txo->q;
2887 while ((txcp = be_tx_compl_get(adapter, txo))) {
2888 num_wrbs +=
2889 be_tx_compl_process(adapter, txo,
2890 txcp->end_index);
2891 cmpl++;
2892 }
2893 if (cmpl) {
2894 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2895 atomic_sub(num_wrbs, &txq->used);
2896 timeo = 0;
2897 }
2898 if (!be_is_tx_compl_pending(txo))
2899 pending_txqs--;
2900 }
2901
2902 if (pending_txqs == 0 || ++timeo > 10 ||
2903 be_check_error(adapter, BE_ERROR_HW))
2904 break;
2905
2906 mdelay(1);
2907 } while (true);
2908
2909 /* Free enqueued TX that was never notified to HW */
2910 for_all_tx_queues(adapter, txo, i) {
2911 txq = &txo->q;
2912
2913 if (atomic_read(&txq->used)) {
2914 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2915 i, atomic_read(&txq->used));
2916 notified_idx = txq->tail;
2917 end_idx = txq->tail;
2918 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2919 txq->len);
2920 /* Use the tx-compl process logic to handle requests
2921 * that were not sent to the HW.
2922 */
2923 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2924 atomic_sub(num_wrbs, &txq->used);
2925 BUG_ON(atomic_read(&txq->used));
2926 txo->pend_wrb_cnt = 0;
2927 /* Since hw was never notified of these requests,
2928 * reset TXQ indices
2929 */
2930 txq->head = notified_idx;
2931 txq->tail = notified_idx;
2932 }
2933 }
2934 }
2935
2936 static void be_evt_queues_destroy(struct be_adapter *adapter)
2937 {
2938 struct be_eq_obj *eqo;
2939 int i;
2940
2941 for_all_evt_queues(adapter, eqo, i) {
2942 if (eqo->q.created) {
2943 be_eq_clean(eqo);
2944 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2945 netif_napi_del(&eqo->napi);
2946 free_cpumask_var(eqo->affinity_mask);
2947 }
2948 be_queue_free(adapter, &eqo->q);
2949 }
2950 }
2951
2952 static int be_evt_queues_create(struct be_adapter *adapter)
2953 {
2954 struct be_queue_info *eq;
2955 struct be_eq_obj *eqo;
2956 struct be_aic_obj *aic;
2957 int i, rc;
2958
2959 /* need enough EQs to service both RX and TX queues */
2960 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2961 max(adapter->cfg_num_rx_irqs,
2962 adapter->cfg_num_tx_irqs));
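	/* Illustrative values only: with 8 vectors available,
	 * cfg_num_rx_irqs = 6 and cfg_num_tx_irqs = 4, this computes
	 * num_evt_qs = min(8, max(6, 4)) = 6.
	 */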
2963
2964 adapter->aic_enabled = true;
2965
2966 for_all_evt_queues(adapter, eqo, i) {
2967 int numa_node = dev_to_node(&adapter->pdev->dev);
2968
2969 aic = &adapter->aic_obj[i];
2970 eqo->adapter = adapter;
2971 eqo->idx = i;
2972 aic->max_eqd = BE_MAX_EQD;
2973
2974 eq = &eqo->q;
2975 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2976 sizeof(struct be_eq_entry));
2977 if (rc)
2978 return rc;
2979
2980 rc = be_cmd_eq_create(adapter, eqo);
2981 if (rc)
2982 return rc;
2983
2984 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2985 return -ENOMEM;
2986 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2987 eqo->affinity_mask);
2988 netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
2989 }
2990 return 0;
2991 }
2992
2993 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2994 {
2995 struct be_queue_info *q;
2996
2997 q = &adapter->mcc_obj.q;
2998 if (q->created)
2999 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
3000 be_queue_free(adapter, q);
3001
3002 q = &adapter->mcc_obj.cq;
3003 if (q->created)
3004 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3005 be_queue_free(adapter, q);
3006 }
3007
3008 /* Must be called only after TX qs are created as MCC shares TX EQ */
3009 static int be_mcc_queues_create(struct be_adapter *adapter)
3010 {
3011 struct be_queue_info *q, *cq;
3012
3013 cq = &adapter->mcc_obj.cq;
3014 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
3015 sizeof(struct be_mcc_compl)))
3016 goto err;
3017
3018 /* Use the default EQ for MCC completions */
3019 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
3020 goto mcc_cq_free;
3021
3022 q = &adapter->mcc_obj.q;
3023 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3024 goto mcc_cq_destroy;
3025
3026 if (be_cmd_mccq_create(adapter, q, cq))
3027 goto mcc_q_free;
3028
3029 return 0;
3030
3031 mcc_q_free:
3032 be_queue_free(adapter, q);
3033 mcc_cq_destroy:
3034 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
3035 mcc_cq_free:
3036 be_queue_free(adapter, cq);
3037 err:
3038 return -1;
3039 }
3040
3041 static void be_tx_queues_destroy(struct be_adapter *adapter)
3042 {
3043 struct be_queue_info *q;
3044 struct be_tx_obj *txo;
3045 u8 i;
3046
3047 for_all_tx_queues(adapter, txo, i) {
3048 q = &txo->q;
3049 if (q->created)
3050 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3051 be_queue_free(adapter, q);
3052
3053 q = &txo->cq;
3054 if (q->created)
3055 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3056 be_queue_free(adapter, q);
3057 }
3058 }
3059
3060 static int be_tx_qs_create(struct be_adapter *adapter)
3061 {
3062 struct be_queue_info *cq;
3063 struct be_tx_obj *txo;
3064 struct be_eq_obj *eqo;
3065 int status, i;
3066
3067 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
3068
3069 for_all_tx_queues(adapter, txo, i) {
3070 cq = &txo->cq;
3071 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
3072 sizeof(struct be_eth_tx_compl));
3073 if (status)
3074 return status;
3075
3076 u64_stats_init(&txo->stats.sync);
3077 u64_stats_init(&txo->stats.sync_compl);
3078
3079 /* If num_evt_qs is less than num_tx_qs, then more than
3080 		 * one txq shares an eq
3081 */
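		/* e.g. with 4 EQs and 6 TX queues, txq4 and txq5 wrap around
		 * and share eq0 and eq1 respectively (i % num_evt_qs).
		 */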
3082 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
3083 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
3084 if (status)
3085 return status;
3086
3087 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3088 sizeof(struct be_eth_wrb));
3089 if (status)
3090 return status;
3091
3092 status = be_cmd_txq_create(adapter, txo);
3093 if (status)
3094 return status;
3095
3096 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3097 eqo->idx);
3098 }
3099
3100 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3101 adapter->num_tx_qs);
3102 return 0;
3103 }
3104
3105 static void be_rx_cqs_destroy(struct be_adapter *adapter)
3106 {
3107 struct be_queue_info *q;
3108 struct be_rx_obj *rxo;
3109 int i;
3110
3111 for_all_rx_queues(adapter, rxo, i) {
3112 q = &rxo->cq;
3113 if (q->created)
3114 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3115 be_queue_free(adapter, q);
3116 }
3117 }
3118
3119 static int be_rx_cqs_create(struct be_adapter *adapter)
3120 {
3121 struct be_queue_info *eq, *cq;
3122 struct be_rx_obj *rxo;
3123 int rc, i;
3124
3125 adapter->num_rss_qs =
3126 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
3127
3128 	/* We'll use RSS only if at least 2 RSS rings are supported. */
3129 if (adapter->num_rss_qs < 2)
3130 adapter->num_rss_qs = 0;
3131
3132 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3133
3134 /* When the interface is not capable of RSS rings (and there is no
3135 * need to create a default RXQ) we'll still need one RXQ
3136 */
3137 if (adapter->num_rx_qs == 0)
3138 adapter->num_rx_qs = 1;
3139
3140 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
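	/* With the default rx_frag_size of 2048 and 4K pages, for example,
	 * get_order(2048) is 0 and big_page_size works out to 4096, i.e.
	 * two RX fragments are carved out of each allocated page.
	 */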
3141 for_all_rx_queues(adapter, rxo, i) {
3142 rxo->adapter = adapter;
3143 cq = &rxo->cq;
3144 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
3145 sizeof(struct be_eth_rx_compl));
3146 if (rc)
3147 return rc;
3148
3149 u64_stats_init(&rxo->stats.sync);
3150 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3151 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3152 if (rc)
3153 return rc;
3154 }
3155
3156 dev_info(&adapter->pdev->dev,
3157 "created %d RX queue(s)\n", adapter->num_rx_qs);
3158 return 0;
3159 }
3160
3161 static irqreturn_t be_intx(int irq, void *dev)
3162 {
3163 struct be_eq_obj *eqo = dev;
3164 struct be_adapter *adapter = eqo->adapter;
3165 int num_evts = 0;
3166
3167 /* IRQ is not expected when NAPI is scheduled as the EQ
3168 * will not be armed.
3169 * But, this can happen on Lancer INTx where it takes
3170 	 * a while to de-assert INTx or in BE2 where occasionally
3171 * an interrupt may be raised even when EQ is unarmed.
3172 * If NAPI is already scheduled, then counting & notifying
3173 * events will orphan them.
3174 */
3175 if (napi_schedule_prep(&eqo->napi)) {
3176 num_evts = events_get(eqo);
3177 __napi_schedule(&eqo->napi);
3178 if (num_evts)
3179 eqo->spurious_intr = 0;
3180 }
3181 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
3182
3183 /* Return IRQ_HANDLED only for the first spurious intr
3184 * after a valid intr to stop the kernel from branding
3185 * this irq as a bad one!
3186 */
3187 if (num_evts || eqo->spurious_intr++ == 0)
3188 return IRQ_HANDLED;
3189 else
3190 return IRQ_NONE;
3191 }
3192
3193 static irqreturn_t be_msix(int irq, void *dev)
3194 {
3195 struct be_eq_obj *eqo = dev;
3196
3197 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
3198 napi_schedule(&eqo->napi);
3199 return IRQ_HANDLED;
3200 }
3201
3202 static inline bool do_gro(struct be_rx_compl_info *rxcp)
3203 {
3204 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
3205 }
3206
3207 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
3208 int budget)
3209 {
3210 struct be_adapter *adapter = rxo->adapter;
3211 struct be_queue_info *rx_cq = &rxo->cq;
3212 struct be_rx_compl_info *rxcp;
3213 u32 work_done;
3214 u32 frags_consumed = 0;
3215
3216 for (work_done = 0; work_done < budget; work_done++) {
3217 rxcp = be_rx_compl_get(rxo);
3218 if (!rxcp)
3219 break;
3220
3221 /* Is it a flush compl that has no data */
3222 if (unlikely(rxcp->num_rcvd == 0))
3223 goto loop_continue;
3224
3225 /* Discard compl with partial DMA Lancer B0 */
3226 if (unlikely(!rxcp->pkt_size)) {
3227 be_rx_compl_discard(rxo, rxcp);
3228 goto loop_continue;
3229 }
3230
3231 /* On BE drop pkts that arrive due to imperfect filtering in
3232 		 * promiscuous mode on some SKUs
3233 */
3234 if (unlikely(rxcp->port != adapter->port_num &&
3235 !lancer_chip(adapter))) {
3236 be_rx_compl_discard(rxo, rxcp);
3237 goto loop_continue;
3238 }
3239
3240 if (do_gro(rxcp))
3241 be_rx_compl_process_gro(rxo, napi, rxcp);
3242 else
3243 be_rx_compl_process(rxo, napi, rxcp);
3244
3245 loop_continue:
3246 frags_consumed += rxcp->num_rcvd;
3247 be_rx_stats_update(rxo, rxcp);
3248 }
3249
3250 if (work_done) {
3251 be_cq_notify(adapter, rx_cq->id, true, work_done);
3252
3253 /* When an rx-obj gets into post_starved state, just
3254 * let be_worker do the posting.
3255 */
3256 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3257 !rxo->rx_post_starved)
3258 be_post_rx_frags(rxo, GFP_ATOMIC,
3259 max_t(u32, MAX_RX_POST,
3260 frags_consumed));
3261 }
3262
3263 return work_done;
3264 }
3265
3266
3267 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3268 int idx)
3269 {
3270 int num_wrbs = 0, work_done = 0;
3271 struct be_tx_compl_info *txcp;
3272
3273 while ((txcp = be_tx_compl_get(adapter, txo))) {
3274 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
3275 work_done++;
3276 }
3277
3278 if (work_done) {
3279 be_cq_notify(adapter, txo->cq.id, true, work_done);
3280 atomic_sub(num_wrbs, &txo->q.used);
3281
3282 /* As Tx wrbs have been freed up, wake up netdev queue
3283 * if it was stopped due to lack of tx wrbs. */
3284 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
3285 be_can_txq_wake(txo)) {
3286 netif_wake_subqueue(adapter->netdev, idx);
3287 }
3288
3289 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3290 tx_stats(txo)->tx_compl += work_done;
3291 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3292 }
3293 }
3294
3295 int be_poll(struct napi_struct *napi, int budget)
3296 {
3297 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3298 struct be_adapter *adapter = eqo->adapter;
3299 int max_work = 0, work, i, num_evts;
3300 struct be_rx_obj *rxo;
3301 struct be_tx_obj *txo;
3302 u32 mult_enc = 0;
3303
3304 num_evts = events_get(eqo);
3305
3306 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3307 be_process_tx(adapter, txo, i);
3308
3309 /* This loop will iterate twice for EQ0 in which
3310 	 * completions of the last RXQ (default one) are also processed.
3311 	 * For other EQs the loop iterates only once.
3312 */
3313 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3314 work = be_process_rx(rxo, napi, budget);
3315 max_work = max(work, max_work);
3316 }
3317
3318 if (is_mcc_eqo(eqo))
3319 be_process_mcc(adapter);
3320
3321 if (max_work < budget) {
3322 napi_complete_done(napi, max_work);
3323
3324 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3325 * delay via a delay multiplier encoding value
3326 */
3327 if (skyhawk_chip(adapter))
3328 mult_enc = be_get_eq_delay_mult_enc(eqo);
3329
3330 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3331 mult_enc);
3332 } else {
3333 /* As we'll continue in polling mode, count and clear events */
3334 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3335 }
3336 return max_work;
3337 }
3338
3339 void be_detect_error(struct be_adapter *adapter)
3340 {
3341 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3342 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3343 struct device *dev = &adapter->pdev->dev;
3344 u16 val;
3345 u32 i;
3346
3347 if (be_check_error(adapter, BE_ERROR_HW))
3348 return;
3349
3350 if (lancer_chip(adapter)) {
3351 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3352 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3353 be_set_error(adapter, BE_ERROR_UE);
3354 sliport_err1 = ioread32(adapter->db +
3355 SLIPORT_ERROR1_OFFSET);
3356 sliport_err2 = ioread32(adapter->db +
3357 SLIPORT_ERROR2_OFFSET);
3358 			/* Do not log error messages if it's a FW reset */
3359 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3360 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3361 dev_info(dev, "Reset is in progress\n");
3362 } else {
3363 dev_err(dev, "Error detected in the card\n");
3364 dev_err(dev, "ERR: sliport status 0x%x\n",
3365 sliport_status);
3366 dev_err(dev, "ERR: sliport error1 0x%x\n",
3367 sliport_err1);
3368 dev_err(dev, "ERR: sliport error2 0x%x\n",
3369 sliport_err2);
3370 }
3371 }
3372 } else {
3373 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3374 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3375 ue_lo_mask = ioread32(adapter->pcicfg +
3376 PCICFG_UE_STATUS_LOW_MASK);
3377 ue_hi_mask = ioread32(adapter->pcicfg +
3378 PCICFG_UE_STATUS_HI_MASK);
3379
3380 ue_lo = (ue_lo & ~ue_lo_mask);
3381 ue_hi = (ue_hi & ~ue_hi_mask);
3382
3383 if (ue_lo || ue_hi) {
3384 /* On certain platforms BE3 hardware can indicate
3385 * spurious UEs. In case of a UE in the chip,
3386 * the POST register correctly reports either a
3387 * FAT_LOG_START state (FW is currently dumping
3388 			 * FAT log data) or an ARMFW_UE state. Check for the
3389 * above states to ascertain if the UE is valid or not.
3390 */
3391 if (BE3_chip(adapter)) {
3392 val = be_POST_stage_get(adapter);
3393 if ((val & POST_STAGE_FAT_LOG_START)
3394 != POST_STAGE_FAT_LOG_START &&
3395 (val & POST_STAGE_ARMFW_UE)
3396 != POST_STAGE_ARMFW_UE &&
3397 (val & POST_STAGE_RECOVERABLE_ERR)
3398 != POST_STAGE_RECOVERABLE_ERR)
3399 return;
3400 }
3401
3402 dev_err(dev, "Error detected in the adapter");
3403 be_set_error(adapter, BE_ERROR_UE);
3404
3405 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3406 if (ue_lo & 1)
3407 dev_err(dev, "UE: %s bit set\n",
3408 ue_status_low_desc[i]);
3409 }
3410 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3411 if (ue_hi & 1)
3412 dev_err(dev, "UE: %s bit set\n",
3413 ue_status_hi_desc[i]);
3414 }
3415 }
3416 }
3417 }
3418
3419 static void be_msix_disable(struct be_adapter *adapter)
3420 {
3421 if (msix_enabled(adapter)) {
3422 pci_disable_msix(adapter->pdev);
3423 adapter->num_msix_vec = 0;
3424 adapter->num_msix_roce_vec = 0;
3425 }
3426 }
3427
3428 static int be_msix_enable(struct be_adapter *adapter)
3429 {
3430 unsigned int i, max_roce_eqs;
3431 struct device *dev = &adapter->pdev->dev;
3432 int num_vec;
3433
3434 /* If RoCE is supported, program the max number of vectors that
3435 	 * could be used for NIC and RoCE; otherwise, just program the number
3436 * we'll use initially.
3437 */
3438 if (be_roce_supported(adapter)) {
3439 max_roce_eqs =
3440 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3441 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3442 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3443 } else {
3444 num_vec = max(adapter->cfg_num_rx_irqs,
3445 adapter->cfg_num_tx_irqs);
3446 }
3447
3448 for (i = 0; i < num_vec; i++)
3449 adapter->msix_entries[i].entry = i;
3450
3451 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3452 MIN_MSIX_VECTORS, num_vec);
3453 if (num_vec < 0)
3454 goto fail;
3455
3456 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3457 adapter->num_msix_roce_vec = num_vec / 2;
3458 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3459 adapter->num_msix_roce_vec);
3460 }
3461
3462 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
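	/* Illustration: if 15 vectors were granted and RoCE is supported,
	 * 7 are set aside for RoCE above and the remaining 8 are used for
	 * the NIC event queues.
	 */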
3463
3464 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3465 adapter->num_msix_vec);
3466 return 0;
3467
3468 fail:
3469 dev_warn(dev, "MSIx enable failed\n");
3470
3471 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3472 if (be_virtfn(adapter))
3473 return num_vec;
3474 return 0;
3475 }
3476
3477 static inline int be_msix_vec_get(struct be_adapter *adapter,
3478 struct be_eq_obj *eqo)
3479 {
3480 return adapter->msix_entries[eqo->msix_idx].vector;
3481 }
3482
3483 static int be_msix_register(struct be_adapter *adapter)
3484 {
3485 struct net_device *netdev = adapter->netdev;
3486 struct be_eq_obj *eqo;
3487 int status, i, vec;
3488
3489 for_all_evt_queues(adapter, eqo, i) {
3490 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3491 vec = be_msix_vec_get(adapter, eqo);
3492 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3493 if (status)
3494 goto err_msix;
3495
3496 irq_update_affinity_hint(vec, eqo->affinity_mask);
3497 }
3498
3499 return 0;
3500 err_msix:
3501 for (i--; i >= 0; i--) {
3502 eqo = &adapter->eq_obj[i];
3503 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3504 }
3505 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3506 status);
3507 be_msix_disable(adapter);
3508 return status;
3509 }
3510
3511 static int be_irq_register(struct be_adapter *adapter)
3512 {
3513 struct net_device *netdev = adapter->netdev;
3514 int status;
3515
3516 if (msix_enabled(adapter)) {
3517 status = be_msix_register(adapter);
3518 if (status == 0)
3519 goto done;
3520 /* INTx is not supported for VF */
3521 if (be_virtfn(adapter))
3522 return status;
3523 }
3524
3525 /* INTx: only the first EQ is used */
3526 netdev->irq = adapter->pdev->irq;
3527 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3528 &adapter->eq_obj[0]);
3529 if (status) {
3530 dev_err(&adapter->pdev->dev,
3531 "INTx request IRQ failed - err %d\n", status);
3532 return status;
3533 }
3534 done:
3535 adapter->isr_registered = true;
3536 return 0;
3537 }
3538
3539 static void be_irq_unregister(struct be_adapter *adapter)
3540 {
3541 struct net_device *netdev = adapter->netdev;
3542 struct be_eq_obj *eqo;
3543 int i, vec;
3544
3545 if (!adapter->isr_registered)
3546 return;
3547
3548 /* INTx */
3549 if (!msix_enabled(adapter)) {
3550 free_irq(netdev->irq, &adapter->eq_obj[0]);
3551 goto done;
3552 }
3553
3554 /* MSIx */
3555 for_all_evt_queues(adapter, eqo, i) {
3556 vec = be_msix_vec_get(adapter, eqo);
3557 irq_update_affinity_hint(vec, NULL);
3558 free_irq(vec, eqo);
3559 }
3560
3561 done:
3562 adapter->isr_registered = false;
3563 }
3564
3565 static void be_rx_qs_destroy(struct be_adapter *adapter)
3566 {
3567 struct rss_info *rss = &adapter->rss_info;
3568 struct be_queue_info *q;
3569 struct be_rx_obj *rxo;
3570 int i;
3571
3572 for_all_rx_queues(adapter, rxo, i) {
3573 q = &rxo->q;
3574 if (q->created) {
3575 /* If RXQs are destroyed while in an "out of buffer"
3576 * state, there is a possibility of an HW stall on
3577 * Lancer. So, post 64 buffers to each queue to relieve
3578 * the "out of buffer" condition.
3579 * Make sure there's space in the RXQ before posting.
3580 */
3581 if (lancer_chip(adapter)) {
3582 be_rx_cq_clean(rxo);
3583 if (atomic_read(&q->used) == 0)
3584 be_post_rx_frags(rxo, GFP_KERNEL,
3585 MAX_RX_POST);
3586 }
3587
3588 be_cmd_rxq_destroy(adapter, q);
3589 be_rx_cq_clean(rxo);
3590 be_rxq_clean(rxo);
3591 }
3592 be_queue_free(adapter, q);
3593 }
3594
3595 if (rss->rss_flags) {
3596 rss->rss_flags = RSS_ENABLE_NONE;
3597 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3598 128, rss->rss_hkey);
3599 }
3600 }
3601
3602 static void be_disable_if_filters(struct be_adapter *adapter)
3603 {
3604 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3605 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3606 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3607 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3608 eth_zero_addr(adapter->dev_mac);
3609 }
3610
3611 be_clear_uc_list(adapter);
3612 be_clear_mc_list(adapter);
3613
3614 /* The IFACE flags are enabled in the open path and cleared
3615 * in the close path. When a VF gets detached from the host and
3616 * assigned to a VM the following happens:
3617 * - VF's IFACE flags get cleared in the detach path
3618 * - IFACE create is issued by the VF in the attach path
3619 * Due to a bug in the BE3/Skyhawk-R FW
3620 * (Lancer FW doesn't have the bug), the IFACE capability flags
3621 * specified along with the IFACE create cmd issued by a VF are not
3622 * honoured by FW. As a consequence, if a *new* driver
3623 * (that enables/disables IFACE flags in open/close)
3624 * is loaded in the host and an *old* driver is used by a VM/VF,
3625 * the IFACE gets created *without* the needed flags.
3626 * To avoid this, disable RX-filter flags only for Lancer.
3627 */
3628 if (lancer_chip(adapter)) {
3629 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3630 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3631 }
3632 }
3633
3634 static int be_close(struct net_device *netdev)
3635 {
3636 struct be_adapter *adapter = netdev_priv(netdev);
3637 struct be_eq_obj *eqo;
3638 int i;
3639
3640 /* This protection is needed as be_close() may be called even when the
3641 * adapter is in cleared state (after eeh perm failure)
3642 */
3643 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3644 return 0;
3645
3646 /* Before attempting cleanup ensure all the pending cmds in the
3647 * config_wq have finished execution
3648 */
3649 flush_workqueue(be_wq);
3650
3651 be_disable_if_filters(adapter);
3652
3653 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3654 for_all_evt_queues(adapter, eqo, i) {
3655 napi_disable(&eqo->napi);
3656 }
3657 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3658 }
3659
3660 be_async_mcc_disable(adapter);
3661
3662 /* Wait for all pending tx completions to arrive so that
3663 * all tx skbs are freed.
3664 */
3665 netif_tx_disable(netdev);
3666 be_tx_compl_clean(adapter);
3667
3668 be_rx_qs_destroy(adapter);
3669
3670 for_all_evt_queues(adapter, eqo, i) {
3671 if (msix_enabled(adapter))
3672 synchronize_irq(be_msix_vec_get(adapter, eqo));
3673 else
3674 synchronize_irq(netdev->irq);
3675 be_eq_clean(eqo);
3676 }
3677
3678 be_irq_unregister(adapter);
3679
3680 return 0;
3681 }
3682
3683 static int be_rx_qs_create(struct be_adapter *adapter)
3684 {
3685 struct rss_info *rss = &adapter->rss_info;
3686 u8 rss_key[RSS_HASH_KEY_LEN];
3687 struct be_rx_obj *rxo;
3688 int rc, i, j;
3689
3690 for_all_rx_queues(adapter, rxo, i) {
3691 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3692 sizeof(struct be_eth_rx_d));
3693 if (rc)
3694 return rc;
3695 }
3696
3697 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3698 rxo = default_rxo(adapter);
3699 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3700 rx_frag_size, adapter->if_handle,
3701 false, &rxo->rss_id);
3702 if (rc)
3703 return rc;
3704 }
3705
3706 for_all_rss_queues(adapter, rxo, i) {
3707 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3708 rx_frag_size, adapter->if_handle,
3709 true, &rxo->rss_id);
3710 if (rc)
3711 return rc;
3712 }
3713
3714 if (be_multi_rxq(adapter)) {
3715 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3716 for_all_rss_queues(adapter, rxo, i) {
3717 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3718 break;
3719 rss->rsstable[j + i] = rxo->rss_id;
3720 rss->rss_queue[j + i] = i;
3721 }
3722 }
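/* Illustrative example: with 4 RSS queues the loops above fill all
 * RSS_INDIR_TABLE_LEN entries of rsstable with the repeating pattern
 * q0, q1, q2, q3, so hashed flows spread round-robin across the RSS queues.
 */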
3723 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3724 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3725
3726 if (!BEx_chip(adapter))
3727 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3728 RSS_ENABLE_UDP_IPV6;
3729
3730 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3731 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3732 RSS_INDIR_TABLE_LEN, rss_key);
3733 if (rc) {
3734 rss->rss_flags = RSS_ENABLE_NONE;
3735 return rc;
3736 }
3737
3738 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3739 } else {
3740 /* Disable RSS, if only default RX Q is created */
3741 rss->rss_flags = RSS_ENABLE_NONE;
3742 }
3743
3744
3745 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3746 * which is a queue empty condition
3747 */
3748 for_all_rx_queues(adapter, rxo, i)
3749 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3750
3751 return 0;
3752 }
3753
3754 static int be_enable_if_filters(struct be_adapter *adapter)
3755 {
3756 int status;
3757
3758 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3759 if (status)
3760 return status;
3761
3762 /* Normally this condition is true as ->dev_mac is zeroed.
3763 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3764 * subsequent be_dev_mac_add() can fail (after fresh boot)
3765 */
3766 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3767 int old_pmac_id = -1;
3768
3769 /* Remember old programmed MAC if any - can happen on BE3 VF */
3770 if (!is_zero_ether_addr(adapter->dev_mac))
3771 old_pmac_id = adapter->pmac_id[0];
3772
3773 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3774 if (status)
3775 return status;
3776
3777 /* Delete the old programmed MAC as we successfully programmed
3778 * a new MAC
3779 */
3780 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3781 be_dev_mac_del(adapter, old_pmac_id);
3782
3783 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3784 }
3785
3786 if (adapter->vlans_added)
3787 be_vid_config(adapter);
3788
3789 __be_set_rx_mode(adapter);
3790
3791 return 0;
3792 }
3793
3794 static int be_open(struct net_device *netdev)
3795 {
3796 struct be_adapter *adapter = netdev_priv(netdev);
3797 struct be_eq_obj *eqo;
3798 struct be_rx_obj *rxo;
3799 struct be_tx_obj *txo;
3800 u8 link_status;
3801 int status, i;
3802
3803 status = be_rx_qs_create(adapter);
3804 if (status)
3805 goto err;
3806
3807 status = be_enable_if_filters(adapter);
3808 if (status)
3809 goto err;
3810
3811 status = be_irq_register(adapter);
3812 if (status)
3813 goto err;
3814
3815 for_all_rx_queues(adapter, rxo, i)
3816 be_cq_notify(adapter, rxo->cq.id, true, 0);
3817
3818 for_all_tx_queues(adapter, txo, i)
3819 be_cq_notify(adapter, txo->cq.id, true, 0);
3820
3821 be_async_mcc_enable(adapter);
3822
3823 for_all_evt_queues(adapter, eqo, i) {
3824 napi_enable(&eqo->napi);
3825 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3826 }
3827 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3828
3829 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3830 if (!status)
3831 be_link_status_update(adapter, link_status);
3832
3833 netif_tx_start_all_queues(netdev);
3834
3835 udp_tunnel_nic_reset_ntf(netdev);
3836
3837 return 0;
3838 err:
3839 be_close(adapter->netdev);
3840 return -EIO;
3841 }
3842
3843 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3844 {
3845 u32 addr;
3846
3847 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3848
3849 mac[5] = (u8)(addr & 0xFF);
3850 mac[4] = (u8)((addr >> 8) & 0xFF);
3851 mac[3] = (u8)((addr >> 16) & 0xFF);
3852 /* Use the OUI from the current MAC address */
3853 memcpy(mac, adapter->netdev->dev_addr, 3);
3854 }
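/* Illustrative example: if the PF MAC is 00:90:fa:12:34:56 and jhash()
 * returns 0x00abcdef, the seed MAC keeps the 00:90:fa OUI and becomes
 * 00:90:fa:ab:cd:ef; be_vf_eth_addr_config() below then assigns seed,
 * seed + 1, seed + 2, ... to successive VFs by incrementing mac[5].
 */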
3855
3856 /*
3857 * Generate a seed MAC address from the PF MAC Address using jhash.
3858 * MAC addresses for VFs are assigned incrementally starting from the seed.
3859 * These addresses are programmed in the ASIC by the PF and the VF driver
3860 * queries for the MAC address during its probe.
3861 */
3862 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3863 {
3864 u32 vf;
3865 int status = 0;
3866 u8 mac[ETH_ALEN];
3867 struct be_vf_cfg *vf_cfg;
3868
3869 be_vf_eth_addr_generate(adapter, mac);
3870
3871 for_all_vfs(adapter, vf_cfg, vf) {
3872 if (BEx_chip(adapter))
3873 status = be_cmd_pmac_add(adapter, mac,
3874 vf_cfg->if_handle,
3875 &vf_cfg->pmac_id, vf + 1);
3876 else
3877 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3878 vf + 1);
3879
3880 if (status)
3881 dev_err(&adapter->pdev->dev,
3882 "Mac address assignment failed for VF %d\n",
3883 vf);
3884 else
3885 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3886
3887 mac[5] += 1;
3888 }
3889 return status;
3890 }
3891
3892 static int be_vfs_mac_query(struct be_adapter *adapter)
3893 {
3894 int status, vf;
3895 u8 mac[ETH_ALEN];
3896 struct be_vf_cfg *vf_cfg;
3897
3898 for_all_vfs(adapter, vf_cfg, vf) {
3899 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3900 mac, vf_cfg->if_handle,
3901 false, vf+1);
3902 if (status)
3903 return status;
3904 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3905 }
3906 return 0;
3907 }
3908
3909 static void be_vf_clear(struct be_adapter *adapter)
3910 {
3911 struct be_vf_cfg *vf_cfg;
3912 u32 vf;
3913
3914 if (pci_vfs_assigned(adapter->pdev)) {
3915 dev_warn(&adapter->pdev->dev,
3916 "VFs are assigned to VMs: not disabling VFs\n");
3917 goto done;
3918 }
3919
3920 pci_disable_sriov(adapter->pdev);
3921
3922 for_all_vfs(adapter, vf_cfg, vf) {
3923 if (BEx_chip(adapter))
3924 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3925 vf_cfg->pmac_id, vf + 1);
3926 else
3927 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3928 vf + 1);
3929
3930 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3931 }
3932
3933 if (BE3_chip(adapter))
3934 be_cmd_set_hsw_config(adapter, 0, 0,
3935 adapter->if_handle,
3936 PORT_FWD_TYPE_PASSTHRU, 0);
3937 done:
3938 kfree(adapter->vf_cfg);
3939 adapter->num_vfs = 0;
3940 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3941 }
3942
3943 static void be_clear_queues(struct be_adapter *adapter)
3944 {
3945 be_mcc_queues_destroy(adapter);
3946 be_rx_cqs_destroy(adapter);
3947 be_tx_queues_destroy(adapter);
3948 be_evt_queues_destroy(adapter);
3949 }
3950
3951 static void be_cancel_worker(struct be_adapter *adapter)
3952 {
3953 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3954 cancel_delayed_work_sync(&adapter->work);
3955 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3956 }
3957 }
3958
3959 static void be_cancel_err_detection(struct be_adapter *adapter)
3960 {
3961 struct be_error_recovery *err_rec = &adapter->error_recovery;
3962
3963 if (!be_err_recovery_workq)
3964 return;
3965
3966 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3967 cancel_delayed_work_sync(&err_rec->err_detection_work);
3968 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3969 }
3970 }
3971
3972 /* VxLAN offload Notes:
3973 *
3974 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
3975 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
3976 * is expected to work across all types of IP tunnels once exported. Skyhawk
3977 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
3978 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
3979 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
3980 * those other tunnels are unexported on the fly through ndo_features_check().
3981 */
3982 static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
3983 unsigned int entry, struct udp_tunnel_info *ti)
3984 {
3985 struct be_adapter *adapter = netdev_priv(netdev);
3986 struct device *dev = &adapter->pdev->dev;
3987 int status;
3988
3989 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3990 OP_CONVERT_NORMAL_TO_TUNNEL);
3991 if (status) {
3992 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3993 return status;
3994 }
3995 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3996
3997 status = be_cmd_set_vxlan_port(adapter, ti->port);
3998 if (status) {
3999 dev_warn(dev, "Failed to add VxLAN port\n");
4000 return status;
4001 }
4002 adapter->vxlan_port = ti->port;
4003
4004 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4005 NETIF_F_TSO | NETIF_F_TSO6 |
4006 NETIF_F_GSO_UDP_TUNNEL;
4007
4008 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4009 be16_to_cpu(ti->port));
4010 return 0;
4011 }
4012
4013 static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4014 unsigned int entry, struct udp_tunnel_info *ti)
4015 {
4016 struct be_adapter *adapter = netdev_priv(netdev);
4017
4018 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4019 be_cmd_manage_iface(adapter, adapter->if_handle,
4020 OP_CONVERT_TUNNEL_TO_NORMAL);
4021
4022 if (adapter->vxlan_port)
4023 be_cmd_set_vxlan_port(adapter, 0);
4024
4025 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4026 adapter->vxlan_port = 0;
4027
4028 netdev->hw_enc_features = 0;
4029 return 0;
4030 }
4031
4032 static const struct udp_tunnel_nic_info be_udp_tunnels = {
4033 .set_port = be_vxlan_set_port,
4034 .unset_port = be_vxlan_unset_port,
4035 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
4036 .tables = {
4037 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
4038 },
4039 };
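/* With UDP_TUNNEL_NIC_INFO_OPEN_ONLY the udp_tunnel_nic core programs
 * ports only while the interface is up; the udp_tunnel_nic_reset_ntf()
 * call in be_open() asks the core to replay the single VxLAN table
 * entry each time the device is (re)opened.
 */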
4040
4041 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4042 struct be_resources *vft_res)
4043 {
4044 struct be_resources res = adapter->pool_res;
4045 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4046 struct be_resources res_mod = {0};
4047 u16 num_vf_qs = 1;
4048
4049 /* Distribute the queue resources among the PF and its VFs */
4050 if (num_vfs) {
4051 /* Divide the rx queues evenly among the VFs and the PF, capped
4052 * at VF-EQ-count. Any remainder queues belong to the PF.
4053 */
4054 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4055 res.max_rss_qs / (num_vfs + 1));
4056
4057 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4058 * RSS Tables per port. Provide RSS on VFs only if the number of
4059 * VFs requested is less than its PF pool's RSS Tables limit.
4060 */
4061 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
4062 num_vf_qs = 1;
4063 }
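/* Illustrative example: with num_vfs = 7 and res.max_rss_qs = 64, each
 * function (the PF plus 7 VFs) gets min(SH_VF_MAX_NIC_EQS, 64 / 8) RSS
 * queues; if num_vfs reaches the PF pool's RSS tables limit, every VF
 * falls back to a single queue pair.
 */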
4064
4065 /* GET_PROFILE_CONFIG sets to all '1's the fields of this resource
4066 * that are modifiable using the SET_PROFILE_CONFIG cmd.
4067 */
4068 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4069 RESOURCE_MODIFIABLE, 0);
4070
4071 /* If RSS IFACE capability flags are modifiable for a VF, set the
4072 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4073 * more than 1 RSSQ is available for a VF.
4074 * Otherwise, provision only 1 queue pair for VF.
4075 */
4076 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4077 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4078 if (num_vf_qs > 1) {
4079 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4080 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4081 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4082 } else {
4083 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4084 BE_IF_FLAGS_DEFQ_RSS);
4085 }
4086 } else {
4087 num_vf_qs = 1;
4088 }
4089
4090 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4091 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4092 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4093 }
4094
4095 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4096 vft_res->max_rx_qs = num_vf_qs;
4097 vft_res->max_rss_qs = num_vf_qs;
4098 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4099 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4100
4101 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4102 * among the PF and its VFs, if the fields are changeable
4103 */
4104 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4105 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4106
4107 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4108 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4109
4110 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4111 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4112
4113 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4114 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
4115 }
4116
4117 static void be_if_destroy(struct be_adapter *adapter)
4118 {
4119 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4120
4121 kfree(adapter->pmac_id);
4122 adapter->pmac_id = NULL;
4123
4124 kfree(adapter->mc_list);
4125 adapter->mc_list = NULL;
4126
4127 kfree(adapter->uc_list);
4128 adapter->uc_list = NULL;
4129 }
4130
4131 static int be_clear(struct be_adapter *adapter)
4132 {
4133 struct pci_dev *pdev = adapter->pdev;
4134 struct be_resources vft_res = {0};
4135
4136 be_cancel_worker(adapter);
4137
4138 flush_workqueue(be_wq);
4139
4140 if (sriov_enabled(adapter))
4141 be_vf_clear(adapter);
4142
4143 /* Re-configure FW to distribute resources evenly across max-supported
4144 * number of VFs, only when VFs are not already enabled.
4145 */
4146 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4147 !pci_vfs_assigned(pdev)) {
4148 be_calculate_vf_res(adapter,
4149 pci_sriov_get_totalvfs(pdev),
4150 &vft_res);
4151 be_cmd_set_sriov_config(adapter, adapter->pool_res,
4152 pci_sriov_get_totalvfs(pdev),
4153 &vft_res);
4154 }
4155
4156 be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
4157
4158 be_if_destroy(adapter);
4159
4160 be_clear_queues(adapter);
4161
4162 be_msix_disable(adapter);
4163 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
4164 return 0;
4165 }
4166
4167 static int be_vfs_if_create(struct be_adapter *adapter)
4168 {
4169 struct be_resources res = {0};
4170 u32 cap_flags, en_flags, vf;
4171 struct be_vf_cfg *vf_cfg;
4172 int status;
4173
4174 /* If a FW profile exists, then cap_flags are updated */
4175 cap_flags = BE_VF_IF_EN_FLAGS;
4176
4177 for_all_vfs(adapter, vf_cfg, vf) {
4178 if (!BE3_chip(adapter)) {
4179 status = be_cmd_get_profile_config(adapter, &res, NULL,
4180 ACTIVE_PROFILE_TYPE,
4181 RESOURCE_LIMITS,
4182 vf + 1);
4183 if (!status) {
4184 cap_flags = res.if_cap_flags;
4185 /* Prevent VFs from enabling VLAN promiscuous
4186 * mode
4187 */
4188 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4189 }
4190 }
4191
4192 /* PF should enable IF flags during proxy if_create call */
4193 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
4194 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4195 &vf_cfg->if_handle, vf + 1);
4196 if (status)
4197 return status;
4198 }
4199
4200 return 0;
4201 }
4202
4203 static int be_vf_setup_init(struct be_adapter *adapter)
4204 {
4205 struct be_vf_cfg *vf_cfg;
4206 int vf;
4207
4208 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4209 GFP_KERNEL);
4210 if (!adapter->vf_cfg)
4211 return -ENOMEM;
4212
4213 for_all_vfs(adapter, vf_cfg, vf) {
4214 vf_cfg->if_handle = -1;
4215 vf_cfg->pmac_id = -1;
4216 }
4217 return 0;
4218 }
4219
4220 static int be_vf_setup(struct be_adapter *adapter)
4221 {
4222 struct device *dev = &adapter->pdev->dev;
4223 struct be_vf_cfg *vf_cfg;
4224 int status, old_vfs, vf;
4225 bool spoofchk;
4226
4227 old_vfs = pci_num_vf(adapter->pdev);
4228
4229 status = be_vf_setup_init(adapter);
4230 if (status)
4231 goto err;
4232
4233 if (old_vfs) {
4234 for_all_vfs(adapter, vf_cfg, vf) {
4235 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4236 if (status)
4237 goto err;
4238 }
4239
4240 status = be_vfs_mac_query(adapter);
4241 if (status)
4242 goto err;
4243 } else {
4244 status = be_vfs_if_create(adapter);
4245 if (status)
4246 goto err;
4247
4248 status = be_vf_eth_addr_config(adapter);
4249 if (status)
4250 goto err;
4251 }
4252
4253 for_all_vfs(adapter, vf_cfg, vf) {
4254 /* Allow VFs to program MAC/VLAN filters */
4255 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4256 vf + 1);
4257 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
4258 status = be_cmd_set_fn_privileges(adapter,
4259 vf_cfg->privileges |
4260 BE_PRIV_FILTMGMT,
4261 vf + 1);
4262 if (!status) {
4263 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
4264 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4265 vf);
4266 }
4267 }
4268
4269 /* Allow full available bandwidth */
4270 if (!old_vfs)
4271 be_cmd_config_qos(adapter, 0, 0, vf + 1);
4272
4273 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4274 vf_cfg->if_handle, NULL,
4275 &spoofchk);
4276 if (!status)
4277 vf_cfg->spoofchk = spoofchk;
4278
4279 if (!old_vfs) {
4280 be_cmd_enable_vf(adapter, vf + 1);
4281 be_cmd_set_logical_link_config(adapter,
4282 IFLA_VF_LINK_STATE_AUTO,
4283 vf+1);
4284 }
4285 }
4286
4287 if (!old_vfs) {
4288 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4289 if (status) {
4290 dev_err(dev, "SRIOV enable failed\n");
4291 adapter->num_vfs = 0;
4292 goto err;
4293 }
4294 }
4295
4296 if (BE3_chip(adapter)) {
4297 /* On BE3, enable VEB only when SRIOV is enabled */
4298 status = be_cmd_set_hsw_config(adapter, 0, 0,
4299 adapter->if_handle,
4300 PORT_FWD_TYPE_VEB, 0);
4301 if (status)
4302 goto err;
4303 }
4304
4305 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4306 return 0;
4307 err:
4308 dev_err(dev, "VF setup failed\n");
4309 be_vf_clear(adapter);
4310 return status;
4311 }
4312
4313 /* Converting function_mode bits on BE3 to SH mc_type enums */
4314
4315 static u8 be_convert_mc_type(u32 function_mode)
4316 {
4317 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4318 return vNIC1;
4319 else if (function_mode & QNQ_MODE)
4320 return FLEX10;
4321 else if (function_mode & VNIC_MODE)
4322 return vNIC2;
4323 else if (function_mode & UMC_ENABLED)
4324 return UMC;
4325 else
4326 return MC_NONE;
4327 }
4328
4329 /* On BE2/BE3 FW does not suggest the supported limits */
4330 static void BEx_get_resources(struct be_adapter *adapter,
4331 struct be_resources *res)
4332 {
4333 bool use_sriov = adapter->num_vfs ? 1 : 0;
4334
4335 if (be_physfn(adapter))
4336 res->max_uc_mac = BE_UC_PMAC_COUNT;
4337 else
4338 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4339
4340 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4341
4342 if (be_is_mc(adapter)) {
4343 /* Assuming that there are 4 channels per port,
4344 * when multi-channel is enabled
4345 */
4346 if (be_is_qnq_mode(adapter))
4347 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4348 else
4349 /* In a non-qnq multichannel mode, the pvid
4350 * takes up one vlan entry
4351 */
4352 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4353 } else {
4354 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4355 }
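/* Illustrative split: with 4 channels per port, a QnQ channel keeps one
 * eighth of BE_NUM_VLANS_SUPPORTED filters, while a non-QnQ multi-channel
 * function keeps a quarter minus the one entry used by the pvid.
 */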
4356
4357 res->max_mcast_mac = BE_MAX_MC;
4358
4359 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4360 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4361 * *only* if it is RSS-capable.
4362 */
4363 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4364 be_virtfn(adapter) ||
4365 (be_is_mc(adapter) &&
4366 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4367 res->max_tx_qs = 1;
4368 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4369 struct be_resources super_nic_res = {0};
4370
4371 /* On a SuperNIC profile, the driver needs to use the
4372 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4373 */
4374 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4375 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4376 0);
4377 /* Some old versions of BE3 FW don't report max_tx_qs value */
4378 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4379 } else {
4380 res->max_tx_qs = BE3_MAX_TX_QS;
4381 }
4382
4383 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4384 !use_sriov && be_physfn(adapter))
4385 res->max_rss_qs = (adapter->be3_native) ?
4386 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4387 res->max_rx_qs = res->max_rss_qs + 1;
4388
4389 if (be_physfn(adapter))
4390 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4391 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4392 else
4393 res->max_evt_qs = 1;
4394
4395 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4396 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4397 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4398 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4399 }
4400
4401 static void be_setup_init(struct be_adapter *adapter)
4402 {
4403 adapter->vlan_prio_bmap = 0xff;
4404 adapter->phy.link_speed = -1;
4405 adapter->if_handle = -1;
4406 adapter->be3_native = false;
4407 adapter->if_flags = 0;
4408 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4409 if (be_physfn(adapter))
4410 adapter->cmd_privileges = MAX_PRIVILEGES;
4411 else
4412 adapter->cmd_privileges = MIN_PRIVILEGES;
4413 }
4414
4415 /* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4416 * However, this HW limitation is not exposed to the host via any SLI cmd.
4417 * As a result, in the case of SRIOV and in particular multi-partition configs
4418 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4419 * for distribution between the VFs. This self-imposed limit will determine the
4420 * number of VFs for which RSS can be enabled.
4421 */
4422 static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4423 {
4424 struct be_port_resources port_res = {0};
4425 u8 rss_tables_on_port;
4426 u16 max_vfs = be_max_vfs(adapter);
4427
4428 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4429 RESOURCE_LIMITS, 0);
4430
4431 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4432
4433 /* Each PF Pool's RSS Tables limit =
4434 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4435 */
4436 adapter->pool_res.max_rss_tables =
4437 max_vfs * rss_tables_on_port / port_res.max_vfs;
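/* Illustrative example: if the port leaves rss_tables_on_port = 14 after
 * reserving one table per NIC PF, a PF with max_vfs = 32 out of
 * port_res.max_vfs = 64 gets 32 * 14 / 64 = 7 RSS tables for its pool.
 */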
4438 }
4439
4440 static int be_get_sriov_config(struct be_adapter *adapter)
4441 {
4442 struct be_resources res = {0};
4443 int max_vfs, old_vfs;
4444
4445 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4446 RESOURCE_LIMITS, 0);
4447
4448 /* Some old versions of BE3 FW don't report max_vfs value */
4449 if (BE3_chip(adapter) && !res.max_vfs) {
4450 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4451 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4452 }
4453
4454 adapter->pool_res = res;
4455
4456 /* If during previous unload of the driver, the VFs were not disabled,
4457 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4458 * Instead use the TotalVFs value stored in the pci-dev struct.
4459 */
4460 old_vfs = pci_num_vf(adapter->pdev);
4461 if (old_vfs) {
4462 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4463 old_vfs);
4464
4465 adapter->pool_res.max_vfs =
4466 pci_sriov_get_totalvfs(adapter->pdev);
4467 adapter->num_vfs = old_vfs;
4468 }
4469
4470 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4471 be_calculate_pf_pool_rss_tables(adapter);
4472 dev_info(&adapter->pdev->dev,
4473 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4474 be_max_pf_pool_rss_tables(adapter));
4475 }
4476 return 0;
4477 }
4478
4479 static void be_alloc_sriov_res(struct be_adapter *adapter)
4480 {
4481 int old_vfs = pci_num_vf(adapter->pdev);
4482 struct be_resources vft_res = {0};
4483 int status;
4484
4485 be_get_sriov_config(adapter);
4486
4487 if (!old_vfs)
4488 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4489
4490 /* When the HW is in SRIOV capable configuration, the PF-pool
4491 * resources are given to PF during driver load, if there are no
4492 * old VFs. This facility is not available in BE3 FW.
4493 * Also, this is done by FW in Lancer chip.
4494 */
4495 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4496 be_calculate_vf_res(adapter, 0, &vft_res);
4497 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4498 &vft_res);
4499 if (status)
4500 dev_err(&adapter->pdev->dev,
4501 "Failed to optimize SRIOV resources\n");
4502 }
4503 }
4504
4505 static int be_get_resources(struct be_adapter *adapter)
4506 {
4507 struct device *dev = &adapter->pdev->dev;
4508 struct be_resources res = {0};
4509 int status;
4510
4511 /* For Lancer, SH etc read per-function resource limits from FW.
4512 * GET_FUNC_CONFIG returns per function guaranteed limits.
4513 * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
4514 */
4515 if (BEx_chip(adapter)) {
4516 BEx_get_resources(adapter, &res);
4517 } else {
4518 status = be_cmd_get_func_config(adapter, &res);
4519 if (status)
4520 return status;
4521
4522 /* If a default RXQ must be created, we'll use up one RSSQ */
4523 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4524 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4525 res.max_rss_qs -= 1;
4526 }
4527
4528 /* If RoCE is supported stash away half the EQs for RoCE */
4529 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4530 res.max_evt_qs / 2 : res.max_evt_qs;
4531 adapter->res = res;
4532
4533 /* If FW supports RSS default queue, then skip creating non-RSS
4534 * queue for non-IP traffic.
4535 */
4536 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4537 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4538
4539 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4540 be_max_txqs(adapter), be_max_rxqs(adapter),
4541 be_max_rss(adapter), be_max_nic_eqs(adapter),
4542 be_max_vfs(adapter));
4543 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4544 be_max_uc(adapter), be_max_mc(adapter),
4545 be_max_vlans(adapter));
4546
4547 /* Ensure RX and TX queues are created in pairs at init time */
4548 adapter->cfg_num_rx_irqs =
4549 min_t(u16, netif_get_num_default_rss_queues(),
4550 be_max_qp_irqs(adapter));
4551 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
4552 return 0;
4553 }
4554
4555 static int be_get_config(struct be_adapter *adapter)
4556 {
4557 int status, level;
4558 u16 profile_id;
4559
4560 status = be_cmd_get_cntl_attributes(adapter);
4561 if (status)
4562 return status;
4563
4564 status = be_cmd_query_fw_cfg(adapter);
4565 if (status)
4566 return status;
4567
4568 if (!lancer_chip(adapter) && be_physfn(adapter))
4569 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4570
4571 if (BEx_chip(adapter)) {
4572 level = be_cmd_get_fw_log_level(adapter);
4573 adapter->msg_enable =
4574 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4575 }
4576
4577 be_cmd_get_acpi_wol_cap(adapter);
4578 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4579 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
4580
4581 be_cmd_query_port_name(adapter);
4582
4583 if (be_physfn(adapter)) {
4584 status = be_cmd_get_active_profile(adapter, &profile_id);
4585 if (!status)
4586 dev_info(&adapter->pdev->dev,
4587 "Using profile 0x%x\n", profile_id);
4588 }
4589
4590 return 0;
4591 }
4592
4593 static int be_mac_setup(struct be_adapter *adapter)
4594 {
4595 u8 mac[ETH_ALEN];
4596 int status;
4597
4598 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4599 status = be_cmd_get_perm_mac(adapter, mac);
4600 if (status)
4601 return status;
4602
4603 eth_hw_addr_set(adapter->netdev, mac);
4604 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4605
4606 /* Initial MAC for BE3 VFs is already programmed by PF */
4607 if (BEx_chip(adapter) && be_virtfn(adapter))
4608 memcpy(adapter->dev_mac, mac, ETH_ALEN);
4609 }
4610
4611 return 0;
4612 }
4613
4614 static void be_schedule_worker(struct be_adapter *adapter)
4615 {
4616 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
4617 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4618 }
4619
4620 static void be_destroy_err_recovery_workq(void)
4621 {
4622 if (!be_err_recovery_workq)
4623 return;
4624
4625 destroy_workqueue(be_err_recovery_workq);
4626 be_err_recovery_workq = NULL;
4627 }
4628
4629 static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
4630 {
4631 struct be_error_recovery *err_rec = &adapter->error_recovery;
4632
4633 if (!be_err_recovery_workq)
4634 return;
4635
4636 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4637 msecs_to_jiffies(delay));
4638 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4639 }
4640
4641 static int be_setup_queues(struct be_adapter *adapter)
4642 {
4643 struct net_device *netdev = adapter->netdev;
4644 int status;
4645
4646 status = be_evt_queues_create(adapter);
4647 if (status)
4648 goto err;
4649
4650 status = be_tx_qs_create(adapter);
4651 if (status)
4652 goto err;
4653
4654 status = be_rx_cqs_create(adapter);
4655 if (status)
4656 goto err;
4657
4658 status = be_mcc_queues_create(adapter);
4659 if (status)
4660 goto err;
4661
4662 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4663 if (status)
4664 goto err;
4665
4666 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4667 if (status)
4668 goto err;
4669
4670 return 0;
4671 err:
4672 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4673 return status;
4674 }
4675
4676 static int be_if_create(struct be_adapter *adapter)
4677 {
4678 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4679 u32 cap_flags = be_if_cap_flags(adapter);
4680
4681 /* alloc required memory for other filtering fields */
4682 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4683 sizeof(*adapter->pmac_id), GFP_KERNEL);
4684 if (!adapter->pmac_id)
4685 return -ENOMEM;
4686
4687 adapter->mc_list = kcalloc(be_max_mc(adapter),
4688 sizeof(*adapter->mc_list), GFP_KERNEL);
4689 if (!adapter->mc_list)
4690 return -ENOMEM;
4691
4692 adapter->uc_list = kcalloc(be_max_uc(adapter),
4693 sizeof(*adapter->uc_list), GFP_KERNEL);
4694 if (!adapter->uc_list)
4695 return -ENOMEM;
4696
4697 if (adapter->cfg_num_rx_irqs == 1)
4698 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4699
4700 en_flags &= cap_flags;
4701 /* will enable all the needed filter flags in be_open() */
4702 return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4703 &adapter->if_handle, 0);
4704 }
4705
4706 int be_update_queues(struct be_adapter *adapter)
4707 {
4708 struct net_device *netdev = adapter->netdev;
4709 int status;
4710
4711 if (netif_running(netdev)) {
4712 /* be_tx_timeout() must not run concurrently with this
4713 * function, synchronize with an already-running dev_watchdog
4714 */
4715 netif_tx_lock_bh(netdev);
4716 /* device cannot transmit now, avoid dev_watchdog timeouts */
4717 netif_carrier_off(netdev);
4718 netif_tx_unlock_bh(netdev);
4719
4720 be_close(netdev);
4721 }
4722
4723 be_cancel_worker(adapter);
4724
4725 /* If any vectors have been shared with RoCE we cannot re-program
4726 * the MSIx table.
4727 */
4728 if (!adapter->num_msix_roce_vec)
4729 be_msix_disable(adapter);
4730
4731 be_clear_queues(adapter);
4732 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4733 if (status)
4734 return status;
4735
4736 if (!msix_enabled(adapter)) {
4737 status = be_msix_enable(adapter);
4738 if (status)
4739 return status;
4740 }
4741
4742 status = be_if_create(adapter);
4743 if (status)
4744 return status;
4745
4746 status = be_setup_queues(adapter);
4747 if (status)
4748 return status;
4749
4750 be_schedule_worker(adapter);
4751
4752 /* The IF was destroyed and re-created. We need to clear
4753 * all promiscuous flags valid for the destroyed IF.
4754 * Without this, promisc mode is not restored during
4755 * be_open() because the driver thinks that it is
4756 * already enabled in HW.
4757 */
4758 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4759
4760 if (netif_running(netdev))
4761 status = be_open(netdev);
4762
4763 return status;
4764 }
4765
4766 static inline int fw_major_num(const char *fw_ver)
4767 {
4768 int fw_major = 0, i;
4769
4770 i = sscanf(fw_ver, "%d.", &fw_major);
4771 if (i != 1)
4772 return 0;
4773
4774 return fw_major;
4775 }
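/* Illustrative example: fw_major_num("4.9.416.0") returns 4; a version
 * string that does not start with a decimal number yields 0.
 */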
4776
4777 /* If it is error recovery, FLR the PF
4778 * Else if any VFs are already enabled don't FLR the PF
4779 */
4780 static bool be_reset_required(struct be_adapter *adapter)
4781 {
4782 if (be_error_recovering(adapter))
4783 return true;
4784 else
4785 return pci_num_vf(adapter->pdev) == 0;
4786 }
4787
4788 /* Wait for the FW to be ready and perform the required initialization */
4789 static int be_func_init(struct be_adapter *adapter)
4790 {
4791 int status;
4792
4793 status = be_fw_wait_ready(adapter);
4794 if (status)
4795 return status;
4796
4797 /* FW is now ready; clear errors to allow cmds/doorbell */
4798 be_clear_error(adapter, BE_CLEAR_ALL);
4799
4800 if (be_reset_required(adapter)) {
4801 status = be_cmd_reset_function(adapter);
4802 if (status)
4803 return status;
4804
4805 /* Wait for interrupts to quiesce after an FLR */
4806 msleep(100);
4807 }
4808
4809 /* Tell FW we're ready to fire cmds */
4810 status = be_cmd_fw_init(adapter);
4811 if (status)
4812 return status;
4813
4814 /* Allow interrupts for other ULPs running on NIC function */
4815 be_intr_set(adapter, true);
4816
4817 return 0;
4818 }
4819
4820 static int be_setup(struct be_adapter *adapter)
4821 {
4822 struct device *dev = &adapter->pdev->dev;
4823 int status;
4824
4825 status = be_func_init(adapter);
4826 if (status)
4827 return status;
4828
4829 be_setup_init(adapter);
4830
4831 if (!lancer_chip(adapter))
4832 be_cmd_req_native_mode(adapter);
4833
4834 /* invoke this cmd first to get pf_num and vf_num which are needed
4835 * for issuing profile related cmds
4836 */
4837 if (!BEx_chip(adapter)) {
4838 status = be_cmd_get_func_config(adapter, NULL);
4839 if (status)
4840 return status;
4841 }
4842
4843 status = be_get_config(adapter);
4844 if (status)
4845 goto err;
4846
4847 if (!BE2_chip(adapter) && be_physfn(adapter))
4848 be_alloc_sriov_res(adapter);
4849
4850 status = be_get_resources(adapter);
4851 if (status)
4852 goto err;
4853
4854 status = be_msix_enable(adapter);
4855 if (status)
4856 goto err;
4857
4858 /* will enable all the needed filter flags in be_open() */
4859 status = be_if_create(adapter);
4860 if (status)
4861 goto err;
4862
4863 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4864 rtnl_lock();
4865 status = be_setup_queues(adapter);
4866 rtnl_unlock();
4867 if (status)
4868 goto err;
4869
4870 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4871
4872 status = be_mac_setup(adapter);
4873 if (status)
4874 goto err;
4875
4876 be_cmd_get_fw_ver(adapter);
4877 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4878
4879 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4880 dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
4881 adapter->fw_ver);
4882 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4883 }
4884
4885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4886 adapter->rx_fc);
4887 if (status)
4888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4889 &adapter->rx_fc);
4890
4891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4892 adapter->tx_fc, adapter->rx_fc);
4893
4894 if (be_physfn(adapter))
4895 be_cmd_set_logical_link_config(adapter,
4896 IFLA_VF_LINK_STATE_AUTO, 0);
4897
4898 /* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
4899 * confusing a Linux bridge or OVS that it might be connected to.
4900 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4901 * when SRIOV is not enabled.
4902 */
4903 if (BE3_chip(adapter))
4904 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4905 PORT_FWD_TYPE_PASSTHRU, 0);
4906
4907 if (adapter->num_vfs)
4908 be_vf_setup(adapter);
4909
4910 status = be_cmd_get_phy_info(adapter);
4911 if (!status && be_pause_supported(adapter))
4912 adapter->phy.fc_autoneg = 1;
4913
4914 if (be_physfn(adapter) && !lancer_chip(adapter))
4915 be_cmd_set_features(adapter);
4916
4917 be_schedule_worker(adapter);
4918 adapter->flags |= BE_FLAGS_SETUP_DONE;
4919 return 0;
4920 err:
4921 be_clear(adapter);
4922 return status;
4923 }
4924
4925 #ifdef CONFIG_NET_POLL_CONTROLLER
4926 static void be_netpoll(struct net_device *netdev)
4927 {
4928 struct be_adapter *adapter = netdev_priv(netdev);
4929 struct be_eq_obj *eqo;
4930 int i;
4931
4932 for_all_evt_queues(adapter, eqo, i) {
4933 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4934 napi_schedule(&eqo->napi);
4935 }
4936 }
4937 #endif
4938
4939 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4940 {
4941 const struct firmware *fw;
4942 int status;
4943
4944 if (!netif_running(adapter->netdev)) {
4945 dev_err(&adapter->pdev->dev,
4946 "Firmware load not allowed (interface is down)\n");
4947 return -ENETDOWN;
4948 }
4949
4950 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4951 if (status)
4952 goto fw_exit;
4953
4954 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4955
4956 if (lancer_chip(adapter))
4957 status = lancer_fw_download(adapter, fw);
4958 else
4959 status = be_fw_download(adapter, fw);
4960
4961 if (!status)
4962 be_cmd_get_fw_ver(adapter);
4963
4964 fw_exit:
4965 release_firmware(fw);
4966 return status;
4967 }
4968
4969 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4970 u16 flags, struct netlink_ext_ack *extack)
4971 {
4972 struct be_adapter *adapter = netdev_priv(dev);
4973 struct nlattr *attr, *br_spec;
4974 int rem;
4975 int status = 0;
4976 u16 mode = 0;
4977
4978 if (!sriov_enabled(adapter))
4979 return -EOPNOTSUPP;
4980
4981 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4982 if (!br_spec)
4983 return -EINVAL;
4984
4985 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
4986 mode = nla_get_u16(attr);
4987 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4988 return -EOPNOTSUPP;
4989
4990 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4991 return -EINVAL;
4992
4993 status = be_cmd_set_hsw_config(adapter, 0, 0,
4994 adapter->if_handle,
4995 mode == BRIDGE_MODE_VEPA ?
4996 PORT_FWD_TYPE_VEPA :
4997 PORT_FWD_TYPE_VEB, 0);
4998 if (status)
4999 goto err;
5000
5001 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5002 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5003
5004 return status;
5005 }
5006 err:
5007 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5008 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5009
5010 return status;
5011 }
5012
5013 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5014 struct net_device *dev, u32 filter_mask,
5015 int nlflags)
5016 {
5017 struct be_adapter *adapter = netdev_priv(dev);
5018 int status = 0;
5019 u8 hsw_mode;
5020
5021 /* BE and Lancer chips support VEB mode only */
5022 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5023 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
5024 if (!pci_sriov_get_totalvfs(adapter->pdev))
5025 return 0;
5026 hsw_mode = PORT_FWD_TYPE_VEB;
5027 } else {
5028 status = be_cmd_get_hsw_config(adapter, NULL, 0,
5029 adapter->if_handle, &hsw_mode,
5030 NULL);
5031 if (status)
5032 return 0;
5033
5034 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5035 return 0;
5036 }
5037
5038 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5039 hsw_mode == PORT_FWD_TYPE_VEPA ?
5040 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
5041 0, 0, nlflags, filter_mask, NULL);
5042 }
5043
5044 static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5045 void (*func)(struct work_struct *))
5046 {
5047 struct be_cmd_work *work;
5048
5049 work = kzalloc(sizeof(*work), GFP_ATOMIC);
5050 if (!work) {
5051 dev_err(&adapter->pdev->dev,
5052 "be_work memory allocation failed\n");
5053 return NULL;
5054 }
5055
5056 INIT_WORK(&work->work, func);
5057 work->adapter = adapter;
5058 return work;
5059 }
5060
5061 static netdev_features_t be_features_check(struct sk_buff *skb,
5062 struct net_device *dev,
5063 netdev_features_t features)
5064 {
5065 struct be_adapter *adapter = netdev_priv(dev);
5066 u8 l4_hdr = 0;
5067
5068 if (skb_is_gso(skb)) {
5069 /* IPv6 TSO requests with extension hdrs are a problem
5070 * for Lancer and BE3 HW. Disable the TSO6 feature.
5071 */
5072 if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
5073 features &= ~NETIF_F_TSO6;
5074
5075 /* Lancer cannot handle the packet with MSS less than 256.
5076 * Also it can't handle a TSO packet with a single segment.
5077 * Disable GSO support in such cases.
5078 */
5079 if (lancer_chip(adapter) &&
5080 (skb_shinfo(skb)->gso_size < 256 ||
5081 skb_shinfo(skb)->gso_segs == 1))
5082 features &= ~NETIF_F_GSO_MASK;
5083 }
5084
5085 /* The code below restricts offload features for some tunneled and
5086 * Q-in-Q packets.
5087 * Offload features for normal (non tunnel) packets are unchanged.
5088 */
5089 features = vlan_features_check(skb, features);
5090 if (!skb->encapsulation ||
5091 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5092 return features;
5093
5094 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5095 * should disable tunnel offload features if it's not a VxLAN packet,
5096 * as tunnel offloads have been enabled only for VxLAN. This is done to
5097 * allow other tunneled traffic like GRE to work fine while VxLAN
5098 * offloads are configured in Skyhawk-R.
5099 */
5100 switch (vlan_get_protocol(skb)) {
5101 case htons(ETH_P_IP):
5102 l4_hdr = ip_hdr(skb)->protocol;
5103 break;
5104 case htons(ETH_P_IPV6):
5105 l4_hdr = ipv6_hdr(skb)->nexthdr;
5106 break;
5107 default:
5108 return features;
5109 }
5110
5111 if (l4_hdr != IPPROTO_UDP ||
5112 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5113 skb->inner_protocol != htons(ETH_P_TEB) ||
5114 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5115 sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5116 !adapter->vxlan_port ||
5117 udp_hdr(skb)->dest != adapter->vxlan_port)
5118 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
5119
5120 return features;
5121 }
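/* Illustrative example: a GRE-encapsulated TSO skb arriving while VxLAN
 * offloads are active fails the UDP/VxLAN checks above, so its checksum
 * and GSO offloads are cleared and the stack falls back to software.
 */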
5122
5123 static int be_get_phys_port_id(struct net_device *dev,
5124 struct netdev_phys_item_id *ppid)
5125 {
5126 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5127 struct be_adapter *adapter = netdev_priv(dev);
5128 u8 *id;
5129
5130 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5131 return -ENOSPC;
5132
5133 ppid->id[0] = adapter->hba_port_num + 1;
5134 id = &ppid->id[1];
5135 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5136 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5137 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5138
5139 ppid->id_len = id_len;
5140
5141 return 0;
5142 }
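/* Resulting ID layout: byte 0 carries hba_port_num + 1, followed by the
 * controller serial number words copied in reverse word order, so the ID
 * is unique per physical port of a given controller.
 */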
5143
5144 static void be_set_rx_mode(struct net_device *dev)
5145 {
5146 struct be_adapter *adapter = netdev_priv(dev);
5147 struct be_cmd_work *work;
5148
5149 work = be_alloc_work(adapter, be_work_set_rx_mode);
5150 if (work)
5151 queue_work(be_wq, &work->work);
5152 }
5153
5154 static const struct net_device_ops be_netdev_ops = {
5155 .ndo_open = be_open,
5156 .ndo_stop = be_close,
5157 .ndo_start_xmit = be_xmit,
5158 .ndo_set_rx_mode = be_set_rx_mode,
5159 .ndo_set_mac_address = be_mac_addr_set,
5160 .ndo_get_stats64 = be_get_stats64,
5161 .ndo_validate_addr = eth_validate_addr,
5162 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5163 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
5164 .ndo_set_vf_mac = be_set_vf_mac,
5165 .ndo_set_vf_vlan = be_set_vf_vlan,
5166 .ndo_set_vf_rate = be_set_vf_tx_rate,
5167 .ndo_get_vf_config = be_get_vf_config,
5168 .ndo_set_vf_link_state = be_set_vf_link_state,
5169 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
5170 .ndo_tx_timeout = be_tx_timeout,
5171 #ifdef CONFIG_NET_POLL_CONTROLLER
5172 .ndo_poll_controller = be_netpoll,
5173 #endif
5174 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5175 .ndo_bridge_getlink = be_ndo_bridge_getlink,
5176 .ndo_features_check = be_features_check,
5177 .ndo_get_phys_port_id = be_get_phys_port_id,
5178 };
5179
5180 static void be_netdev_init(struct net_device *netdev)
5181 {
5182 struct be_adapter *adapter = netdev_priv(netdev);
5183
5184 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5185 NETIF_F_GSO_UDP_TUNNEL |
5186 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5187 NETIF_F_HW_VLAN_CTAG_TX;
5188 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
5189 netdev->hw_features |= NETIF_F_RXHASH;
5190
5191 netdev->features |= netdev->hw_features |
5192 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
5193 NETIF_F_HIGHDMA;
5194
5195 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5196 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5197
5198 netdev->priv_flags |= IFF_UNICAST_FLT;
5199
5200 netdev->flags |= IFF_MULTICAST;
5201
5202 netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
5203
5204 netdev->netdev_ops = &be_netdev_ops;
5205
5206 netdev->ethtool_ops = &be_ethtool_ops;
5207
5208 if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
5209 netdev->udp_tunnel_nic_info = &be_udp_tunnels;
5210
5211 /* MTU range: 256 - 9000 */
5212 netdev->min_mtu = BE_MIN_MTU;
5213 netdev->max_mtu = BE_MAX_MTU;
5214 }
5215
5216 static void be_cleanup(struct be_adapter *adapter)
5217 {
5218 struct net_device *netdev = adapter->netdev;
5219
5220 rtnl_lock();
5221 netif_device_detach(netdev);
5222 if (netif_running(netdev))
5223 be_close(netdev);
5224 rtnl_unlock();
5225
5226 be_clear(adapter);
5227 }
5228
5229 static int be_resume(struct be_adapter *adapter)
5230 {
5231 struct net_device *netdev = adapter->netdev;
5232 int status;
5233
5234 status = be_setup(adapter);
5235 if (status)
5236 return status;
5237
5238 rtnl_lock();
5239 if (netif_running(netdev))
5240 status = be_open(netdev);
5241 rtnl_unlock();
5242
5243 if (status)
5244 return status;
5245
5246 netif_device_attach(netdev);
5247
5248 return 0;
5249 }
5250
5251 static void be_soft_reset(struct be_adapter *adapter)
5252 {
5253 u32 val;
5254
5255 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5256 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5257 val |= SLIPORT_SOFTRESET_SR_MASK;
5258 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5259 }
5260
5261 static bool be_err_is_recoverable(struct be_adapter *adapter)
5262 {
5263 struct be_error_recovery *err_rec = &adapter->error_recovery;
5264 unsigned long initial_idle_time =
5265 msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
5266 unsigned long recovery_interval =
5267 msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
5268 u16 ue_err_code;
5269 u32 val;
5270
5271 val = be_POST_stage_get(adapter);
5272 if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
5273 return false;
5274 ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
5275 if (ue_err_code == 0)
5276 return false;
5277
5278 dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
5279 ue_err_code);
5280
5281 if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
5282 dev_err(&adapter->pdev->dev,
5283 "Cannot recover within %lu sec from driver load\n",
5284 jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
5285 return false;
5286 }
5287
5288 if (err_rec->last_recovery_time && time_before_eq(
5289 jiffies - err_rec->last_recovery_time, recovery_interval)) {
5290 dev_err(&adapter->pdev->dev,
5291 "Cannot recover within %lu sec from last recovery\n",
5292 jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
5293 return false;
5294 }
5295
5296 if (ue_err_code == err_rec->last_err_code) {
5297 dev_err(&adapter->pdev->dev,
5298 "Cannot recover from a consecutive TPE error\n");
5299 return false;
5300 }
5301
5302 err_rec->last_recovery_time = jiffies;
5303 err_rec->last_err_code = ue_err_code;
5304 return true;
5305 }
5306
5307 static int be_tpe_recover(struct be_adapter *adapter)
5308 {
5309 struct be_error_recovery *err_rec = &adapter->error_recovery;
5310 int status = -EAGAIN;
5311 u32 val;
5312
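/* Recovery state machine implemented below:
 * NONE -> DETECT -> RESET (PF0 only) -> PRE_POLL -> REINIT.
 * A non-zero resched_delay with the default -EAGAIN status asks the
 * caller to re-invoke this function after that delay; a return of 0
 * means the TPE recovery steps have completed.
 */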
5313 switch (err_rec->recovery_state) {
5314 case ERR_RECOVERY_ST_NONE:
5315 err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
5316 err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
5317 break;
5318
5319 case ERR_RECOVERY_ST_DETECT:
5320 val = be_POST_stage_get(adapter);
5321 if ((val & POST_STAGE_RECOVERABLE_ERR) !=
5322 POST_STAGE_RECOVERABLE_ERR) {
5323 dev_err(&adapter->pdev->dev,
5324 "Unrecoverable HW error detected: 0x%x\n", val);
5325 status = -EINVAL;
5326 err_rec->resched_delay = 0;
5327 break;
5328 }
5329
5330 dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
5331
5332 /* Only PF0 initiates a Chip Soft Reset. But PF0 must wait UE2SR
5333 * milliseconds before it checks the final error status in
5334 * SLIPORT_SEMAPHORE to determine whether the recovery criteria
5335 * are met. If they are, PF0 initiates a Soft Reset.
5336 */
5337 if (adapter->pf_num == 0) {
5338 err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
5339 err_rec->resched_delay = err_rec->ue_to_reset_time -
5340 ERR_RECOVERY_UE_DETECT_DURATION;
5341 break;
5342 }
5343
5344 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5345 err_rec->resched_delay = err_rec->ue_to_poll_time -
5346 ERR_RECOVERY_UE_DETECT_DURATION;
5347 break;
5348
5349 case ERR_RECOVERY_ST_RESET:
5350 if (!be_err_is_recoverable(adapter)) {
5351 dev_err(&adapter->pdev->dev,
5352 "Failed to meet recovery criteria\n");
5353 status = -EIO;
5354 err_rec->resched_delay = 0;
5355 break;
5356 }
5357 be_soft_reset(adapter);
5358 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5359 err_rec->resched_delay = err_rec->ue_to_poll_time -
5360 err_rec->ue_to_reset_time;
5361 break;
5362
5363 case ERR_RECOVERY_ST_PRE_POLL:
5364 err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
5365 err_rec->resched_delay = 0;
5366 status = 0; /* done */
5367 break;
5368
5369 default:
5370 status = -EINVAL;
5371 err_rec->resched_delay = 0;
5372 break;
5373 }
5374
5375 return status;
5376 }
5377
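/* Recover the adapter after a HW error: run TPE recovery on non-Lancer
 * chips, wait for the firmware to become ready, then tear down and
 * re-create the adapter state.
 */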
5378 static int be_err_recover(struct be_adapter *adapter)
5379 {
5380 int status;
5381
5382 if (!lancer_chip(adapter)) {
5383 if (!adapter->error_recovery.recovery_supported ||
5384 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5385 return -EIO;
5386 status = be_tpe_recover(adapter);
5387 if (status)
5388 goto err;
5389 }
5390
5391 /* Wait for adapter to reach quiescent state before
5392 * destroying queues
5393 */
5394 status = be_fw_wait_ready(adapter);
5395 if (status)
5396 goto err;
5397
5398 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5399
5400 be_cleanup(adapter);
5401
5402 status = be_resume(adapter);
5403 if (status)
5404 goto err;
5405
5406 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5407
5408 err:
5409 return status;
5410 }
5411
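/* Delayed work that polls the adapter for HW errors and drives recovery;
 * it re-schedules itself unless recovery has permanently failed.
 */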
5412 static void be_err_detection_task(struct work_struct *work)
5413 {
5414 struct be_error_recovery *err_rec =
5415 container_of(work, struct be_error_recovery,
5416 err_detection_work.work);
5417 struct be_adapter *adapter =
5418 container_of(err_rec, struct be_adapter,
5419 error_recovery);
5420 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
5421 struct device *dev = &adapter->pdev->dev;
5422 int recovery_status;
5423
5424 be_detect_error(adapter);
5425 if (!be_check_error(adapter, BE_ERROR_HW))
5426 goto reschedule_task;
5427
5428 recovery_status = be_err_recover(adapter);
5429 if (!recovery_status) {
5430 err_rec->recovery_retries = 0;
5431 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
5432 dev_info(dev, "Adapter recovery successful\n");
5433 goto reschedule_task;
5434 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5435 /* BEx/SH recovery state machine */
5436 if (adapter->pf_num == 0 &&
5437 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5438 dev_err(&adapter->pdev->dev,
5439 "Adapter recovery in progress\n");
5440 resched_delay = err_rec->resched_delay;
5441 goto reschedule_task;
5442 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
5443 /* For VFs, check every second whether the PF has
5444 * allocated resources.
5445 */
5446 dev_err(dev, "Re-trying adapter recovery\n");
5447 goto reschedule_task;
5448 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5449 ERR_RECOVERY_MAX_RETRY_COUNT) {
5450 /* If another error occurs during recovery, the adapter takes
5451 * 30 seconds to come out of the error state. Retry error
5452 * recovery after this interval.
5453 */
5454 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5455 resched_delay = ERR_RECOVERY_RETRY_DELAY;
5456 goto reschedule_task;
5457 } else {
5458 dev_err(dev, "Adapter recovery failed\n");
5459 dev_err(dev, "Please reboot server to recover\n");
5460 }
5461
5462 return;
5463
5464 reschedule_task:
5465 be_schedule_err_detection(adapter, resched_delay);
5466 }
5467
5468 static void be_log_sfp_info(struct be_adapter *adapter)
5469 {
5470 int status;
5471
5472 status = be_cmd_query_sfp_info(adapter);
5473 if (!status) {
5474 dev_err(&adapter->pdev->dev,
5475 "Port %c: %s Vendor: %s part no: %s",
5476 adapter->port_name,
5477 be_misconfig_evt_port_state[adapter->phy_state],
5478 adapter->phy.vendor_name,
5479 adapter->phy.vendor_pn);
5480 }
5481 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
5482 }
5483
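/* Per-adapter housekeeping work, run every second: poll the die
 * temperature, reap MCC completions while the interface is down, refresh
 * stats, replenish starved RX queues, update EQ delays and log SFP info
 * on a PHY misconfiguration event.
 */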
5484 static void be_worker(struct work_struct *work)
5485 {
5486 struct be_adapter *adapter =
5487 container_of(work, struct be_adapter, work.work);
5488 struct be_rx_obj *rxo;
5489 int i;
5490
5491 if (be_physfn(adapter) &&
5492 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5493 be_cmd_get_die_temperature(adapter);
5494
5495 /* when interrupts are not yet enabled, just reap any pending
5496 * mcc completions
5497 */
5498 if (!netif_running(adapter->netdev)) {
5499 local_bh_disable();
5500 be_process_mcc(adapter);
5501 local_bh_enable();
5502 goto reschedule;
5503 }
5504
5505 if (!adapter->stats_cmd_sent) {
5506 if (lancer_chip(adapter))
5507 lancer_cmd_get_pport_stats(adapter,
5508 &adapter->stats_cmd);
5509 else
5510 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5511 }
5512
5513 for_all_rx_queues(adapter, rxo, i) {
5514 /* Replenish RX-queues starved due to memory
5515 * allocation failures.
5516 */
5517 if (rxo->rx_post_starved)
5518 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5519 }
5520
5521 /* EQ-delay update for Skyhawk is done while notifying EQ */
5522 if (!skyhawk_chip(adapter))
5523 be_eqd_update(adapter, false);
5524
5525 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
5526 be_log_sfp_info(adapter);
5527
5528 reschedule:
5529 adapter->work_counter++;
5530 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
5531 }
5532
5533 static void be_unmap_pci_bars(struct be_adapter *adapter)
5534 {
5535 if (adapter->csr)
5536 pci_iounmap(adapter->pdev, adapter->csr);
5537 if (adapter->db)
5538 pci_iounmap(adapter->pdev, adapter->db);
5539 if (adapter->pcicfg && adapter->pcicfg_mapped)
5540 pci_iounmap(adapter->pdev, adapter->pcicfg);
5541 }
5542
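/* The doorbell registers are in BAR 0 on Lancer chips and on VFs,
 * and in BAR 4 otherwise.
 */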
5543 static int db_bar(struct be_adapter *adapter)
5544 {
5545 if (lancer_chip(adapter) || be_virtfn(adapter))
5546 return 0;
5547 else
5548 return 4;
5549 }
5550
5551 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5552 {
5553 if (skyhawk_chip(adapter)) {
5554 adapter->roce_db.size = 4096;
5555 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5556 db_bar(adapter));
5557 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5558 db_bar(adapter));
5559 }
5560 return 0;
5561 }
5562
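/* Map the CSR (BEx PF only), doorbell and PCICFG BARs; VFs reach PCICFG
 * through an offset within the doorbell BAR.
 */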
5563 static int be_map_pci_bars(struct be_adapter *adapter)
5564 {
5565 struct pci_dev *pdev = adapter->pdev;
5566 u8 __iomem *addr;
5567 u32 sli_intf;
5568
5569 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5570 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5571 SLI_INTF_FAMILY_SHIFT;
5572 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5573
5574 if (BEx_chip(adapter) && be_physfn(adapter)) {
5575 adapter->csr = pci_iomap(pdev, 2, 0);
5576 if (!adapter->csr)
5577 return -ENOMEM;
5578 }
5579
5580 addr = pci_iomap(pdev, db_bar(adapter), 0);
5581 if (!addr)
5582 goto pci_map_err;
5583 adapter->db = addr;
5584
5585 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5586 if (be_physfn(adapter)) {
5587 /* PCICFG is the 2nd BAR in BE2 */
5588 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5589 if (!addr)
5590 goto pci_map_err;
5591 adapter->pcicfg = addr;
5592 adapter->pcicfg_mapped = true;
5593 } else {
5594 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5595 adapter->pcicfg_mapped = false;
5596 }
5597 }
5598
5599 be_roce_map_pci_bars(adapter);
5600 return 0;
5601
5602 pci_map_err:
5603 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5604 be_unmap_pci_bars(adapter);
5605 return -ENOMEM;
5606 }
5607
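/* Free the DMA-coherent mailbox, RX-filter and stats buffers allocated
 * by be_drv_init().
 */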
5608 static void be_drv_cleanup(struct be_adapter *adapter)
5609 {
5610 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5611 struct device *dev = &adapter->pdev->dev;
5612
5613 if (mem->va)
5614 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5615
5616 mem = &adapter->rx_filter;
5617 if (mem->va)
5618 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5619
5620 mem = &adapter->stats_cmd;
5621 if (mem->va)
5622 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5623 }
5624
5625 /* Allocate and initialize various fields in be_adapter struct */
5626 static int be_drv_init(struct be_adapter *adapter)
5627 {
5628 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5629 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5630 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5631 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5632 struct device *dev = &adapter->pdev->dev;
5633 int status = 0;
5634
5635 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5636 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5637 &mbox_mem_alloc->dma,
5638 GFP_KERNEL);
5639 if (!mbox_mem_alloc->va)
5640 return -ENOMEM;
5641
5642 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5643 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5644 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5645
5646 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5647 rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
5648 &rx_filter->dma, GFP_KERNEL);
5649 if (!rx_filter->va) {
5650 status = -ENOMEM;
5651 goto free_mbox;
5652 }
5653
5654 if (lancer_chip(adapter))
5655 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5656 else if (BE2_chip(adapter))
5657 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5658 else if (BE3_chip(adapter))
5659 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5660 else
5661 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5662 stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
5663 &stats_cmd->dma, GFP_KERNEL);
5664 if (!stats_cmd->va) {
5665 status = -ENOMEM;
5666 goto free_rx_filter;
5667 }
5668
5669 mutex_init(&adapter->mbox_lock);
5670 mutex_init(&adapter->rx_filter_lock);
5671 spin_lock_init(&adapter->mcc_lock);
5672 spin_lock_init(&adapter->mcc_cq_lock);
5673 init_completion(&adapter->et_cmd_compl);
5674
5675 pci_save_state(adapter->pdev);
5676
5677 INIT_DELAYED_WORK(&adapter->work, be_worker);
5678
5679 adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
5680 adapter->error_recovery.resched_delay = 0;
5681 INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
5682 be_err_detection_task);
5683
5684 adapter->rx_fc = true;
5685 adapter->tx_fc = true;
5686
5687 /* Must be a power of 2 or else MODULO will BUG_ON */
5688 adapter->be_get_temp_freq = 64;
5689
5690 return 0;
5691
5692 free_rx_filter:
5693 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5694 free_mbox:
5695 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5696 mbox_mem_alloc->dma);
5697 return status;
5698 }
5699
5700 static void be_remove(struct pci_dev *pdev)
5701 {
5702 struct be_adapter *adapter = pci_get_drvdata(pdev);
5703
5704 if (!adapter)
5705 return;
5706
5707 be_roce_dev_remove(adapter);
5708 be_intr_set(adapter, false);
5709
5710 be_cancel_err_detection(adapter);
5711
5712 unregister_netdev(adapter->netdev);
5713
5714 be_clear(adapter);
5715
5716 if (!pci_vfs_assigned(adapter->pdev))
5717 be_cmd_reset_function(adapter);
5718
5719 /* tell fw we're done with firing cmds */
5720 be_cmd_fw_clean(adapter);
5721
5722 be_unmap_pci_bars(adapter);
5723 be_drv_cleanup(adapter);
5724
5725 pci_release_regions(pdev);
5726 pci_disable_device(pdev);
5727
5728 free_netdev(adapter->netdev);
5729 }
5730
5731 static ssize_t be_hwmon_show_temp(struct device *dev,
5732 struct device_attribute *dev_attr,
5733 char *buf)
5734 {
5735 struct be_adapter *adapter = dev_get_drvdata(dev);
5736
5737 /* Unit: millidegree Celsius */
5738 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5739 return -EIO;
5740 else
5741 return sprintf(buf, "%u\n",
5742 adapter->hwmon_info.be_on_die_temp * 1000);
5743 }
5744
5745 static SENSOR_DEVICE_ATTR(temp1_input, 0444,
5746 be_hwmon_show_temp, NULL, 1);
5747
5748 static struct attribute *be_hwmon_attrs[] = {
5749 &sensor_dev_attr_temp1_input.dev_attr.attr,
5750 NULL
5751 };
5752
5753 ATTRIBUTE_GROUPS(be_hwmon);
5754
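/* Return a printable name for the adapter's multi-channel mode */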
5755 static char *mc_name(struct be_adapter *adapter)
5756 {
5757 char *str = ""; /* default */
5758
5759 switch (adapter->mc_type) {
5760 case UMC:
5761 str = "UMC";
5762 break;
5763 case FLEX10:
5764 str = "FLEX10";
5765 break;
5766 case vNIC1:
5767 str = "vNIC-1";
5768 break;
5769 case nPAR:
5770 str = "nPAR";
5771 break;
5772 case UFP:
5773 str = "UFP";
5774 break;
5775 case vNIC2:
5776 str = "vNIC-2";
5777 break;
5778 default:
5779 str = "";
5780 }
5781
5782 return str;
5783 }
5784
5785 static inline char *func_name(struct be_adapter *adapter)
5786 {
5787 return be_physfn(adapter) ? "PF" : "VF";
5788 }
5789
5790 static inline char *nic_name(struct pci_dev *pdev)
5791 {
5792 switch (pdev->device) {
5793 case OC_DEVICE_ID1:
5794 return OC_NAME;
5795 case OC_DEVICE_ID2:
5796 return OC_NAME_BE;
5797 case OC_DEVICE_ID3:
5798 case OC_DEVICE_ID4:
5799 return OC_NAME_LANCER;
5800 case BE_DEVICE_ID2:
5801 return BE3_NAME;
5802 case OC_DEVICE_ID5:
5803 case OC_DEVICE_ID6:
5804 return OC_NAME_SH;
5805 default:
5806 return BE_NAME;
5807 }
5808 }
5809
5810 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5811 {
5812 struct be_adapter *adapter;
5813 struct net_device *netdev;
5814 int status = 0;
5815
5816 status = pci_enable_device(pdev);
5817 if (status)
5818 goto do_none;
5819
5820 status = pci_request_regions(pdev, DRV_NAME);
5821 if (status)
5822 goto disable_dev;
5823 pci_set_master(pdev);
5824
5825 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5826 if (!netdev) {
5827 status = -ENOMEM;
5828 goto rel_reg;
5829 }
5830 adapter = netdev_priv(netdev);
5831 adapter->pdev = pdev;
5832 pci_set_drvdata(pdev, adapter);
5833 adapter->netdev = netdev;
5834 SET_NETDEV_DEV(netdev, &pdev->dev);
5835
5836 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5837 if (status) {
5838 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5839 goto free_netdev;
5840 }
5841
5842 status = be_map_pci_bars(adapter);
5843 if (status)
5844 goto free_netdev;
5845
5846 status = be_drv_init(adapter);
5847 if (status)
5848 goto unmap_bars;
5849
5850 status = be_setup(adapter);
5851 if (status)
5852 goto drv_cleanup;
5853
5854 be_netdev_init(netdev);
5855 status = register_netdev(netdev);
5856 if (status != 0)
5857 goto unsetup;
5858
5859 be_roce_dev_add(adapter);
5860
5861 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5862 adapter->error_recovery.probe_time = jiffies;
5863
5864 /* On-die temperature is not supported on VFs. */
5865 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5866 adapter->hwmon_info.hwmon_dev =
5867 devm_hwmon_device_register_with_groups(&pdev->dev,
5868 DRV_NAME,
5869 adapter,
5870 be_hwmon_groups);
5871 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5872 }
5873
5874 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5875 func_name(adapter), mc_name(adapter), adapter->port_name);
5876
5877 return 0;
5878
5879 unsetup:
5880 be_clear(adapter);
5881 drv_cleanup:
5882 be_drv_cleanup(adapter);
5883 unmap_bars:
5884 be_unmap_pci_bars(adapter);
5885 free_netdev:
5886 free_netdev(netdev);
5887 rel_reg:
5888 pci_release_regions(pdev);
5889 disable_dev:
5890 pci_disable_device(pdev);
5891 do_none:
5892 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5893 return status;
5894 }
5895
5896 static int __maybe_unused be_suspend(struct device *dev_d)
5897 {
5898 struct be_adapter *adapter = dev_get_drvdata(dev_d);
5899
5900 be_intr_set(adapter, false);
5901 be_cancel_err_detection(adapter);
5902
5903 be_cleanup(adapter);
5904
5905 return 0;
5906 }
5907
5908 static int __maybe_unused be_pci_resume(struct device *dev_d)
5909 {
5910 struct be_adapter *adapter = dev_get_drvdata(dev_d);
5911 int status = 0;
5912
5913 status = be_resume(adapter);
5914 if (status)
5915 return status;
5916
5917 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5918
5919 return 0;
5920 }
5921
5922 /*
5923 * An FLR will stop BE from DMAing any data.
5924 */
5925 static void be_shutdown(struct pci_dev *pdev)
5926 {
5927 struct be_adapter *adapter = pci_get_drvdata(pdev);
5928
5929 if (!adapter)
5930 return;
5931
5932 be_roce_dev_shutdown(adapter);
5933 cancel_delayed_work_sync(&adapter->work);
5934 be_cancel_err_detection(adapter);
5935
5936 netif_device_detach(adapter->netdev);
5937
5938 be_cmd_reset_function(adapter);
5939
5940 pci_disable_device(pdev);
5941 }
5942
5943 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5944 pci_channel_state_t state)
5945 {
5946 struct be_adapter *adapter = pci_get_drvdata(pdev);
5947
5948 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5949
5950 be_roce_dev_remove(adapter);
5951
5952 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5953 be_set_error(adapter, BE_ERROR_EEH);
5954
5955 be_cancel_err_detection(adapter);
5956
5957 be_cleanup(adapter);
5958 }
5959
5960 if (state == pci_channel_io_perm_failure)
5961 return PCI_ERS_RESULT_DISCONNECT;
5962
5963 pci_disable_device(pdev);
5964
5965 /* The error could cause the FW to trigger a flash debug dump.
5966 * Resetting the card while a flash dump is in progress
5967 * can cause it not to recover; wait for the dump to finish.
5968 * Wait only on the first function, as the dump is needed only
5969 * once per adapter.
5970 */
5971 if (pdev->devfn == 0)
5972 ssleep(30);
5973
5974 return PCI_ERS_RESULT_NEED_RESET;
5975 }
5976
5977 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5978 {
5979 struct be_adapter *adapter = pci_get_drvdata(pdev);
5980 int status;
5981
5982 dev_info(&adapter->pdev->dev, "EEH reset\n");
5983
5984 status = pci_enable_device(pdev);
5985 if (status)
5986 return PCI_ERS_RESULT_DISCONNECT;
5987
5988 pci_set_master(pdev);
5989 pci_restore_state(pdev);
5990
5991 /* Check if card is ok and fw is ready */
5992 dev_info(&adapter->pdev->dev,
5993 "Waiting for FW to be ready after EEH reset\n");
5994 status = be_fw_wait_ready(adapter);
5995 if (status)
5996 return PCI_ERS_RESULT_DISCONNECT;
5997
5998 be_clear_error(adapter, BE_CLEAR_ALL);
5999 return PCI_ERS_RESULT_RECOVERED;
6000 }
6001
6002 static void be_eeh_resume(struct pci_dev *pdev)
6003 {
6004 int status = 0;
6005 struct be_adapter *adapter = pci_get_drvdata(pdev);
6006
6007 dev_info(&adapter->pdev->dev, "EEH resume\n");
6008
6009 pci_save_state(pdev);
6010
6011 status = be_resume(adapter);
6012 if (status)
6013 goto err;
6014
6015 be_roce_dev_add(adapter);
6016
6017 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
6018 return;
6019 err:
6020 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
6021 }
6022
6023 static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6024 {
6025 struct be_adapter *adapter = pci_get_drvdata(pdev);
6026 struct be_resources vft_res = {0};
6027 int status;
6028
6029 if (!num_vfs)
6030 be_vf_clear(adapter);
6031
6032 adapter->num_vfs = num_vfs;
6033
6034 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6035 dev_warn(&pdev->dev,
6036 "Cannot disable VFs while they are assigned\n");
6037 return -EBUSY;
6038 }
6039
6040 /* When the HW is in an SR-IOV capable configuration, the PF-pool
6041 * resources are distributed equally across the maximum number of VFs.
6042 * The user may request that only a subset of the maximum VFs be
6043 * enabled. Based on num_vfs, redistribute the resources across num_vfs
6044 * so that each VF has access to more resources.
6045 * This facility is not available in BE3 FW; on Lancer chips the FW
6046 * does this redistribution itself.
6047 */
6048 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6049 be_calculate_vf_res(adapter, adapter->num_vfs,
6050 &vft_res);
6051 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6052 adapter->num_vfs, &vft_res);
6053 if (status)
6054 dev_err(&pdev->dev,
6055 "Failed to optimize SR-IOV resources\n");
6056 }
6057
6058 status = be_get_resources(adapter);
6059 if (status)
6060 return be_cmd_status(status);
6061
6062 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6063 rtnl_lock();
6064 status = be_update_queues(adapter);
6065 rtnl_unlock();
6066 if (status)
6067 return be_cmd_status(status);
6068
6069 if (adapter->num_vfs)
6070 status = be_vf_setup(adapter);
6071
6072 if (!status)
6073 return adapter->num_vfs;
6074
6075 return 0;
6076 }
6077
6078 static const struct pci_error_handlers be_eeh_handlers = {
6079 .error_detected = be_eeh_err_detected,
6080 .slot_reset = be_eeh_reset,
6081 .resume = be_eeh_resume,
6082 };
6083
6084 static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
6085
6086 static struct pci_driver be_driver = {
6087 .name = DRV_NAME,
6088 .id_table = be_dev_ids,
6089 .probe = be_probe,
6090 .remove = be_remove,
6091 .driver.pm = &be_pci_pm_ops,
6092 .shutdown = be_shutdown,
6093 .sriov_configure = be_pci_sriov_configure,
6094 .err_handler = &be_eeh_handlers
6095 };
6096
6097 static int __init be_init_module(void)
6098 {
6099 int status;
6100
6101 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6102 rx_frag_size != 2048) {
6103 printk(KERN_WARNING DRV_NAME
6104 " : Module param rx_frag_size must be 2048/4096/8192."
6105 " Using 2048\n");
6106 rx_frag_size = 2048;
6107 }
6108
6109 if (num_vfs > 0) {
6110 pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
6111 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6112 }
6113
6114 be_wq = create_singlethread_workqueue("be_wq");
6115 if (!be_wq) {
6116 pr_warn(DRV_NAME " : workqueue creation failed\n");
6117 return -ENOMEM;
6118 }
6119
6120 be_err_recovery_workq =
6121 create_singlethread_workqueue("be_err_recover");
6122 if (!be_err_recovery_workq)
6123 pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");
6124
6125 status = pci_register_driver(&be_driver);
6126 if (status) {
6127 destroy_workqueue(be_wq);
6128 be_destroy_err_recovery_workq();
6129 }
6130 return status;
6131 }
6132 module_init(be_init_module);
6133
6134 static void __exit be_exit_module(void)
6135 {
6136 pci_unregister_driver(&be_driver);
6137
6138 be_destroy_err_recovery_workq();
6139
6140 if (be_wq)
6141 destroy_workqueue(be_wq);
6142 }
6143 module_exit(be_exit_module);
6144