1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2005 - 2016 Broadcom
4 * All rights reserved.
5 *
6 * Contact Information:
7 * linux-drivers@emulex.com
8 *
9 * Emulex
10 * 3333 Susan Street
11 * Costa Mesa, CA 92626
12 */
13
14 #include <linux/prefetch.h>
15 #include <linux/module.h>
16 #include "be.h"
17 #include "be_cmds.h"
18 #include <asm/div64.h>
19 #include <linux/if_bridge.h>
20 #include <net/busy_poll.h>
21 #include <net/vxlan.h>
22
23 MODULE_DESCRIPTION(DRV_DESC);
24 MODULE_AUTHOR("Emulex Corporation");
25 MODULE_LICENSE("GPL");
26
27 /* num_vfs module param is obsolete.
28 * Use sysfs method to enable/disable VFs.
29 */
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, 0444);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, 0444);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 /* Per-module error detection/recovery workq shared across all functions.
39 * Each function schedules its own work request on this shared workq.
40 */
41 static struct workqueue_struct *be_err_recovery_workq;
42
43 static const struct pci_device_id be_dev_ids[] = {
44 #ifdef CONFIG_BE2NET_BE2
45 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
47 #endif /* CONFIG_BE2NET_BE2 */
48 #ifdef CONFIG_BE2NET_BE3
49 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
50 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
51 #endif /* CONFIG_BE2NET_BE3 */
52 #ifdef CONFIG_BE2NET_LANCER
53 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
54 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
55 #endif /* CONFIG_BE2NET_LANCER */
56 #ifdef CONFIG_BE2NET_SKYHAWK
57 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
58 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
59 #endif /* CONFIG_BE2NET_SKYHAWK */
60 { 0 }
61 };
62 MODULE_DEVICE_TABLE(pci, be_dev_ids);
63
64 /* Workqueue used by all functions for deferring cmd calls to the adapter */
65 static struct workqueue_struct *be_wq;
66
67 /* UE Status Low CSR */
68 static const char * const ue_status_low_desc[] = {
69 "CEV",
70 "CTX",
71 "DBUF",
72 "ERX",
73 "Host",
74 "MPU",
75 "NDMA",
76 "PTC ",
77 "RDMA ",
78 "RXF ",
79 "RXIPS ",
80 "RXULP0 ",
81 "RXULP1 ",
82 "RXULP2 ",
83 "TIM ",
84 "TPOST ",
85 "TPRE ",
86 "TXIPS ",
87 "TXULP0 ",
88 "TXULP1 ",
89 "UC ",
90 "WDMA ",
91 "TXULP2 ",
92 "HOST1 ",
93 "P0_OB_LINK ",
94 "P1_OB_LINK ",
95 "HOST_GPIO ",
96 "MBOX ",
97 "ERX2 ",
98 "SPARE ",
99 "JTAG ",
100 "MPU_INTPEND "
101 };
102
103 /* UE Status High CSR */
104 static const char * const ue_status_hi_desc[] = {
105 "LPCMEMHOST",
106 "MGMT_MAC",
107 "PCS0ONLINE",
108 "MPU_IRAM",
109 "PCS1ONLINE",
110 "PCTL0",
111 "PCTL1",
112 "PMEM",
113 "RR",
114 "TXPB",
115 "RXPP",
116 "XAUI",
117 "TXP",
118 "ARM",
119 "IPC",
120 "HOST2",
121 "HOST3",
122 "HOST4",
123 "HOST5",
124 "HOST6",
125 "HOST7",
126 "ECRC",
127 "Poison TLP",
128 "NETC",
129 "PERIPH",
130 "LLTXULP",
131 "D2P",
132 "RCON",
133 "LDMA",
134 "LLTXP",
135 "LLTXPB",
136 "Unknown"
137 };
138
139 #define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
140 BE_IF_FLAGS_BROADCAST | \
141 BE_IF_FLAGS_MULTICAST | \
142 BE_IF_FLAGS_PASS_L3L4_ERRORS)
143
144 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
145 {
146 struct be_dma_mem *mem = &q->dma_mem;
147
148 if (mem->va) {
149 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
150 mem->dma);
151 mem->va = NULL;
152 }
153 }
154
155 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
156 u16 len, u16 entry_size)
157 {
158 struct be_dma_mem *mem = &q->dma_mem;
159
160 memset(q, 0, sizeof(*q));
161 q->len = len;
162 q->entry_size = entry_size;
163 mem->size = len * entry_size;
164 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
165 &mem->dma, GFP_KERNEL);
166 if (!mem->va)
167 return -ENOMEM;
168 return 0;
169 }
170
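/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * MEMBAR control register via PCI config space. Used as a fallback when
 * the FW cmd issued by be_intr_set() fails.
 */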
171 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
172 {
173 u32 reg, enabled;
174
175 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
176 &reg);
177 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
178
179 if (!enabled && enable)
180 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
181 else if (enabled && !enable)
182 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
183 else
184 return;
185
186 pci_write_config_dword(adapter->pdev,
187 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
188 }
189
190 static void be_intr_set(struct be_adapter *adapter, bool enable)
191 {
192 int status = 0;
193
194 /* On lancer interrupts can't be controlled via this register */
195 if (lancer_chip(adapter))
196 return;
197
198 if (be_check_error(adapter, BE_ERROR_EEH))
199 return;
200
201 status = be_cmd_intr_set(adapter, enable);
202 if (status)
203 be_reg_intr_set(adapter, enable);
204 }
205
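/* Notify HW of 'posted' new buffers on the given RX queue by ringing the
 * RQ doorbell.
 */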
206 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
207 {
208 u32 val = 0;
209
210 if (be_check_error(adapter, BE_ERROR_HW))
211 return;
212
213 val |= qid & DB_RQ_RING_ID_MASK;
214 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
215
216 wmb();
217 iowrite32(val, adapter->db + DB_RQ_OFFSET);
218 }
219
220 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
221 u16 posted)
222 {
223 u32 val = 0;
224
225 if (be_check_error(adapter, BE_ERROR_HW))
226 return;
227
228 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
229 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
230
231 wmb();
232 iowrite32(val, adapter->db + txo->db_offset);
233 }
234
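/* Ring the EQ doorbell: optionally re-arm the EQ and clear the interrupt,
 * ack 'num_popped' events and program the eq_delay_mult_enc delay encoding.
 */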
235 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
236 bool arm, bool clear_int, u16 num_popped,
237 u32 eq_delay_mult_enc)
238 {
239 u32 val = 0;
240
241 val |= qid & DB_EQ_RING_ID_MASK;
242 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
243
244 if (be_check_error(adapter, BE_ERROR_HW))
245 return;
246
247 if (arm)
248 val |= 1 << DB_EQ_REARM_SHIFT;
249 if (clear_int)
250 val |= 1 << DB_EQ_CLR_SHIFT;
251 val |= 1 << DB_EQ_EVNT_SHIFT;
252 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
253 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
254 iowrite32(val, adapter->db + DB_EQ_OFFSET);
255 }
256
257 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
258 {
259 u32 val = 0;
260
261 val |= qid & DB_CQ_RING_ID_MASK;
262 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
263 DB_CQ_RING_ID_EXT_MASK_SHIFT);
264
265 if (be_check_error(adapter, BE_ERROR_HW))
266 return;
267
268 if (arm)
269 val |= 1 << DB_CQ_REARM_SHIFT;
270 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
271 iowrite32(val, adapter->db + DB_CQ_OFFSET);
272 }
273
274 static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
275 {
276 int i;
277
278 /* Check if mac has already been added as part of uc-list */
279 for (i = 0; i < adapter->uc_macs; i++) {
280 if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
281 /* mac already added, skip addition */
282 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
283 return 0;
284 }
285 }
286
287 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
288 &adapter->pmac_id[0], 0);
289 }
290
291 static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
292 {
293 int i;
294
295 /* Skip deletion if the programmed mac is
296 * being used in uc-list
297 */
298 for (i = 0; i < adapter->uc_macs; i++) {
299 if (adapter->pmac_id[i + 1] == pmac_id)
300 return;
301 }
302 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
303 }
304
305 static int be_mac_addr_set(struct net_device *netdev, void *p)
306 {
307 struct be_adapter *adapter = netdev_priv(netdev);
308 struct device *dev = &adapter->pdev->dev;
309 struct sockaddr *addr = p;
310 int status;
311 u8 mac[ETH_ALEN];
312 u32 old_pmac_id = adapter->pmac_id[0];
313
314 if (!is_valid_ether_addr(addr->sa_data))
315 return -EADDRNOTAVAIL;
316
317 /* Proceed further only if the user-provided MAC is different
318 * from the active MAC
319 */
320 if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
321 return 0;
322
323 /* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
324 * address
325 */
326 if (BEx_chip(adapter) && be_virtfn(adapter) &&
327 !check_privilege(adapter, BE_PRIV_FILTMGMT))
328 return -EPERM;
329
330 /* if device is not running, copy MAC to netdev->dev_addr */
331 if (!netif_running(netdev))
332 goto done;
333
334 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
335 * privilege or if PF did not provision the new MAC address.
336 * On BE3, this cmd will always fail if the VF doesn't have the
337 * FILTMGMT privilege. This failure is OK, only if the PF programmed
338 * the MAC for the VF.
339 */
340 mutex_lock(&adapter->rx_filter_lock);
341 status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
342 if (!status) {
343
344 /* Delete the old programmed MAC. This call may fail if the
345 * old MAC was already deleted by the PF driver.
346 */
347 if (adapter->pmac_id[0] != old_pmac_id)
348 be_dev_mac_del(adapter, old_pmac_id);
349 }
350
351 mutex_unlock(&adapter->rx_filter_lock);
352 /* Decide if the new MAC is successfully activated only after
353 * querying the FW
354 */
355 status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
356 adapter->if_handle, true, 0);
357 if (status)
358 goto err;
359
360 /* The MAC change did not happen, either due to lack of privilege
361 * or PF didn't pre-provision.
362 */
363 if (!ether_addr_equal(addr->sa_data, mac)) {
364 status = -EPERM;
365 goto err;
366 }
367
368 /* Remember currently programmed MAC */
369 ether_addr_copy(adapter->dev_mac, addr->sa_data);
370 done:
371 eth_hw_addr_set(netdev, addr->sa_data);
372 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
373 return 0;
374 err:
375 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
376 return status;
377 }
378
379 /* BE2 supports only v0 cmd */
380 static void *hw_stats_from_cmd(struct be_adapter *adapter)
381 {
382 if (BE2_chip(adapter)) {
383 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
384
385 return &cmd->hw_stats;
386 } else if (BE3_chip(adapter)) {
387 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
388
389 return &cmd->hw_stats;
390 } else {
391 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
392
393 return &cmd->hw_stats;
394 }
395 }
396
397 /* BE2 supports only v0 cmd */
398 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
399 {
400 if (BE2_chip(adapter)) {
401 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
402
403 return &hw_stats->erx;
404 } else if (BE3_chip(adapter)) {
405 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
406
407 return &hw_stats->erx;
408 } else {
409 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
410
411 return &hw_stats->erx;
412 }
413 }
414
415 static void populate_be_v0_stats(struct be_adapter *adapter)
416 {
417 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
418 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
419 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
420 struct be_port_rxf_stats_v0 *port_stats =
421 &rxf_stats->port[adapter->port_num];
422 struct be_drv_stats *drvs = &adapter->drv_stats;
423
424 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
425 drvs->rx_pause_frames = port_stats->rx_pause_frames;
426 drvs->rx_crc_errors = port_stats->rx_crc_errors;
427 drvs->rx_control_frames = port_stats->rx_control_frames;
428 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
429 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
430 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
431 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
432 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
433 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
434 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
435 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
436 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
437 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
438 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
439 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
440 drvs->rx_dropped_header_too_small =
441 port_stats->rx_dropped_header_too_small;
442 drvs->rx_address_filtered =
443 port_stats->rx_address_filtered +
444 port_stats->rx_vlan_filtered;
445 drvs->rx_alignment_symbol_errors =
446 port_stats->rx_alignment_symbol_errors;
447
448 drvs->tx_pauseframes = port_stats->tx_pauseframes;
449 drvs->tx_controlframes = port_stats->tx_controlframes;
450
451 if (adapter->port_num)
452 drvs->jabber_events = rxf_stats->port1_jabber_events;
453 else
454 drvs->jabber_events = rxf_stats->port0_jabber_events;
455 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
456 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
457 drvs->forwarded_packets = rxf_stats->forwarded_packets;
458 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
459 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
460 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
461 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
462 }
463
464 static void populate_be_v1_stats(struct be_adapter *adapter)
465 {
466 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
467 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
468 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
469 struct be_port_rxf_stats_v1 *port_stats =
470 &rxf_stats->port[adapter->port_num];
471 struct be_drv_stats *drvs = &adapter->drv_stats;
472
473 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
474 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
475 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
476 drvs->rx_pause_frames = port_stats->rx_pause_frames;
477 drvs->rx_crc_errors = port_stats->rx_crc_errors;
478 drvs->rx_control_frames = port_stats->rx_control_frames;
479 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
480 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
481 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
482 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
483 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
484 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
485 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
486 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
487 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
488 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
489 drvs->rx_dropped_header_too_small =
490 port_stats->rx_dropped_header_too_small;
491 drvs->rx_input_fifo_overflow_drop =
492 port_stats->rx_input_fifo_overflow_drop;
493 drvs->rx_address_filtered = port_stats->rx_address_filtered;
494 drvs->rx_alignment_symbol_errors =
495 port_stats->rx_alignment_symbol_errors;
496 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
497 drvs->tx_pauseframes = port_stats->tx_pauseframes;
498 drvs->tx_controlframes = port_stats->tx_controlframes;
499 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
500 drvs->jabber_events = port_stats->jabber_events;
501 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
502 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
503 drvs->forwarded_packets = rxf_stats->forwarded_packets;
504 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
505 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
506 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
507 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
508 }
509
510 static void populate_be_v2_stats(struct be_adapter *adapter)
511 {
512 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
513 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
514 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
515 struct be_port_rxf_stats_v2 *port_stats =
516 &rxf_stats->port[adapter->port_num];
517 struct be_drv_stats *drvs = &adapter->drv_stats;
518
519 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
520 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
521 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
522 drvs->rx_pause_frames = port_stats->rx_pause_frames;
523 drvs->rx_crc_errors = port_stats->rx_crc_errors;
524 drvs->rx_control_frames = port_stats->rx_control_frames;
525 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
526 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
527 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
528 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
529 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
530 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
531 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
532 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
533 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
534 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
535 drvs->rx_dropped_header_too_small =
536 port_stats->rx_dropped_header_too_small;
537 drvs->rx_input_fifo_overflow_drop =
538 port_stats->rx_input_fifo_overflow_drop;
539 drvs->rx_address_filtered = port_stats->rx_address_filtered;
540 drvs->rx_alignment_symbol_errors =
541 port_stats->rx_alignment_symbol_errors;
542 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
543 drvs->tx_pauseframes = port_stats->tx_pauseframes;
544 drvs->tx_controlframes = port_stats->tx_controlframes;
545 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
546 drvs->jabber_events = port_stats->jabber_events;
547 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
548 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
549 drvs->forwarded_packets = rxf_stats->forwarded_packets;
550 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
551 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
552 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
553 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
554 if (be_roce_supported(adapter)) {
555 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
556 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
557 drvs->rx_roce_frames = port_stats->roce_frames_received;
558 drvs->roce_drops_crc = port_stats->roce_drops_crc;
559 drvs->roce_drops_payload_len =
560 port_stats->roce_drops_payload_len;
561 }
562 }
563
564 static void populate_lancer_stats(struct be_adapter *adapter)
565 {
566 struct be_drv_stats *drvs = &adapter->drv_stats;
567 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
568
569 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
570 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
571 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
572 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
573 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
574 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
575 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
576 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
577 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
578 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
579 drvs->rx_dropped_tcp_length =
580 pport_stats->rx_dropped_invalid_tcp_length;
581 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
582 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
583 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
584 drvs->rx_dropped_header_too_small =
585 pport_stats->rx_dropped_header_too_small;
586 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
587 drvs->rx_address_filtered =
588 pport_stats->rx_address_filtered +
589 pport_stats->rx_vlan_filtered;
590 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
591 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
592 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
593 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
594 drvs->jabber_events = pport_stats->rx_jabbers;
595 drvs->forwarded_packets = pport_stats->num_forwards_lo;
596 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
597 drvs->rx_drops_too_many_frags =
598 pport_stats->rx_drops_too_many_frags_lo;
599 }
600
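/* Accumulate a 16-bit HW counter into a 32-bit SW counter, accounting for
 * a single wrap-around of the HW counter since the last update.
 */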
601 static void accumulate_16bit_val(u32 *acc, u16 val)
602 {
603 #define lo(x) (x & 0xFFFF)
604 #define hi(x) (x & 0xFFFF0000)
605 bool wrapped = val < lo(*acc);
606 u32 newacc = hi(*acc) + val;
607
608 if (wrapped)
609 newacc += 65536;
610 WRITE_ONCE(*acc, newacc);
611 }
612
613 static void populate_erx_stats(struct be_adapter *adapter,
614 struct be_rx_obj *rxo, u32 erx_stat)
615 {
616 if (!BEx_chip(adapter))
617 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
618 else
619 /* below erx HW counter can actually wrap around after
620 * 65535. Driver accumulates a 32-bit value
621 */
622 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
623 (u16)erx_stat);
624 }
625
626 void be_parse_stats(struct be_adapter *adapter)
627 {
628 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
629 struct be_rx_obj *rxo;
630 int i;
631 u32 erx_stat;
632
633 if (lancer_chip(adapter)) {
634 populate_lancer_stats(adapter);
635 } else {
636 if (BE2_chip(adapter))
637 populate_be_v0_stats(adapter);
638 else if (BE3_chip(adapter))
639 /* for BE3 */
640 populate_be_v1_stats(adapter);
641 else
642 populate_be_v2_stats(adapter);
643
644 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
645 for_all_rx_queues(adapter, rxo, i) {
646 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
647 populate_erx_stats(adapter, rxo, erx_stat);
648 }
649 }
650 }
651
652 static void be_get_stats64(struct net_device *netdev,
653 struct rtnl_link_stats64 *stats)
654 {
655 struct be_adapter *adapter = netdev_priv(netdev);
656 struct be_drv_stats *drvs = &adapter->drv_stats;
657 struct be_rx_obj *rxo;
658 struct be_tx_obj *txo;
659 u64 pkts, bytes;
660 unsigned int start;
661 int i;
662
663 for_all_rx_queues(adapter, rxo, i) {
664 const struct be_rx_stats *rx_stats = rx_stats(rxo);
665
666 do {
667 start = u64_stats_fetch_begin(&rx_stats->sync);
668 pkts = rx_stats(rxo)->rx_pkts;
669 bytes = rx_stats(rxo)->rx_bytes;
670 } while (u64_stats_fetch_retry(&rx_stats->sync, start));
671 stats->rx_packets += pkts;
672 stats->rx_bytes += bytes;
673 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
674 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
675 rx_stats(rxo)->rx_drops_no_frags;
676 }
677
678 for_all_tx_queues(adapter, txo, i) {
679 const struct be_tx_stats *tx_stats = tx_stats(txo);
680
681 do {
682 start = u64_stats_fetch_begin(&tx_stats->sync);
683 pkts = tx_stats(txo)->tx_pkts;
684 bytes = tx_stats(txo)->tx_bytes;
685 } while (u64_stats_fetch_retry(&tx_stats->sync, start));
686 stats->tx_packets += pkts;
687 stats->tx_bytes += bytes;
688 }
689
690 /* bad pkts received */
691 stats->rx_errors = drvs->rx_crc_errors +
692 drvs->rx_alignment_symbol_errors +
693 drvs->rx_in_range_errors +
694 drvs->rx_out_range_errors +
695 drvs->rx_frame_too_long +
696 drvs->rx_dropped_too_small +
697 drvs->rx_dropped_too_short +
698 drvs->rx_dropped_header_too_small +
699 drvs->rx_dropped_tcp_length +
700 drvs->rx_dropped_runt;
701
702 /* detailed rx errors */
703 stats->rx_length_errors = drvs->rx_in_range_errors +
704 drvs->rx_out_range_errors +
705 drvs->rx_frame_too_long;
706
707 stats->rx_crc_errors = drvs->rx_crc_errors;
708
709 /* frame alignment errors */
710 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
711
712 /* receiver fifo overrun */
713 /* drops_no_pbuf is not per i/f, it's per BE card */
714 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
715 drvs->rx_input_fifo_overflow_drop +
716 drvs->rx_drops_no_pbuf;
717 }
718
719 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
720 {
721 struct net_device *netdev = adapter->netdev;
722
723 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
724 netif_carrier_off(netdev);
725 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
726 }
727
728 if (link_status)
729 netif_carrier_on(netdev);
730 else
731 netif_carrier_off(netdev);
732
733 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
734 }
735
736 static int be_gso_hdr_len(struct sk_buff *skb)
737 {
738 if (skb->encapsulation)
739 return skb_inner_tcp_all_headers(skb);
740
741 return skb_tcp_all_headers(skb);
742 }
743
744 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
745 {
746 struct be_tx_stats *stats = tx_stats(txo);
747 u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
748 /* Account for headers which get duplicated in TSO pkt */
749 u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
750
751 u64_stats_update_begin(&stats->sync);
752 stats->tx_reqs++;
753 stats->tx_bytes += skb->len + dup_hdr_len;
754 stats->tx_pkts += tx_pkts;
755 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
756 stats->tx_vxlan_offload_pkts += tx_pkts;
757 u64_stats_update_end(&stats->sync);
758 }
759
760 /* Returns number of WRBs needed for the skb */
761 static u32 skb_wrb_cnt(struct sk_buff *skb)
762 {
763 /* +1 for the header wrb */
764 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
765 }
766
767 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
768 {
769 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
770 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
771 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
772 wrb->rsvd0 = 0;
773 }
774
775 /* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
776 * to avoid the swap and shift/mask operations in wrb_fill().
777 */
778 static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
779 {
780 wrb->frag_pa_hi = 0;
781 wrb->frag_pa_lo = 0;
782 wrb->frag_len = 0;
783 wrb->rsvd0 = 0;
784 }
785
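/* Return the VLAN tag to program in the Tx WRB. If the skb's priority is
 * not in the adapter's allowed priority bitmap, the PCP bits are replaced
 * with the recommended priority.
 */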
786 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
787 struct sk_buff *skb)
788 {
789 u8 vlan_prio;
790 u16 vlan_tag;
791
792 vlan_tag = skb_vlan_tag_get(skb);
793 vlan_prio = skb_vlan_tag_get_prio(skb);
794 /* If vlan priority provided by OS is NOT in available bmap */
795 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
796 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
797 adapter->recommended_prio_bits;
798
799 return vlan_tag;
800 }
801
802 /* Used only for IP tunnel packets */
803 static u16 skb_inner_ip_proto(struct sk_buff *skb)
804 {
805 return (inner_ip_hdr(skb)->version == 4) ?
806 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
807 }
808
809 static u16 skb_ip_proto(struct sk_buff *skb)
810 {
811 return (ip_hdr(skb)->version == 4) ?
812 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
813 }
814
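/* The TXQ is considered full when it can no longer accommodate a
 * maximally-fragmented skb (BE_MAX_TX_FRAG_COUNT WRBs).
 */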
815 static inline bool be_is_txq_full(struct be_tx_obj *txo)
816 {
817 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
818 }
819
820 static inline bool be_can_txq_wake(struct be_tx_obj *txo)
821 {
822 return atomic_read(&txo->q.used) < txo->q.len / 2;
823 }
824
825 static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
826 {
827 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
828 }
829
830 static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
831 struct sk_buff *skb,
832 struct be_wrb_params *wrb_params)
833 {
834 u16 proto;
835
836 if (skb_is_gso(skb)) {
837 BE_WRB_F_SET(wrb_params->features, LSO, 1);
838 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
839 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
840 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
841 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 if (skb->encapsulation) {
843 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
844 proto = skb_inner_ip_proto(skb);
845 } else {
846 proto = skb_ip_proto(skb);
847 }
848 if (proto == IPPROTO_TCP)
849 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
850 else if (proto == IPPROTO_UDP)
851 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
852 }
853
854 if (skb_vlan_tag_present(skb)) {
855 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
856 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
857 }
858
859 BE_WRB_F_SET(wrb_params->features, CRC, 1);
860 }
861
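/* Fill the Tx WRB header from the computed wrb_params: checksum/LSO/VLAN
 * offload bits, frame length and the number of WRBs used by the packet.
 */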
862 static void wrb_fill_hdr(struct be_adapter *adapter,
863 struct be_eth_hdr_wrb *hdr,
864 struct be_wrb_params *wrb_params,
865 struct sk_buff *skb)
866 {
867 memset(hdr, 0, sizeof(*hdr));
868
869 SET_TX_WRB_HDR_BITS(crc, hdr,
870 BE_WRB_F_GET(wrb_params->features, CRC));
871 SET_TX_WRB_HDR_BITS(ipcs, hdr,
872 BE_WRB_F_GET(wrb_params->features, IPCS));
873 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
874 BE_WRB_F_GET(wrb_params->features, TCPCS));
875 SET_TX_WRB_HDR_BITS(udpcs, hdr,
876 BE_WRB_F_GET(wrb_params->features, UDPCS));
877
878 SET_TX_WRB_HDR_BITS(lso, hdr,
879 BE_WRB_F_GET(wrb_params->features, LSO));
880 SET_TX_WRB_HDR_BITS(lso6, hdr,
881 BE_WRB_F_GET(wrb_params->features, LSO6));
882 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
883
884 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
885 * hack is not needed, the evt bit is set while ringing DB.
886 */
887 SET_TX_WRB_HDR_BITS(event, hdr,
888 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
889 SET_TX_WRB_HDR_BITS(vlan, hdr,
890 BE_WRB_F_GET(wrb_params->features, VLAN));
891 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
892
893 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
894 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
895 SET_TX_WRB_HDR_BITS(mgmt, hdr,
896 BE_WRB_F_GET(wrb_params->features, OS2BMC));
897 }
898
899 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
900 bool unmap_single)
901 {
902 dma_addr_t dma;
903 u32 frag_len = le32_to_cpu(wrb->frag_len);
904
905
906 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
907 (u64)le32_to_cpu(wrb->frag_pa_lo);
908 if (frag_len) {
909 if (unmap_single)
910 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
911 else
912 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
913 }
914 }
915
916 /* Grab a WRB header for xmit */
917 static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
918 {
919 u32 head = txo->q.head;
920
921 queue_head_inc(&txo->q);
922 return head;
923 }
924
925 /* Set up the WRB header for xmit */
926 static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
927 struct be_tx_obj *txo,
928 struct be_wrb_params *wrb_params,
929 struct sk_buff *skb, u16 head)
930 {
931 u32 num_frags = skb_wrb_cnt(skb);
932 struct be_queue_info *txq = &txo->q;
933 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
934
935 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
936 be_dws_cpu_to_le(hdr, sizeof(*hdr));
937
938 BUG_ON(txo->sent_skb_list[head]);
939 txo->sent_skb_list[head] = skb;
940 txo->last_req_hdr = head;
941 atomic_add(num_frags, &txq->used);
942 txo->last_req_wrb_cnt = num_frags;
943 txo->pend_wrb_cnt += num_frags;
944 }
945
946 /* Setup a WRB fragment (buffer descriptor) for xmit */
947 static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
948 int len)
949 {
950 struct be_eth_wrb *wrb;
951 struct be_queue_info *txq = &txo->q;
952
953 wrb = queue_head_node(txq);
954 wrb_fill(wrb, busaddr, len);
955 queue_head_inc(txq);
956 }
957
958 /* Bring the queue back to the state it was in before be_xmit_enqueue() routine
959 * was invoked. The producer index is restored to the previous packet and the
960 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
961 */
962 static void be_xmit_restore(struct be_adapter *adapter,
963 struct be_tx_obj *txo, u32 head, bool map_single,
964 u32 copied)
965 {
966 struct device *dev;
967 struct be_eth_wrb *wrb;
968 struct be_queue_info *txq = &txo->q;
969
970 dev = &adapter->pdev->dev;
971 txq->head = head;
972
973 /* skip the first wrb (hdr); it's not mapped */
974 queue_head_inc(txq);
975 while (copied) {
976 wrb = queue_head_node(txq);
977 unmap_tx_frag(dev, wrb, map_single);
978 map_single = false;
979 copied -= le32_to_cpu(wrb->frag_len);
980 queue_head_inc(txq);
981 }
982
983 txq->head = head;
984 }
985
986 /* Enqueue the given packet for transmit. This routine allocates WRBs for the
987 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
988 * of WRBs used up by the packet.
989 */
990 static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
991 struct sk_buff *skb,
992 struct be_wrb_params *wrb_params)
993 {
994 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
995 struct device *dev = &adapter->pdev->dev;
996 bool map_single = false;
997 u32 head;
998 dma_addr_t busaddr;
999 int len;
1000
1001 head = be_tx_get_wrb_hdr(txo);
1002
1003 if (skb->len > skb->data_len) {
1004 len = skb_headlen(skb);
1005
1006 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1007 if (dma_mapping_error(dev, busaddr))
1008 goto dma_err;
1009 map_single = true;
1010 be_tx_setup_wrb_frag(txo, busaddr, len);
1011 copied += len;
1012 }
1013
1014 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1015 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1016 len = skb_frag_size(frag);
1017
1018 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1019 if (dma_mapping_error(dev, busaddr))
1020 goto dma_err;
1021 be_tx_setup_wrb_frag(txo, busaddr, len);
1022 copied += len;
1023 }
1024
1025 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1026
1027 be_tx_stats_update(txo, skb);
1028 return wrb_cnt;
1029
1030 dma_err:
1031 adapter->drv_stats.dma_map_errors++;
1032 be_xmit_restore(adapter, txo, head, map_single, copied);
1033 return 0;
1034 }
1035
1036 static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
1037 {
1038 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
1039 }
1040
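/* Insert VLAN tag(s) into the packet data itself instead of relying on HW
 * tagging: the skb's own tag, or the pvid when a QnQ async event has been
 * received; followed by the outer qnq_vid if one is configured.
 */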
1041 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
1042 struct sk_buff *skb,
1043 struct be_wrb_params
1044 *wrb_params)
1045 {
1046 bool insert_vlan = false;
1047 u16 vlan_tag = 0;
1048
1049 skb = skb_share_check(skb, GFP_ATOMIC);
1050 if (unlikely(!skb))
1051 return skb;
1052
1053 if (skb_vlan_tag_present(skb)) {
1054 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
1055 insert_vlan = true;
1056 }
1057
1058 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1059 if (!insert_vlan) {
1060 vlan_tag = adapter->pvid;
1061 insert_vlan = true;
1062 }
1063 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1064 * skip VLAN insertion
1065 */
1066 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1067 }
1068
1069 if (insert_vlan) {
1070 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1071 vlan_tag);
1072 if (unlikely(!skb))
1073 return skb;
1074 __vlan_hwaccel_clear_tag(skb);
1075 }
1076
1077 /* Insert the outer VLAN, if any */
1078 if (adapter->qnq_vid) {
1079 vlan_tag = adapter->qnq_vid;
1080 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1081 vlan_tag);
1082 if (unlikely(!skb))
1083 return skb;
1084 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1085 }
1086
1087 return skb;
1088 }
1089
1090 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1091 {
1092 struct ethhdr *eh = (struct ethhdr *)skb->data;
1093 u16 offset = ETH_HLEN;
1094
1095 if (eh->h_proto == htons(ETH_P_IPV6)) {
1096 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1097
1098 offset += sizeof(struct ipv6hdr);
1099 if (ip6h->nexthdr != NEXTHDR_TCP &&
1100 ip6h->nexthdr != NEXTHDR_UDP) {
1101 struct ipv6_opt_hdr *ehdr =
1102 (struct ipv6_opt_hdr *)(skb->data + offset);
1103
1104 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1105 if (ehdr->hdrlen == 0xff)
1106 return true;
1107 }
1108 }
1109 return false;
1110 }
1111
1112 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1113 {
1114 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
1115 }
1116
1117 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
1118 {
1119 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
1120 }
1121
1122 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb,
1124 struct be_wrb_params
1125 *wrb_params)
1126 {
1127 struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
1128 unsigned int eth_hdr_len;
1129 struct iphdr *ip;
1130
1131 /* For padded packets, BE HW modifies tot_len field in IP header
1132 * incorrectly when VLAN tag is inserted by HW.
1133 * For padded packets, Lancer computes incorrect checksum.
1134 */
1135 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1136 VLAN_ETH_HLEN : ETH_HLEN;
1137 if (skb->len <= 60 &&
1138 (lancer_chip(adapter) || BE3_chip(adapter) ||
1139 skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
1140 ip = (struct iphdr *)ip_hdr(skb);
1141 if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
1142 goto tx_drop;
1143 }
1144
1145 /* If vlan tag is already inlined in the packet, skip HW VLAN
1146 * tagging in pvid-tagging mode
1147 */
1148 if (be_pvid_tagging_enabled(adapter) &&
1149 veh->h_vlan_proto == htons(ETH_P_8021Q))
1150 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1151
1152 /* HW has a bug wherein it will calculate CSUM for VLAN
1153 * pkts even though it is disabled.
1154 * Manually insert VLAN in pkt.
1155 */
1156 if (skb->ip_summed != CHECKSUM_PARTIAL &&
1157 skb_vlan_tag_present(skb)) {
1158 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1159 if (unlikely(!skb))
1160 goto err;
1161 }
1162
1163 /* HW may lockup when VLAN HW tagging is requested on
1164 * certain ipv6 packets. Drop such pkts if the HW workaround to
1165 * skip HW tagging is not enabled by FW.
1166 */
1167 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
1168 (adapter->pvid || adapter->qnq_vid) &&
1169 !qnq_async_evt_rcvd(adapter)))
1170 goto tx_drop;
1171
1172 /* Manual VLAN tag insertion to prevent:
1173 * ASIC lockup when the ASIC inserts VLAN tag into
1174 * certain ipv6 packets. Insert VLAN tags in driver,
1175 * and set event, completion, vlan bits accordingly
1176 * in the Tx WRB.
1177 */
1178 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1179 be_vlan_tag_tx_chk(adapter, skb)) {
1180 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1181 if (unlikely(!skb))
1182 goto err;
1183 }
1184
1185 return skb;
1186 tx_drop:
1187 dev_kfree_skb_any(skb);
1188 err:
1189 return NULL;
1190 }
1191
1192 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1193 struct sk_buff *skb,
1194 struct be_wrb_params *wrb_params)
1195 {
1196 int err;
1197
1198 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1199 * packets that are 32b or less may cause a transmit stall
1200 * on that port. The workaround is to pad such packets
1201 * (len <= 32 bytes) to a minimum length of 36b.
1202 */
1203 if (skb->len <= 32) {
1204 if (skb_put_padto(skb, 36))
1205 return NULL;
1206 }
1207
1208 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1209 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
1210 if (!skb)
1211 return NULL;
1212 }
1213
1214 /* The stack can send us skbs with length greater than
1215 * what the HW can handle. Trim the extra bytes.
1216 */
1217 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1218 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1219 WARN_ON(err);
1220
1221 return skb;
1222 }
1223
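/* Ring the Tx doorbell for all WRBs pending on this queue. On non-Lancer
 * chips a dummy WRB is appended first if the pending count is odd, so that
 * an even number of WRBs is notified.
 */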
1224 static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1225 {
1226 struct be_queue_info *txq = &txo->q;
1227 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1228
1229 /* Mark the last request eventable if it hasn't been marked already */
1230 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1231 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1232
1233 /* compose a dummy wrb if there are odd set of wrbs to notify */
1234 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1235 wrb_fill_dummy(queue_head_node(txq));
1236 queue_head_inc(txq);
1237 atomic_inc(&txq->used);
1238 txo->pend_wrb_cnt++;
1239 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1240 TX_HDR_WRB_NUM_SHIFT);
1241 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1242 TX_HDR_WRB_NUM_SHIFT);
1243 }
1244 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1245 txo->pend_wrb_cnt = 0;
1246 }
1247
1248 /* OS2BMC related */
1249
1250 #define DHCP_CLIENT_PORT 68
1251 #define DHCP_SERVER_PORT 67
1252 #define NET_BIOS_PORT1 137
1253 #define NET_BIOS_PORT2 138
1254 #define DHCPV6_RAS_PORT 547
1255
1256 #define is_mc_allowed_on_bmc(adapter, eh) \
1257 (!is_multicast_filt_enabled(adapter) && \
1258 is_multicast_ether_addr(eh->h_dest) && \
1259 !is_broadcast_ether_addr(eh->h_dest))
1260
1261 #define is_bc_allowed_on_bmc(adapter, eh) \
1262 (!is_broadcast_filt_enabled(adapter) && \
1263 is_broadcast_ether_addr(eh->h_dest))
1264
1265 #define is_arp_allowed_on_bmc(adapter, skb) \
1266 (is_arp(skb) && is_arp_filt_enabled(adapter))
1267
1268 #define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1269
1270 #define is_arp_filt_enabled(adapter) \
1271 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1272
1273 #define is_dhcp_client_filt_enabled(adapter) \
1274 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1275
1276 #define is_dhcp_srvr_filt_enabled(adapter) \
1277 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1278
1279 #define is_nbios_filt_enabled(adapter) \
1280 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1281
1282 #define is_ipv6_na_filt_enabled(adapter) \
1283 (adapter->bmc_filt_mask & \
1284 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1285
1286 #define is_ipv6_ra_filt_enabled(adapter) \
1287 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1288
1289 #define is_ipv6_ras_filt_enabled(adapter) \
1290 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1291
1292 #define is_broadcast_filt_enabled(adapter) \
1293 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1294
1295 #define is_multicast_filt_enabled(adapter) \
1296 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1297
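/* Decide whether this Tx packet must also be delivered to the BMC
 * (OS2BMC), based on the adapter's BMC filter mask and the packet type
 * (ARP, DHCP, NetBIOS, IPv6 RA/NA, broadcast/multicast).
 */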
1298 static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1299 struct sk_buff **skb,
1300 struct be_wrb_params *wrb_params)
1301 {
1302 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1303 bool os2bmc = false;
1304
1305 if (!be_is_os2bmc_enabled(adapter))
1306 goto done;
1307
1308 if (!is_multicast_ether_addr(eh->h_dest))
1309 goto done;
1310
1311 if (is_mc_allowed_on_bmc(adapter, eh) ||
1312 is_bc_allowed_on_bmc(adapter, eh) ||
1313 is_arp_allowed_on_bmc(adapter, (*skb))) {
1314 os2bmc = true;
1315 goto done;
1316 }
1317
1318 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1319 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1320 u8 nexthdr = hdr->nexthdr;
1321
1322 if (nexthdr == IPPROTO_ICMPV6) {
1323 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1324
1325 switch (icmp6->icmp6_type) {
1326 case NDISC_ROUTER_ADVERTISEMENT:
1327 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1328 goto done;
1329 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1330 os2bmc = is_ipv6_na_filt_enabled(adapter);
1331 goto done;
1332 default:
1333 break;
1334 }
1335 }
1336 }
1337
1338 if (is_udp_pkt((*skb))) {
1339 struct udphdr *udp = udp_hdr((*skb));
1340
1341 switch (ntohs(udp->dest)) {
1342 case DHCP_CLIENT_PORT:
1343 os2bmc = is_dhcp_client_filt_enabled(adapter);
1344 goto done;
1345 case DHCP_SERVER_PORT:
1346 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1347 goto done;
1348 case NET_BIOS_PORT1:
1349 case NET_BIOS_PORT2:
1350 os2bmc = is_nbios_filt_enabled(adapter);
1351 goto done;
1352 case DHCPV6_RAS_PORT:
1353 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1354 goto done;
1355 default:
1356 break;
1357 }
1358 }
1359 done:
1360 /* For packets over a VLAN that are destined to the BMC,
1361 * the ASIC expects the VLAN tag to be inline in the packet.
1362 */
1363 if (os2bmc)
1364 *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
1365
1366 return os2bmc;
1367 }
1368
1369 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1370 {
1371 struct be_adapter *adapter = netdev_priv(netdev);
1372 u16 q_idx = skb_get_queue_mapping(skb);
1373 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1374 struct be_wrb_params wrb_params = { 0 };
1375 bool flush = !netdev_xmit_more();
1376 u16 wrb_cnt;
1377
1378 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
1379 if (unlikely(!skb))
1380 goto drop;
1381
1382 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1383
1384 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1385 if (unlikely(!wrb_cnt))
1386 goto drop_skb;
1387
1388 /* if os2bmc is enabled and if the pkt is destined to bmc,
1389 * enqueue the pkt a 2nd time with mgmt bit set.
1390 */
1391 if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
1392 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1393 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1394 if (unlikely(!wrb_cnt))
1395 goto drop_skb;
1396 else
1397 skb_get(skb);
1398 }
1399
1400 if (be_is_txq_full(txo)) {
1401 netif_stop_subqueue(netdev, q_idx);
1402 tx_stats(txo)->tx_stops++;
1403 }
1404
1405 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1406 be_xmit_flush(adapter, txo);
1407
1408 return NETDEV_TX_OK;
1409 drop_skb:
1410 dev_kfree_skb_any(skb);
1411 drop:
1412 tx_stats(txo)->tx_drv_drops++;
1413 /* Flush the already enqueued tx requests */
1414 if (flush && txo->pend_wrb_cnt)
1415 be_xmit_flush(adapter, txo);
1416
1417 return NETDEV_TX_OK;
1418 }
1419
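/* On a Tx timeout, dump the Tx queue and completion queue contents along
 * with the pending skbs for debugging. On Lancer chips, also initiate a
 * FW reset.
 */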
1420 static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1421 {
1422 struct be_adapter *adapter = netdev_priv(netdev);
1423 struct device *dev = &adapter->pdev->dev;
1424 struct be_tx_obj *txo;
1425 struct sk_buff *skb;
1426 struct tcphdr *tcphdr;
1427 struct udphdr *udphdr;
1428 u32 *entry;
1429 int status;
1430 int i, j;
1431
1432 for_all_tx_queues(adapter, txo, i) {
1433 dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
1434 i, txo->q.head, txo->q.tail,
1435 atomic_read(&txo->q.used), txo->q.id);
1436
1437 entry = txo->q.dma_mem.va;
1438 for (j = 0; j < TX_Q_LEN * 4; j += 4) {
1439 if (entry[j] != 0 || entry[j + 1] != 0 ||
1440 entry[j + 2] != 0 || entry[j + 3] != 0) {
1441 dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1442 j, entry[j], entry[j + 1],
1443 entry[j + 2], entry[j + 3]);
1444 }
1445 }
1446
1447 entry = txo->cq.dma_mem.va;
1448 dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
1449 i, txo->cq.head, txo->cq.tail,
1450 atomic_read(&txo->cq.used));
1451 for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
1452 if (entry[j] != 0 || entry[j + 1] != 0 ||
1453 entry[j + 2] != 0 || entry[j + 3] != 0) {
1454 dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1455 j, entry[j], entry[j + 1],
1456 entry[j + 2], entry[j + 3]);
1457 }
1458 }
1459
1460 for (j = 0; j < TX_Q_LEN; j++) {
1461 if (txo->sent_skb_list[j]) {
1462 skb = txo->sent_skb_list[j];
1463 if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1464 tcphdr = tcp_hdr(skb);
1465 dev_info(dev, "TCP source port %d\n",
1466 ntohs(tcphdr->source));
1467 dev_info(dev, "TCP dest port %d\n",
1468 ntohs(tcphdr->dest));
1469 dev_info(dev, "TCP sequence num %u\n",
1470 ntohl(tcphdr->seq));
1471 dev_info(dev, "TCP ack_seq %u\n",
1472 ntohl(tcphdr->ack_seq));
1473 } else if (ip_hdr(skb)->protocol ==
1474 IPPROTO_UDP) {
1475 udphdr = udp_hdr(skb);
1476 dev_info(dev, "UDP source port %d\n",
1477 ntohs(udphdr->source));
1478 dev_info(dev, "UDP dest port %d\n",
1479 ntohs(udphdr->dest));
1480 }
1481 dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
1482 j, skb, skb->len, skb->protocol);
1483 }
1484 }
1485 }
1486
1487 if (lancer_chip(adapter)) {
1488 dev_info(dev, "Initiating reset due to tx timeout\n");
1489 dev_info(dev, "Resetting adapter\n");
1490 status = lancer_physdev_ctrl(adapter,
1491 PHYSDEV_CONTROL_FW_RESET_MASK);
1492 if (status)
1493 dev_err(dev, "Reset failed .. Reboot server\n");
1494 }
1495 }
1496
1497 static inline bool be_in_all_promisc(struct be_adapter *adapter)
1498 {
1499 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1500 BE_IF_FLAGS_ALL_PROMISCUOUS;
1501 }
1502
1503 static int be_set_vlan_promisc(struct be_adapter *adapter)
1504 {
1505 struct device *dev = &adapter->pdev->dev;
1506 int status;
1507
1508 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1509 return 0;
1510
1511 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1512 if (!status) {
1513 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1514 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1515 } else {
1516 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1517 }
1518 return status;
1519 }
1520
1521 static int be_clear_vlan_promisc(struct be_adapter *adapter)
1522 {
1523 struct device *dev = &adapter->pdev->dev;
1524 int status;
1525
1526 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1527 if (!status) {
1528 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1529 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1530 }
1531 return status;
1532 }
1533
1534 /*
1535 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1536 * If the user configures more, place BE in vlan promiscuous mode.
1537 */
1538 static int be_vid_config(struct be_adapter *adapter)
1539 {
1540 struct device *dev = &adapter->pdev->dev;
1541 u16 vids[BE_NUM_VLANS_SUPPORTED];
1542 u16 num = 0, i = 0;
1543 int status = 0;
1544
1545 /* No need to change the VLAN state if the I/F is in promiscuous */
1546 if (adapter->netdev->flags & IFF_PROMISC)
1547 return 0;
1548
1549 if (adapter->vlans_added > be_max_vlans(adapter))
1550 return be_set_vlan_promisc(adapter);
1551
1552 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1553 status = be_clear_vlan_promisc(adapter);
1554 if (status)
1555 return status;
1556 }
1557 /* Construct VLAN Table to give to HW */
1558 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1559 vids[num++] = cpu_to_le16(i);
1560
1561 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1562 if (status) {
1563 dev_err(dev, "Setting HW VLAN filtering failed\n");
1564 /* Set to VLAN promisc mode as setting VLAN filter failed */
1565 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1566 addl_status(status) ==
1567 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1568 return be_set_vlan_promisc(adapter);
1569 }
1570 return status;
1571 }
1572
1573 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1574 {
1575 struct be_adapter *adapter = netdev_priv(netdev);
1576 int status = 0;
1577
1578 mutex_lock(&adapter->rx_filter_lock);
1579
1580 /* Packets with VID 0 are always received by Lancer by default */
1581 if (lancer_chip(adapter) && vid == 0)
1582 goto done;
1583
1584 if (test_bit(vid, adapter->vids))
1585 goto done;
1586
1587 set_bit(vid, adapter->vids);
1588 adapter->vlans_added++;
1589
1590 status = be_vid_config(adapter);
1591 done:
1592 mutex_unlock(&adapter->rx_filter_lock);
1593 return status;
1594 }
1595
1596 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1597 {
1598 struct be_adapter *adapter = netdev_priv(netdev);
1599 int status = 0;
1600
1601 mutex_lock(&adapter->rx_filter_lock);
1602
1603 /* Packets with VID 0 are always received by Lancer by default */
1604 if (lancer_chip(adapter) && vid == 0)
1605 goto done;
1606
1607 if (!test_bit(vid, adapter->vids))
1608 goto done;
1609
1610 clear_bit(vid, adapter->vids);
1611 adapter->vlans_added--;
1612
1613 status = be_vid_config(adapter);
1614 done:
1615 mutex_unlock(&adapter->rx_filter_lock);
1616 return status;
1617 }
1618
1619 static void be_set_all_promisc(struct be_adapter *adapter)
1620 {
1621 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1622 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1623 }
1624
1625 static void be_set_mc_promisc(struct be_adapter *adapter)
1626 {
1627 int status;
1628
1629 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1630 return;
1631
1632 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1633 if (!status)
1634 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1635 }
1636
1637 static void be_set_uc_promisc(struct be_adapter *adapter)
1638 {
1639 int status;
1640
1641 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1642 return;
1643
1644 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
1645 if (!status)
1646 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1647 }
1648
1649 static void be_clear_uc_promisc(struct be_adapter *adapter)
1650 {
1651 int status;
1652
1653 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1654 return;
1655
1656 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1657 if (!status)
1658 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1659 }
1660
1661 /* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1662 * We use a single callback function for both sync and unsync. We really don't
1663 * add/remove addresses through this callback. But, we use it to detect changes
1664 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1665 */
1666 static int be_uc_list_update(struct net_device *netdev,
1667 const unsigned char *addr)
1668 {
1669 struct be_adapter *adapter = netdev_priv(netdev);
1670
1671 adapter->update_uc_list = true;
1672 return 0;
1673 }
1674
1675 static int be_mc_list_update(struct net_device *netdev,
1676 const unsigned char *addr)
1677 {
1678 struct be_adapter *adapter = netdev_priv(netdev);
1679
1680 adapter->update_mc_list = true;
1681 return 0;
1682 }
1683
1684 static void be_set_mc_list(struct be_adapter *adapter)
1685 {
1686 struct net_device *netdev = adapter->netdev;
1687 struct netdev_hw_addr *ha;
1688 bool mc_promisc = false;
1689 int status;
1690
1691 netif_addr_lock_bh(netdev);
1692 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1693
1694 if (netdev->flags & IFF_PROMISC) {
1695 adapter->update_mc_list = false;
1696 } else if (netdev->flags & IFF_ALLMULTI ||
1697 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1698 /* Enable multicast promisc if num configured exceeds
1699 * what we support
1700 */
1701 mc_promisc = true;
1702 adapter->update_mc_list = false;
1703 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1704 /* Update mc-list unconditionally if the iface was previously
1705 * in mc-promisc mode and now is out of that mode.
1706 */
1707 adapter->update_mc_list = true;
1708 }
1709
1710 if (adapter->update_mc_list) {
1711 int i = 0;
1712
1713 /* cache the mc-list in adapter */
1714 netdev_for_each_mc_addr(ha, netdev) {
1715 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1716 i++;
1717 }
1718 adapter->mc_count = netdev_mc_count(netdev);
1719 }
1720 netif_addr_unlock_bh(netdev);
1721
1722 if (mc_promisc) {
1723 be_set_mc_promisc(adapter);
1724 } else if (adapter->update_mc_list) {
1725 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1726 if (!status)
1727 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1728 else
1729 be_set_mc_promisc(adapter);
1730
1731 adapter->update_mc_list = false;
1732 }
1733 }
1734
1735 static void be_clear_mc_list(struct be_adapter *adapter)
1736 {
1737 struct net_device *netdev = adapter->netdev;
1738
1739 __dev_mc_unsync(netdev, NULL);
1740 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
1741 adapter->mc_count = 0;
1742 }
1743
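/* If the address being added is the one already programmed as dev_mac
 * (at pmac_id[0]), reuse that pmac_id instead of asking the FW to add a
 * duplicate MAC filter.
 */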
1744 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1745 {
1746 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1747 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1748 return 0;
1749 }
1750
1751 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1752 adapter->if_handle,
1753 &adapter->pmac_id[uc_idx + 1], 0);
1754 }
1755
1756 static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1757 {
1758 if (pmac_id == adapter->pmac_id[0])
1759 return;
1760
1761 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1762 }
1763
1764 static void be_set_uc_list(struct be_adapter *adapter)
1765 {
1766 struct net_device *netdev = adapter->netdev;
1767 struct netdev_hw_addr *ha;
1768 bool uc_promisc = false;
1769 int curr_uc_macs = 0, i;
1770
1771 netif_addr_lock_bh(netdev);
1772 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
1773
1774 if (netdev->flags & IFF_PROMISC) {
1775 adapter->update_uc_list = false;
1776 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
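/* One UC MAC filter slot is reserved for the address at pmac_id[0];
 * switch to uc-promisc if the list won't fit in the remaining slots.
 */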
1777 uc_promisc = true;
1778 adapter->update_uc_list = false;
1779 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1780 /* Update uc-list unconditionally if the iface was previously
1781 * in uc-promisc mode and now is out of that mode.
1782 */
1783 adapter->update_uc_list = true;
1784 }
1785
1786 if (adapter->update_uc_list) {
1787 /* cache the uc-list in adapter array */
1788 i = 0;
1789 netdev_for_each_uc_addr(ha, netdev) {
1790 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1791 i++;
1792 }
1793 curr_uc_macs = netdev_uc_count(netdev);
1794 }
1795 netif_addr_unlock_bh(netdev);
1796
1797 if (uc_promisc) {
1798 be_set_uc_promisc(adapter);
1799 } else if (adapter->update_uc_list) {
1800 be_clear_uc_promisc(adapter);
1801
1802 for (i = 0; i < adapter->uc_macs; i++)
1803 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1804
1805 for (i = 0; i < curr_uc_macs; i++)
1806 be_uc_mac_add(adapter, i);
1807 adapter->uc_macs = curr_uc_macs;
1808 adapter->update_uc_list = false;
1809 }
1810 }
1811
1812 static void be_clear_uc_list(struct be_adapter *adapter)
1813 {
1814 struct net_device *netdev = adapter->netdev;
1815 int i;
1816
1817 __dev_uc_unsync(netdev, NULL);
1818 for (i = 0; i < adapter->uc_macs; i++)
1819 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1820
1821 adapter->uc_macs = 0;
1822 }
1823
1824 static void __be_set_rx_mode(struct be_adapter *adapter)
1825 {
1826 struct net_device *netdev = adapter->netdev;
1827
1828 mutex_lock(&adapter->rx_filter_lock);
1829
1830 if (netdev->flags & IFF_PROMISC) {
1831 if (!be_in_all_promisc(adapter))
1832 be_set_all_promisc(adapter);
1833 } else if (be_in_all_promisc(adapter)) {
1834 /* We need to re-program the vlan-list or clear
1835 * vlan-promisc mode (if needed) when the interface
1836 * comes out of promisc mode.
1837 */
1838 be_vid_config(adapter);
1839 }
1840
1841 be_set_uc_list(adapter);
1842 be_set_mc_list(adapter);
1843
1844 mutex_unlock(&adapter->rx_filter_lock);
1845 }
1846
1847 static void be_work_set_rx_mode(struct work_struct *work)
1848 {
1849 struct be_cmd_work *cmd_work =
1850 container_of(work, struct be_cmd_work, work);
1851
1852 __be_set_rx_mode(cmd_work->adapter);
1853 kfree(cmd_work);
1854 }
1855
1856 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1857 {
1858 struct be_adapter *adapter = netdev_priv(netdev);
1859 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1860 int status;
1861
1862 if (!sriov_enabled(adapter))
1863 return -EPERM;
1864
1865 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1866 return -EINVAL;
1867
1868 /* Proceed further only if user provided MAC is different
1869 * from active MAC
1870 */
1871 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1872 return 0;
1873
1874 if (BEx_chip(adapter)) {
1875 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1876 vf + 1);
1877
1878 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1879 &vf_cfg->pmac_id, vf + 1);
1880 } else {
1881 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1882 vf + 1);
1883 }
1884
1885 if (status) {
1886 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1887 mac, vf, status);
1888 return be_cmd_status(status);
1889 }
1890
1891 ether_addr_copy(vf_cfg->mac_addr, mac);
1892
1893 return 0;
1894 }
1895
1896 static int be_get_vf_config(struct net_device *netdev, int vf,
1897 struct ifla_vf_info *vi)
1898 {
1899 struct be_adapter *adapter = netdev_priv(netdev);
1900 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1901
1902 if (!sriov_enabled(adapter))
1903 return -EPERM;
1904
1905 if (vf >= adapter->num_vfs)
1906 return -EINVAL;
1907
1908 vi->vf = vf;
1909 vi->max_tx_rate = vf_cfg->tx_rate;
1910 vi->min_tx_rate = 0;
1911 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1912 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1913 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1914 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1915 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1916
1917 return 0;
1918 }
1919
1920 static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1921 {
1922 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1923 u16 vids[BE_NUM_VLANS_SUPPORTED];
1924 int vf_if_id = vf_cfg->if_handle;
1925 int status;
1926
1927 /* Enable Transparent VLAN Tagging */
1928 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1929 if (status)
1930 return status;
1931
1932 /* With TVT enabled, clear any pre-programmed VLAN filters on the VF */
1933 vids[0] = 0;
1934 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1935 if (!status)
1936 dev_info(&adapter->pdev->dev,
1937 "Cleared guest VLANs on VF%d", vf);
1938
1939 /* After TVT is enabled, disallow VFs to program VLAN filters */
1940 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1941 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1942 ~BE_PRIV_FILTMGMT, vf + 1);
1943 if (!status)
1944 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1945 }
1946 return 0;
1947 }
1948
1949 static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1950 {
1951 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1952 struct device *dev = &adapter->pdev->dev;
1953 int status;
1954
1955 /* Reset Transparent VLAN Tagging. */
1956 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1957 vf_cfg->if_handle, 0, 0);
1958 if (status)
1959 return status;
1960
1961 /* Allow VFs to program VLAN filtering */
1962 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1963 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1964 BE_PRIV_FILTMGMT, vf + 1);
1965 if (!status) {
1966 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1967 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1968 }
1969 }
1970
1971 dev_info(dev,
1972 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1973 return 0;
1974 }
1975
1976 static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1977 __be16 vlan_proto)
1978 {
1979 struct be_adapter *adapter = netdev_priv(netdev);
1980 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1981 int status;
1982
1983 if (!sriov_enabled(adapter))
1984 return -EPERM;
1985
1986 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1987 return -EINVAL;
1988
1989 if (vlan_proto != htons(ETH_P_8021Q))
1990 return -EPROTONOSUPPORT;
1991
1992 if (vlan || qos) {
1993 vlan |= qos << VLAN_PRIO_SHIFT;
1994 status = be_set_vf_tvt(adapter, vf, vlan);
1995 } else {
1996 status = be_clear_vf_tvt(adapter, vf);
1997 }
1998
1999 if (status) {
2000 dev_err(&adapter->pdev->dev,
2001 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
2002 status);
2003 return be_cmd_status(status);
2004 }
2005
2006 vf_cfg->vlan_tag = vlan;
2007 return 0;
2008 }
2009
2010 static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
2011 int min_tx_rate, int max_tx_rate)
2012 {
2013 struct be_adapter *adapter = netdev_priv(netdev);
2014 struct device *dev = &adapter->pdev->dev;
2015 int percent_rate, status = 0;
2016 u16 link_speed = 0;
2017 u8 link_status;
2018
2019 if (!sriov_enabled(adapter))
2020 return -EPERM;
2021
2022 if (vf >= adapter->num_vfs)
2023 return -EINVAL;
2024
2025 if (min_tx_rate)
2026 return -EINVAL;
2027
2028 if (!max_tx_rate)
2029 goto config_qos;
2030
2031 status = be_cmd_link_status_query(adapter, &link_speed,
2032 &link_status, 0);
2033 if (status)
2034 goto err;
2035
2036 if (!link_status) {
2037 dev_err(dev, "TX-rate setting not allowed when link is down\n");
2038 status = -ENETDOWN;
2039 goto err;
2040 }
2041
2042 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
2043 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
2044 link_speed);
2045 status = -EINVAL;
2046 goto err;
2047 }
2048
2049 /* On Skyhawk the QOS setting must be done only as a % value */
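/* For example, on a 10000Mbps link percent_rate works out to 100, so on
 * Skyhawk max_tx_rate must be a multiple of 100Mbps.
 */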
2050 percent_rate = link_speed / 100;
2051 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
2052 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
2053 percent_rate);
2054 status = -EINVAL;
2055 goto err;
2056 }
2057
2058 config_qos:
2059 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
2060 if (status)
2061 goto err;
2062
2063 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2064 return 0;
2065
2066 err:
2067 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2068 max_tx_rate, vf);
2069 return be_cmd_status(status);
2070 }
2071
2072 static int be_set_vf_link_state(struct net_device *netdev, int vf,
2073 int link_state)
2074 {
2075 struct be_adapter *adapter = netdev_priv(netdev);
2076 int status;
2077
2078 if (!sriov_enabled(adapter))
2079 return -EPERM;
2080
2081 if (vf >= adapter->num_vfs)
2082 return -EINVAL;
2083
2084 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
2085 if (status) {
2086 dev_err(&adapter->pdev->dev,
2087 "Link state change on VF %d failed: %#x\n", vf, status);
2088 return be_cmd_status(status);
2089 }
2090
2091 adapter->vf_cfg[vf].plink_tracking = link_state;
2092
2093 return 0;
2094 }
2095
2096 static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2097 {
2098 struct be_adapter *adapter = netdev_priv(netdev);
2099 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2100 u8 spoofchk;
2101 int status;
2102
2103 if (!sriov_enabled(adapter))
2104 return -EPERM;
2105
2106 if (vf >= adapter->num_vfs)
2107 return -EINVAL;
2108
2109 if (BEx_chip(adapter))
2110 return -EOPNOTSUPP;
2111
2112 if (enable == vf_cfg->spoofchk)
2113 return 0;
2114
2115 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2116
2117 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2118 0, spoofchk);
2119 if (status) {
2120 dev_err(&adapter->pdev->dev,
2121 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2122 return be_cmd_status(status);
2123 }
2124
2125 vf_cfg->spoofchk = enable;
2126 return 0;
2127 }
2128
2129 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2130 ulong now)
2131 {
2132 aic->rx_pkts_prev = rx_pkts;
2133 aic->tx_reqs_prev = tx_pkts;
2134 aic->jiffies = now;
2135 }
2136
2137 static int be_get_new_eqd(struct be_eq_obj *eqo)
2138 {
2139 struct be_adapter *adapter = eqo->adapter;
2140 int eqd, start;
2141 struct be_aic_obj *aic;
2142 struct be_rx_obj *rxo;
2143 struct be_tx_obj *txo;
2144 u64 rx_pkts = 0, tx_pkts = 0, pkts;
2145 ulong now;
2146 u32 pps, delta;
2147 int i;
2148
2149 aic = &adapter->aic_obj[eqo->idx];
2150 if (!adapter->aic_enabled) {
2151 if (aic->jiffies)
2152 aic->jiffies = 0;
2153 eqd = aic->et_eqd;
2154 return eqd;
2155 }
2156
2157 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2158 do {
2159 start = u64_stats_fetch_begin(&rxo->stats.sync);
2160 pkts = rxo->stats.rx_pkts;
2161 } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
2162 rx_pkts += pkts;
2163 }
2164
2165 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2166 do {
2167 start = u64_stats_fetch_begin(&txo->stats.sync);
2168 pkts = txo->stats.tx_reqs;
2169 } while (u64_stats_fetch_retry(&txo->stats.sync, start));
2170 tx_pkts += pkts;
2171 }
2172
2173 /* Skip if the counters wrapped around or this is the first calculation */
2174 now = jiffies;
2175 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2176 rx_pkts < aic->rx_pkts_prev ||
2177 tx_pkts < aic->tx_reqs_prev) {
2178 be_aic_update(aic, rx_pkts, tx_pkts, now);
2179 return aic->prev_eqd;
2180 }
2181
2182 delta = jiffies_to_msecs(now - aic->jiffies);
2183 if (delta == 0)
2184 return aic->prev_eqd;
2185
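/* Compute the aggregate RX+TX packet rate (pkts/sec) over the sampling
 * window and derive the new delay from it: roughly 4 usecs of EQ delay
 * per 15K pps (e.g. 150K pps -> eqd of 40). Rates below ~30K pps disable
 * the delay, and the result is clamped to [min_eqd, max_eqd].
 */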
2186 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2187 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2188 eqd = (pps / 15000) << 2;
2189
2190 if (eqd < 8)
2191 eqd = 0;
2192 eqd = min_t(u32, eqd, aic->max_eqd);
2193 eqd = max_t(u32, eqd, aic->min_eqd);
2194
2195 be_aic_update(aic, rx_pkts, tx_pkts, now);
2196
2197 return eqd;
2198 }
2199
2200 /* For Skyhawk-R only */
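/* Maps the current EQ delay to one of four coarse R2I_DLY_ENC_* values;
 * be_poll() passes this encoding to be_eq_notify() when re-arming the EQ.
 */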
2201 static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2202 {
2203 struct be_adapter *adapter = eqo->adapter;
2204 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2205 ulong now = jiffies;
2206 int eqd;
2207 u32 mult_enc;
2208
2209 if (!adapter->aic_enabled)
2210 return 0;
2211
2212 if (jiffies_to_msecs(now - aic->jiffies) < 1)
2213 eqd = aic->prev_eqd;
2214 else
2215 eqd = be_get_new_eqd(eqo);
2216
2217 if (eqd > 100)
2218 mult_enc = R2I_DLY_ENC_1;
2219 else if (eqd > 60)
2220 mult_enc = R2I_DLY_ENC_2;
2221 else if (eqd > 20)
2222 mult_enc = R2I_DLY_ENC_3;
2223 else
2224 mult_enc = R2I_DLY_ENC_0;
2225
2226 aic->prev_eqd = eqd;
2227
2228 return mult_enc;
2229 }
2230
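/* Recomputes the EQ delay for every event queue and pushes the values that
 * changed (or all of them, when force_update is set) to the FW via
 * be_cmd_modify_eqd(). Each delay is scaled to the delay_multiplier
 * encoding, (eqd * 65) / 100.
 */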
2231 void be_eqd_update(struct be_adapter *adapter, bool force_update)
2232 {
2233 struct be_set_eqd set_eqd[MAX_EVT_QS];
2234 struct be_aic_obj *aic;
2235 struct be_eq_obj *eqo;
2236 int i, num = 0, eqd;
2237
2238 for_all_evt_queues(adapter, eqo, i) {
2239 aic = &adapter->aic_obj[eqo->idx];
2240 eqd = be_get_new_eqd(eqo);
2241 if (force_update || eqd != aic->prev_eqd) {
2242 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2243 set_eqd[num].eq_id = eqo->q.id;
2244 aic->prev_eqd = eqd;
2245 num++;
2246 }
2247 }
2248
2249 if (num)
2250 be_cmd_modify_eqd(adapter, set_eqd, num);
2251 }
2252
2253 static void be_rx_stats_update(struct be_rx_obj *rxo,
2254 struct be_rx_compl_info *rxcp)
2255 {
2256 struct be_rx_stats *stats = rx_stats(rxo);
2257
2258 u64_stats_update_begin(&stats->sync);
2259 stats->rx_compl++;
2260 stats->rx_bytes += rxcp->pkt_size;
2261 stats->rx_pkts++;
2262 if (rxcp->tunneled)
2263 stats->rx_vxlan_offload_pkts++;
2264 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
2265 stats->rx_mcast_pkts++;
2266 if (rxcp->err)
2267 stats->rx_compl_err++;
2268 u64_stats_update_end(&stats->sync);
2269 }
2270
2271 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
2272 {
2273 /* L4 checksum is not reliable for non TCP/UDP packets.
2274 * Also ignore ipcksm for ipv6 pkts
2275 */
2276 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
2277 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
2278 }
2279
2280 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
2281 {
2282 struct be_adapter *adapter = rxo->adapter;
2283 struct be_rx_page_info *rx_page_info;
2284 struct be_queue_info *rxq = &rxo->q;
2285 u32 frag_idx = rxq->tail;
2286
2287 rx_page_info = &rxo->page_info_tbl[frag_idx];
2288 BUG_ON(!rx_page_info->page);
2289
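/* Only the fragment that carries the page's DMA mapping (last_frag) unmaps
 * the whole big page; intermediate fragments are just synced for CPU access.
 */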
2290 if (rx_page_info->last_frag) {
2291 dma_unmap_page(&adapter->pdev->dev,
2292 dma_unmap_addr(rx_page_info, bus),
2293 adapter->big_page_size, DMA_FROM_DEVICE);
2294 rx_page_info->last_frag = false;
2295 } else {
2296 dma_sync_single_for_cpu(&adapter->pdev->dev,
2297 dma_unmap_addr(rx_page_info, bus),
2298 rx_frag_size, DMA_FROM_DEVICE);
2299 }
2300
2301 queue_tail_inc(rxq);
2302 atomic_dec(&rxq->used);
2303 return rx_page_info;
2304 }
2305
2306 /* Throw away the data in the Rx completion */
2307 static void be_rx_compl_discard(struct be_rx_obj *rxo,
2308 struct be_rx_compl_info *rxcp)
2309 {
2310 struct be_rx_page_info *page_info;
2311 u16 i, num_rcvd = rxcp->num_rcvd;
2312
2313 for (i = 0; i < num_rcvd; i++) {
2314 page_info = get_rx_page_info(rxo);
2315 put_page(page_info->page);
2316 memset(page_info, 0, sizeof(*page_info));
2317 }
2318 }
2319
2320 /*
2321 * skb_fill_rx_data forms a complete skb for an ether frame
2322 * indicated by rxcp.
2323 */
2324 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2325 struct be_rx_compl_info *rxcp)
2326 {
2327 struct be_rx_page_info *page_info;
2328 u16 i, j;
2329 u16 hdr_len, curr_frag_len, remaining;
2330 u8 *start;
2331
2332 page_info = get_rx_page_info(rxo);
2333 start = page_address(page_info->page) + page_info->page_offset;
2334 prefetch(start);
2335
2336 /* Copy data in the first descriptor of this completion */
2337 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2338
2339 skb->len = curr_frag_len;
2340 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
2341 memcpy(skb->data, start, curr_frag_len);
2342 /* Complete packet has now been moved to data */
2343 put_page(page_info->page);
2344 skb->data_len = 0;
2345 skb->tail += curr_frag_len;
2346 } else {
2347 hdr_len = ETH_HLEN;
2348 memcpy(skb->data, start, hdr_len);
2349 skb_shinfo(skb)->nr_frags = 1;
2350 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
2351 page_info->page,
2352 page_info->page_offset + hdr_len,
2353 curr_frag_len - hdr_len);
2354 skb->data_len = curr_frag_len - hdr_len;
2355 skb->truesize += rx_frag_size;
2356 skb->tail += hdr_len;
2357 }
2358 page_info->page = NULL;
2359
2360 if (rxcp->pkt_size <= rx_frag_size) {
2361 BUG_ON(rxcp->num_rcvd != 1);
2362 return;
2363 }
2364
2365 /* More frags present for this completion */
2366 remaining = rxcp->pkt_size - curr_frag_len;
2367 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2368 page_info = get_rx_page_info(rxo);
2369 curr_frag_len = min(remaining, rx_frag_size);
2370
2371 /* Coalesce all frags from the same physical page in one slot */
2372 if (page_info->page_offset == 0) {
2373 /* Fresh page */
2374 j++;
2375 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
2376 page_info->page,
2377 page_info->page_offset,
2378 curr_frag_len);
2379 skb_shinfo(skb)->nr_frags++;
2380 } else {
2381 put_page(page_info->page);
2382 skb_frag_size_add(&skb_shinfo(skb)->frags[j],
2383 curr_frag_len);
2384 }
2385
2386 skb->len += curr_frag_len;
2387 skb->data_len += curr_frag_len;
2388 skb->truesize += rx_frag_size;
2389 remaining -= curr_frag_len;
2390 page_info->page = NULL;
2391 }
2392 BUG_ON(j > MAX_SKB_FRAGS);
2393 }
2394
2395 /* Process the RX completion indicated by rxcp when GRO is disabled */
2396 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2397 struct be_rx_compl_info *rxcp)
2398 {
2399 struct be_adapter *adapter = rxo->adapter;
2400 struct net_device *netdev = adapter->netdev;
2401 struct sk_buff *skb;
2402
2403 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2404 if (unlikely(!skb)) {
2405 rx_stats(rxo)->rx_drops_no_skbs++;
2406 be_rx_compl_discard(rxo, rxcp);
2407 return;
2408 }
2409
2410 skb_fill_rx_data(rxo, skb, rxcp);
2411
2412 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2413 skb->ip_summed = CHECKSUM_UNNECESSARY;
2414 else
2415 skb_checksum_none_assert(skb);
2416
2417 skb->protocol = eth_type_trans(skb, netdev);
2418 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2419 if (netdev->features & NETIF_F_RXHASH)
2420 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2421
2422 skb->csum_level = rxcp->tunneled;
2423 skb_mark_napi_id(skb, napi);
2424
2425 if (rxcp->vlanf)
2426 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2427
2428 netif_receive_skb(skb);
2429 }
2430
2431 /* Process the RX completion indicated by rxcp when GRO is enabled */
2432 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2433 struct napi_struct *napi,
2434 struct be_rx_compl_info *rxcp)
2435 {
2436 struct be_adapter *adapter = rxo->adapter;
2437 struct be_rx_page_info *page_info;
2438 struct sk_buff *skb = NULL;
2439 u16 remaining, curr_frag_len;
2440 u16 i, j;
2441
2442 skb = napi_get_frags(napi);
2443 if (!skb) {
2444 be_rx_compl_discard(rxo, rxcp);
2445 return;
2446 }
2447
2448 remaining = rxcp->pkt_size;
2449 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2450 page_info = get_rx_page_info(rxo);
2451
2452 curr_frag_len = min(remaining, rx_frag_size);
2453
2454 /* Coalesce all frags from the same physical page in one slot */
2455 if (i == 0 || page_info->page_offset == 0) {
2456 /* First frag or Fresh page */
2457 j++;
2458 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
2459 page_info->page,
2460 page_info->page_offset,
2461 curr_frag_len);
2462 } else {
2463 put_page(page_info->page);
2464 skb_frag_size_add(&skb_shinfo(skb)->frags[j],
2465 curr_frag_len);
2466 }
2467
2468 skb->truesize += rx_frag_size;
2469 remaining -= curr_frag_len;
2470 memset(page_info, 0, sizeof(*page_info));
2471 }
2472 BUG_ON(j > MAX_SKB_FRAGS);
2473
2474 skb_shinfo(skb)->nr_frags = j + 1;
2475 skb->len = rxcp->pkt_size;
2476 skb->data_len = rxcp->pkt_size;
2477 skb->ip_summed = CHECKSUM_UNNECESSARY;
2478 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2479 if (adapter->netdev->features & NETIF_F_RXHASH)
2480 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2481
2482 skb->csum_level = rxcp->tunneled;
2483
2484 if (rxcp->vlanf)
2485 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2486
2487 napi_gro_frags(napi);
2488 }
2489
2490 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2491 struct be_rx_compl_info *rxcp)
2492 {
2493 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2494 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2495 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2496 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2497 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2498 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2499 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2500 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2501 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2502 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2503 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2504 if (rxcp->vlanf) {
2505 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2506 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2507 }
2508 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2509 rxcp->tunneled =
2510 GET_RX_COMPL_V1_BITS(tunneled, compl);
2511 }
2512
2513 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2514 struct be_rx_compl_info *rxcp)
2515 {
2516 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2517 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2518 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2519 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2520 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2521 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2522 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2523 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2524 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2525 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2526 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2527 if (rxcp->vlanf) {
2528 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2529 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2530 }
2531 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2532 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2533 }
2534
2535 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2536 {
2537 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2538 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2539 struct be_adapter *adapter = rxo->adapter;
2540
2541 /* For checking the valid bit it is Ok to use either definition as the
2542 * valid bit is at the same position in both v0 and v1 Rx compl */
2543 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2544 return NULL;
2545
2546 rmb();
2547 be_dws_le_to_cpu(compl, sizeof(*compl));
2548
2549 if (adapter->be3_native)
2550 be_parse_rx_compl_v1(compl, rxcp);
2551 else
2552 be_parse_rx_compl_v0(compl, rxcp);
2553
2554 if (rxcp->ip_frag)
2555 rxcp->l4_csum = 0;
2556
2557 if (rxcp->vlanf) {
2558 /* In QNQ modes, if qnq bit is not set, then the packet was
2559 * tagged only with the transparent outer vlan-tag and must
2560 * not be treated as a vlan packet by host
2561 */
2562 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2563 rxcp->vlanf = 0;
2564
2565 if (!lancer_chip(adapter))
2566 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2567
2568 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2569 !test_bit(rxcp->vlan_tag, adapter->vids))
2570 rxcp->vlanf = 0;
2571 }
2572
2573 /* As the compl has been parsed, reset it; we won't touch it again */
2574 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2575
2576 queue_tail_inc(&rxo->cq);
2577 return rxcp;
2578 }
2579
2580 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2581 {
2582 u32 order = get_order(size);
2583
2584 if (order > 0)
2585 gfp |= __GFP_COMP;
2586 return alloc_pages(gfp, order);
2587 }
2588
2589 /*
2590 * Allocate a page, split it into fragments of size rx_frag_size and post them as
2591 * receive buffers to BE
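 * For example, with the default rx_frag_size of 2048 and 4K pages, each
 * allocated page holds two such fragments.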
2592 */
2593 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2594 {
2595 struct be_adapter *adapter = rxo->adapter;
2596 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2597 struct be_queue_info *rxq = &rxo->q;
2598 struct page *pagep = NULL;
2599 struct device *dev = &adapter->pdev->dev;
2600 struct be_eth_rx_d *rxd;
2601 u64 page_dmaaddr = 0, frag_dmaaddr;
2602 u32 posted, page_offset = 0, notify = 0;
2603
2604 page_info = &rxo->page_info_tbl[rxq->head];
2605 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2606 if (!pagep) {
2607 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2608 if (unlikely(!pagep)) {
2609 rx_stats(rxo)->rx_post_fail++;
2610 break;
2611 }
2612 page_dmaaddr = dma_map_page(dev, pagep, 0,
2613 adapter->big_page_size,
2614 DMA_FROM_DEVICE);
2615 if (dma_mapping_error(dev, page_dmaaddr)) {
2616 put_page(pagep);
2617 pagep = NULL;
2618 adapter->drv_stats.dma_map_errors++;
2619 break;
2620 }
2621 page_offset = 0;
2622 } else {
2623 get_page(pagep);
2624 page_offset += rx_frag_size;
2625 }
2626 page_info->page_offset = page_offset;
2627 page_info->page = pagep;
2628
2629 rxd = queue_head_node(rxq);
2630 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2631 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2632 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2633
2634 /* Any space left in the current big page for another frag? */
2635 if ((page_offset + rx_frag_size + rx_frag_size) >
2636 adapter->big_page_size) {
2637 pagep = NULL;
2638 page_info->last_frag = true;
2639 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2640 } else {
2641 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2642 }
2643
2644 prev_page_info = page_info;
2645 queue_head_inc(rxq);
2646 page_info = &rxo->page_info_tbl[rxq->head];
2647 }
2648
2649 /* Mark the last frag of a page when we break out of the above loop
2650 * with no more slots available in the RXQ
2651 */
2652 if (pagep) {
2653 prev_page_info->last_frag = true;
2654 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2655 }
2656
2657 if (posted) {
2658 atomic_add(posted, &rxq->used);
2659 if (rxo->rx_post_starved)
2660 rxo->rx_post_starved = false;
2661 do {
2662 notify = min(MAX_NUM_POST_ERX_DB, posted);
2663 be_rxq_notify(adapter, rxq->id, notify);
2664 posted -= notify;
2665 } while (posted);
2666 } else if (atomic_read(&rxq->used) == 0) {
2667 /* Let be_worker replenish when memory is available */
2668 rxo->rx_post_starved = true;
2669 }
2670 }
2671
2672 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2673 {
2674 switch (status) {
2675 case BE_TX_COMP_HDR_PARSE_ERR:
2676 tx_stats(txo)->tx_hdr_parse_err++;
2677 break;
2678 case BE_TX_COMP_NDMA_ERR:
2679 tx_stats(txo)->tx_dma_err++;
2680 break;
2681 case BE_TX_COMP_ACL_ERR:
2682 tx_stats(txo)->tx_spoof_check_err++;
2683 break;
2684 }
2685 }
2686
2687 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2688 {
2689 switch (status) {
2690 case LANCER_TX_COMP_LSO_ERR:
2691 tx_stats(txo)->tx_tso_err++;
2692 break;
2693 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2694 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2695 tx_stats(txo)->tx_spoof_check_err++;
2696 break;
2697 case LANCER_TX_COMP_QINQ_ERR:
2698 tx_stats(txo)->tx_qinq_err++;
2699 break;
2700 case LANCER_TX_COMP_PARITY_ERR:
2701 tx_stats(txo)->tx_internal_parity_err++;
2702 break;
2703 case LANCER_TX_COMP_DMA_ERR:
2704 tx_stats(txo)->tx_dma_err++;
2705 break;
2706 case LANCER_TX_COMP_SGE_ERR:
2707 tx_stats(txo)->tx_sge_err++;
2708 break;
2709 }
2710 }
2711
2712 static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2713 struct be_tx_obj *txo)
2714 {
2715 struct be_queue_info *tx_cq = &txo->cq;
2716 struct be_tx_compl_info *txcp = &txo->txcp;
2717 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2718
2719 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2720 return NULL;
2721
2722 /* Ensure load ordering of valid bit dword and other dwords below */
2723 rmb();
2724 be_dws_le_to_cpu(compl, sizeof(*compl));
2725
2726 txcp->status = GET_TX_COMPL_BITS(status, compl);
2727 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2728
2729 if (txcp->status) {
2730 if (lancer_chip(adapter)) {
2731 lancer_update_tx_err(txo, txcp->status);
2732 /* Reset the adapter in case of TSO,
2733 * SGE or Parity error
2734 */
2735 if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2736 txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2737 txcp->status == LANCER_TX_COMP_SGE_ERR)
2738 be_set_error(adapter, BE_ERROR_TX);
2739 } else {
2740 be_update_tx_err(txo, txcp->status);
2741 }
2742 }
2743
2744 if (be_check_error(adapter, BE_ERROR_TX))
2745 return NULL;
2746
2747 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2748 queue_tail_inc(tx_cq);
2749 return txcp;
2750 }
2751
2752 static u16 be_tx_compl_process(struct be_adapter *adapter,
2753 struct be_tx_obj *txo, u16 last_index)
2754 {
2755 struct sk_buff **sent_skbs = txo->sent_skb_list;
2756 struct be_queue_info *txq = &txo->q;
2757 struct sk_buff *skb = NULL;
2758 bool unmap_skb_hdr = false;
2759 struct be_eth_wrb *wrb;
2760 u16 num_wrbs = 0;
2761 u32 frag_index;
2762
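/* Each TX request starts with a header WRB (the slot where sent_skbs[]
 * holds the skb) followed by one WRB per mapped buffer. Walk the queue up
 * to last_index, unmapping buffers and freeing completed skbs.
 */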
2763 do {
2764 if (sent_skbs[txq->tail]) {
2765 /* Free skb from prev req */
2766 if (skb)
2767 dev_consume_skb_any(skb);
2768 skb = sent_skbs[txq->tail];
2769 sent_skbs[txq->tail] = NULL;
2770 queue_tail_inc(txq); /* skip hdr wrb */
2771 num_wrbs++;
2772 unmap_skb_hdr = true;
2773 }
2774 wrb = queue_tail_node(txq);
2775 frag_index = txq->tail;
2776 unmap_tx_frag(&adapter->pdev->dev, wrb,
2777 (unmap_skb_hdr && skb_headlen(skb)));
2778 unmap_skb_hdr = false;
2779 queue_tail_inc(txq);
2780 num_wrbs++;
2781 } while (frag_index != last_index);
2782 dev_consume_skb_any(skb);
2783
2784 return num_wrbs;
2785 }
2786
2787 /* Return the number of events in the event queue */
2788 static inline int events_get(struct be_eq_obj *eqo)
2789 {
2790 struct be_eq_entry *eqe;
2791 int num = 0;
2792
2793 do {
2794 eqe = queue_tail_node(&eqo->q);
2795 if (eqe->evt == 0)
2796 break;
2797
2798 rmb();
2799 eqe->evt = 0;
2800 num++;
2801 queue_tail_inc(&eqo->q);
2802 } while (true);
2803
2804 return num;
2805 }
2806
2807 /* Leaves the EQ in a disarmed state */
2808 static void be_eq_clean(struct be_eq_obj *eqo)
2809 {
2810 int num = events_get(eqo);
2811
2812 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2813 }
2814
2815 /* Free posted rx buffers that were not used */
2816 static void be_rxq_clean(struct be_rx_obj *rxo)
2817 {
2818 struct be_queue_info *rxq = &rxo->q;
2819 struct be_rx_page_info *page_info;
2820
2821 while (atomic_read(&rxq->used) > 0) {
2822 page_info = get_rx_page_info(rxo);
2823 put_page(page_info->page);
2824 memset(page_info, 0, sizeof(*page_info));
2825 }
2826 BUG_ON(atomic_read(&rxq->used));
2827 rxq->tail = 0;
2828 rxq->head = 0;
2829 }
2830
2831 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2832 {
2833 struct be_queue_info *rx_cq = &rxo->cq;
2834 struct be_rx_compl_info *rxcp;
2835 struct be_adapter *adapter = rxo->adapter;
2836 int flush_wait = 0;
2837
2838 /* Consume pending rx completions.
2839 * Wait for the flush completion (identified by zero num_rcvd)
2840 * to arrive. Notify CQ even when there are no more CQ entries
2841 * for HW to flush partially coalesced CQ entries.
2842 * In Lancer, there is no need to wait for flush compl.
2843 */
2844 for (;;) {
2845 rxcp = be_rx_compl_get(rxo);
2846 if (!rxcp) {
2847 if (lancer_chip(adapter))
2848 break;
2849
2850 if (flush_wait++ > 50 ||
2851 be_check_error(adapter,
2852 BE_ERROR_HW)) {
2853 dev_warn(&adapter->pdev->dev,
2854 "did not receive flush compl\n");
2855 break;
2856 }
2857 be_cq_notify(adapter, rx_cq->id, true, 0);
2858 mdelay(1);
2859 } else {
2860 be_rx_compl_discard(rxo, rxcp);
2861 be_cq_notify(adapter, rx_cq->id, false, 1);
2862 if (rxcp->num_rcvd == 0)
2863 break;
2864 }
2865 }
2866
2867 /* After cleanup, leave the CQ in unarmed state */
2868 be_cq_notify(adapter, rx_cq->id, false, 0);
2869 }
2870
2871 static void be_tx_compl_clean(struct be_adapter *adapter)
2872 {
2873 struct device *dev = &adapter->pdev->dev;
2874 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
2875 struct be_tx_compl_info *txcp;
2876 struct be_queue_info *txq;
2877 u32 end_idx, notified_idx;
2878 struct be_tx_obj *txo;
2879 int i, pending_txqs;
2880
2881 /* Stop polling for compls when HW has been silent for 10ms */
2882 do {
2883 pending_txqs = adapter->num_tx_qs;
2884
2885 for_all_tx_queues(adapter, txo, i) {
2886 cmpl = 0;
2887 num_wrbs = 0;
2888 txq = &txo->q;
2889 while ((txcp = be_tx_compl_get(adapter, txo))) {
2890 num_wrbs +=
2891 be_tx_compl_process(adapter, txo,
2892 txcp->end_index);
2893 cmpl++;
2894 }
2895 if (cmpl) {
2896 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2897 atomic_sub(num_wrbs, &txq->used);
2898 timeo = 0;
2899 }
2900 if (!be_is_tx_compl_pending(txo))
2901 pending_txqs--;
2902 }
2903
2904 if (pending_txqs == 0 || ++timeo > 10 ||
2905 be_check_error(adapter, BE_ERROR_HW))
2906 break;
2907
2908 mdelay(1);
2909 } while (true);
2910
2911 /* Free enqueued TX that was never notified to HW */
2912 for_all_tx_queues(adapter, txo, i) {
2913 txq = &txo->q;
2914
2915 if (atomic_read(&txq->used)) {
2916 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2917 i, atomic_read(&txq->used));
2918 notified_idx = txq->tail;
2919 end_idx = txq->tail;
2920 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2921 txq->len);
2922 /* Use the tx-compl process logic to handle requests
2923 * that were not sent to the HW.
2924 */
2925 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2926 atomic_sub(num_wrbs, &txq->used);
2927 BUG_ON(atomic_read(&txq->used));
2928 txo->pend_wrb_cnt = 0;
2929 /* Since hw was never notified of these requests,
2930 * reset TXQ indices
2931 */
2932 txq->head = notified_idx;
2933 txq->tail = notified_idx;
2934 }
2935 }
2936 }
2937
2938 static void be_evt_queues_destroy(struct be_adapter *adapter)
2939 {
2940 struct be_eq_obj *eqo;
2941 int i;
2942
2943 for_all_evt_queues(adapter, eqo, i) {
2944 if (eqo->q.created) {
2945 be_eq_clean(eqo);
2946 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2947 netif_napi_del(&eqo->napi);
2948 free_cpumask_var(eqo->affinity_mask);
2949 }
2950 be_queue_free(adapter, &eqo->q);
2951 }
2952 }
2953
2954 static int be_evt_queues_create(struct be_adapter *adapter)
2955 {
2956 struct be_queue_info *eq;
2957 struct be_eq_obj *eqo;
2958 struct be_aic_obj *aic;
2959 int i, rc;
2960
2961 /* need enough EQs to service both RX and TX queues */
2962 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2963 max(adapter->cfg_num_rx_irqs,
2964 adapter->cfg_num_tx_irqs));
2965
2966 adapter->aic_enabled = true;
2967
2968 for_all_evt_queues(adapter, eqo, i) {
2969 int numa_node = dev_to_node(&adapter->pdev->dev);
2970
2971 aic = &adapter->aic_obj[i];
2972 eqo->adapter = adapter;
2973 eqo->idx = i;
2974 aic->max_eqd = BE_MAX_EQD;
2975
2976 eq = &eqo->q;
2977 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2978 sizeof(struct be_eq_entry));
2979 if (rc)
2980 return rc;
2981
2982 rc = be_cmd_eq_create(adapter, eqo);
2983 if (rc)
2984 return rc;
2985
2986 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2987 return -ENOMEM;
2988 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2989 eqo->affinity_mask);
2990 netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
2991 }
2992 return 0;
2993 }
2994
2995 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2996 {
2997 struct be_queue_info *q;
2998
2999 q = &adapter->mcc_obj.q;
3000 if (q->created)
3001 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
3002 be_queue_free(adapter, q);
3003
3004 q = &adapter->mcc_obj.cq;
3005 if (q->created)
3006 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3007 be_queue_free(adapter, q);
3008 }
3009
3010 /* Must be called only after TX qs are created as MCC shares TX EQ */
3011 static int be_mcc_queues_create(struct be_adapter *adapter)
3012 {
3013 struct be_queue_info *q, *cq;
3014
3015 cq = &adapter->mcc_obj.cq;
3016 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
3017 sizeof(struct be_mcc_compl)))
3018 goto err;
3019
3020 /* Use the default EQ for MCC completions */
3021 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
3022 goto mcc_cq_free;
3023
3024 q = &adapter->mcc_obj.q;
3025 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3026 goto mcc_cq_destroy;
3027
3028 if (be_cmd_mccq_create(adapter, q, cq))
3029 goto mcc_q_free;
3030
3031 return 0;
3032
3033 mcc_q_free:
3034 be_queue_free(adapter, q);
3035 mcc_cq_destroy:
3036 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
3037 mcc_cq_free:
3038 be_queue_free(adapter, cq);
3039 err:
3040 return -1;
3041 }
3042
3043 static void be_tx_queues_destroy(struct be_adapter *adapter)
3044 {
3045 struct be_queue_info *q;
3046 struct be_tx_obj *txo;
3047 u8 i;
3048
3049 for_all_tx_queues(adapter, txo, i) {
3050 q = &txo->q;
3051 if (q->created)
3052 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3053 be_queue_free(adapter, q);
3054
3055 q = &txo->cq;
3056 if (q->created)
3057 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3058 be_queue_free(adapter, q);
3059 }
3060 }
3061
3062 static int be_tx_qs_create(struct be_adapter *adapter)
3063 {
3064 struct be_queue_info *cq;
3065 struct be_tx_obj *txo;
3066 struct be_eq_obj *eqo;
3067 int status, i;
3068
3069 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
3070
3071 for_all_tx_queues(adapter, txo, i) {
3072 cq = &txo->cq;
3073 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
3074 sizeof(struct be_eth_tx_compl));
3075 if (status)
3076 return status;
3077
3078 u64_stats_init(&txo->stats.sync);
3079 u64_stats_init(&txo->stats.sync_compl);
3080
3081 /* If num_evt_qs is less than num_tx_qs, then more than
3082 * one txq shares an eq
3083 */
3084 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
3085 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
3086 if (status)
3087 return status;
3088
3089 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3090 sizeof(struct be_eth_wrb));
3091 if (status)
3092 return status;
3093
3094 status = be_cmd_txq_create(adapter, txo);
3095 if (status)
3096 return status;
3097
3098 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3099 eqo->idx);
3100 }
3101
3102 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3103 adapter->num_tx_qs);
3104 return 0;
3105 }
3106
3107 static void be_rx_cqs_destroy(struct be_adapter *adapter)
3108 {
3109 struct be_queue_info *q;
3110 struct be_rx_obj *rxo;
3111 int i;
3112
3113 for_all_rx_queues(adapter, rxo, i) {
3114 q = &rxo->cq;
3115 if (q->created)
3116 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3117 be_queue_free(adapter, q);
3118 }
3119 }
3120
3121 static int be_rx_cqs_create(struct be_adapter *adapter)
3122 {
3123 struct be_queue_info *eq, *cq;
3124 struct be_rx_obj *rxo;
3125 int rc, i;
3126
3127 adapter->num_rss_qs =
3128 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
3129
3130 /* We'll use RSS only if at least 2 RSS rings are supported. */
3131 if (adapter->num_rss_qs < 2)
3132 adapter->num_rss_qs = 0;
3133
3134 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3135
3136 /* When the interface is not capable of RSS rings (and there is no
3137 * need to create a default RXQ) we'll still need one RXQ
3138 */
3139 if (adapter->num_rx_qs == 0)
3140 adapter->num_rx_qs = 1;
3141
3142 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3143 for_all_rx_queues(adapter, rxo, i) {
3144 rxo->adapter = adapter;
3145 cq = &rxo->cq;
3146 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
3147 sizeof(struct be_eth_rx_compl));
3148 if (rc)
3149 return rc;
3150
3151 u64_stats_init(&rxo->stats.sync);
3152 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3153 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3154 if (rc)
3155 return rc;
3156 }
3157
3158 dev_info(&adapter->pdev->dev,
3159 "created %d RX queue(s)\n", adapter->num_rx_qs);
3160 return 0;
3161 }
3162
3163 static irqreturn_t be_intx(int irq, void *dev)
3164 {
3165 struct be_eq_obj *eqo = dev;
3166 struct be_adapter *adapter = eqo->adapter;
3167 int num_evts = 0;
3168
3169 /* IRQ is not expected when NAPI is scheduled as the EQ
3170 * will not be armed.
3171 * But, this can happen on Lancer INTx where it takes
3172 * a while to de-assert INTx or in BE2 where occasionally
3173 * an interrupt may be raised even when EQ is unarmed.
3174 * If NAPI is already scheduled, then counting & notifying
3175 * events will orphan them.
3176 */
3177 if (napi_schedule_prep(&eqo->napi)) {
3178 num_evts = events_get(eqo);
3179 __napi_schedule(&eqo->napi);
3180 if (num_evts)
3181 eqo->spurious_intr = 0;
3182 }
3183 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
3184
3185 /* Return IRQ_HANDLED only for the first spurious intr
3186 * after a valid intr to stop the kernel from branding
3187 * this irq as a bad one!
3188 */
3189 if (num_evts || eqo->spurious_intr++ == 0)
3190 return IRQ_HANDLED;
3191 else
3192 return IRQ_NONE;
3193 }
3194
3195 static irqreturn_t be_msix(int irq, void *dev)
3196 {
3197 struct be_eq_obj *eqo = dev;
3198
3199 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
3200 napi_schedule(&eqo->napi);
3201 return IRQ_HANDLED;
3202 }
3203
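/* GRO is attempted only for error-free TCP frames whose L4 checksum was
 * verified by HW.
 */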
3204 static inline bool do_gro(struct be_rx_compl_info *rxcp)
3205 {
3206 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
3207 }
3208
3209 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
3210 int budget)
3211 {
3212 struct be_adapter *adapter = rxo->adapter;
3213 struct be_queue_info *rx_cq = &rxo->cq;
3214 struct be_rx_compl_info *rxcp;
3215 u32 work_done;
3216 u32 frags_consumed = 0;
3217
3218 for (work_done = 0; work_done < budget; work_done++) {
3219 rxcp = be_rx_compl_get(rxo);
3220 if (!rxcp)
3221 break;
3222
3223 /* Is it a flush compl that has no data */
3224 if (unlikely(rxcp->num_rcvd == 0))
3225 goto loop_continue;
3226
3227 /* Discard compl with partial DMA Lancer B0 */
3228 if (unlikely(!rxcp->pkt_size)) {
3229 be_rx_compl_discard(rxo, rxcp);
3230 goto loop_continue;
3231 }
3232
3233 /* On BE drop pkts that arrive due to imperfect filtering in
3234 * promiscuous mode on some SKUs
3235 */
3236 if (unlikely(rxcp->port != adapter->port_num &&
3237 !lancer_chip(adapter))) {
3238 be_rx_compl_discard(rxo, rxcp);
3239 goto loop_continue;
3240 }
3241
3242 if (do_gro(rxcp))
3243 be_rx_compl_process_gro(rxo, napi, rxcp);
3244 else
3245 be_rx_compl_process(rxo, napi, rxcp);
3246
3247 loop_continue:
3248 frags_consumed += rxcp->num_rcvd;
3249 be_rx_stats_update(rxo, rxcp);
3250 }
3251
3252 if (work_done) {
3253 be_cq_notify(adapter, rx_cq->id, true, work_done);
3254
3255 /* When an rx-obj gets into post_starved state, just
3256 * let be_worker do the posting.
3257 */
3258 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3259 !rxo->rx_post_starved)
3260 be_post_rx_frags(rxo, GFP_ATOMIC,
3261 max_t(u32, MAX_RX_POST,
3262 frags_consumed));
3263 }
3264
3265 return work_done;
3266 }
3267
3268
3269 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3270 int idx)
3271 {
3272 int num_wrbs = 0, work_done = 0;
3273 struct be_tx_compl_info *txcp;
3274
3275 while ((txcp = be_tx_compl_get(adapter, txo))) {
3276 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
3277 work_done++;
3278 }
3279
3280 if (work_done) {
3281 be_cq_notify(adapter, txo->cq.id, true, work_done);
3282 atomic_sub(num_wrbs, &txo->q.used);
3283
3284 /* As Tx wrbs have been freed up, wake up netdev queue
3285 * if it was stopped due to lack of tx wrbs. */
3286 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
3287 be_can_txq_wake(txo)) {
3288 netif_wake_subqueue(adapter->netdev, idx);
3289 }
3290
3291 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3292 tx_stats(txo)->tx_compl += work_done;
3293 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3294 }
3295 }
3296
3297 int be_poll(struct napi_struct *napi, int budget)
3298 {
3299 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3300 struct be_adapter *adapter = eqo->adapter;
3301 int max_work = 0, work, i, num_evts;
3302 struct be_rx_obj *rxo;
3303 struct be_tx_obj *txo;
3304 u32 mult_enc = 0;
3305
3306 num_evts = events_get(eqo);
3307
3308 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3309 be_process_tx(adapter, txo, i);
3310
3311 /* This loop will iterate twice for EQ0 in which
3312 * completions of the last RXQ (default one) are also processed
3313 * For other EQs the loop iterates only once
3314 */
3315 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3316 work = be_process_rx(rxo, napi, budget);
3317 max_work = max(work, max_work);
3318 }
3319
3320 if (is_mcc_eqo(eqo))
3321 be_process_mcc(adapter);
3322
3323 if (max_work < budget) {
3324 napi_complete_done(napi, max_work);
3325
3326 /* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
3327 * delay via a delay multiplier encoding value
3328 */
3329 if (skyhawk_chip(adapter))
3330 mult_enc = be_get_eq_delay_mult_enc(eqo);
3331
3332 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3333 mult_enc);
3334 } else {
3335 /* As we'll continue in polling mode, count and clear events */
3336 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3337 }
3338 return max_work;
3339 }
3340
3341 void be_detect_error(struct be_adapter *adapter)
3342 {
3343 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3344 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3345 struct device *dev = &adapter->pdev->dev;
3346 u16 val;
3347 u32 i;
3348
3349 if (be_check_error(adapter, BE_ERROR_HW))
3350 return;
3351
3352 if (lancer_chip(adapter)) {
3353 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3354 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3355 be_set_error(adapter, BE_ERROR_UE);
3356 sliport_err1 = ioread32(adapter->db +
3357 SLIPORT_ERROR1_OFFSET);
3358 sliport_err2 = ioread32(adapter->db +
3359 SLIPORT_ERROR2_OFFSET);
3360 /* Do not log error messages if it's a FW reset */
3361 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3362 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3363 dev_info(dev, "Reset is in progress\n");
3364 } else {
3365 dev_err(dev, "Error detected in the card\n");
3366 dev_err(dev, "ERR: sliport status 0x%x\n",
3367 sliport_status);
3368 dev_err(dev, "ERR: sliport error1 0x%x\n",
3369 sliport_err1);
3370 dev_err(dev, "ERR: sliport error2 0x%x\n",
3371 sliport_err2);
3372 }
3373 }
3374 } else {
3375 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3376 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3377 ue_lo_mask = ioread32(adapter->pcicfg +
3378 PCICFG_UE_STATUS_LOW_MASK);
3379 ue_hi_mask = ioread32(adapter->pcicfg +
3380 PCICFG_UE_STATUS_HI_MASK);
3381
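/* Consider only the UE bits that are not masked off in the corresponding
 * UE mask registers.
 */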
3382 ue_lo = (ue_lo & ~ue_lo_mask);
3383 ue_hi = (ue_hi & ~ue_hi_mask);
3384
3385 if (ue_lo || ue_hi) {
3386 /* On certain platforms BE3 hardware can indicate
3387 * spurious UEs. In case of a UE in the chip,
3388 * the POST register correctly reports either a
3389 * FAT_LOG_START state (FW is currently dumping
3390 * FAT log data) or an ARMFW_UE state. Check for the
3391 * above states to ascertain if the UE is valid or not.
3392 */
3393 if (BE3_chip(adapter)) {
3394 val = be_POST_stage_get(adapter);
3395 if ((val & POST_STAGE_FAT_LOG_START)
3396 != POST_STAGE_FAT_LOG_START &&
3397 (val & POST_STAGE_ARMFW_UE)
3398 != POST_STAGE_ARMFW_UE &&
3399 (val & POST_STAGE_RECOVERABLE_ERR)
3400 != POST_STAGE_RECOVERABLE_ERR)
3401 return;
3402 }
3403
3404 dev_err(dev, "Error detected in the adapter");
3405 be_set_error(adapter, BE_ERROR_UE);
3406
3407 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3408 if (ue_lo & 1)
3409 dev_err(dev, "UE: %s bit set\n",
3410 ue_status_low_desc[i]);
3411 }
3412 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3413 if (ue_hi & 1)
3414 dev_err(dev, "UE: %s bit set\n",
3415 ue_status_hi_desc[i]);
3416 }
3417 }
3418 }
3419 }
3420
3421 static void be_msix_disable(struct be_adapter *adapter)
3422 {
3423 if (msix_enabled(adapter)) {
3424 pci_disable_msix(adapter->pdev);
3425 adapter->num_msix_vec = 0;
3426 adapter->num_msix_roce_vec = 0;
3427 }
3428 }
3429
3430 static int be_msix_enable(struct be_adapter *adapter)
3431 {
3432 unsigned int i, max_roce_eqs;
3433 struct device *dev = &adapter->pdev->dev;
3434 int num_vec;
3435
3436 /* If RoCE is supported, program the max number of vectors that
3437 * could be used for NIC and RoCE, else, just program the number
3438 * we'll use initially.
3439 */
3440 if (be_roce_supported(adapter)) {
3441 max_roce_eqs =
3442 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3443 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3444 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3445 } else {
3446 num_vec = max(adapter->cfg_num_rx_irqs,
3447 adapter->cfg_num_tx_irqs);
3448 }
3449
3450 for (i = 0; i < num_vec; i++)
3451 adapter->msix_entries[i].entry = i;
3452
3453 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3454 MIN_MSIX_VECTORS, num_vec);
3455 if (num_vec < 0)
3456 goto fail;
3457
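/* If RoCE is supported and more than the minimum number of vectors was
 * granted, split the vectors evenly between the NIC and RoCE.
 */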
3458 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3459 adapter->num_msix_roce_vec = num_vec / 2;
3460 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3461 adapter->num_msix_roce_vec);
3462 }
3463
3464 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3465
3466 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3467 adapter->num_msix_vec);
3468 return 0;
3469
3470 fail:
3471 dev_warn(dev, "MSIx enable failed\n");
3472
3473 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3474 if (be_virtfn(adapter))
3475 return num_vec;
3476 return 0;
3477 }
3478
3479 static inline int be_msix_vec_get(struct be_adapter *adapter,
3480 struct be_eq_obj *eqo)
3481 {
3482 return adapter->msix_entries[eqo->msix_idx].vector;
3483 }
3484
3485 static int be_msix_register(struct be_adapter *adapter)
3486 {
3487 struct net_device *netdev = adapter->netdev;
3488 struct be_eq_obj *eqo;
3489 int status, i, vec;
3490
3491 for_all_evt_queues(adapter, eqo, i) {
3492 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3493 vec = be_msix_vec_get(adapter, eqo);
3494 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3495 if (status)
3496 goto err_msix;
3497
3498 irq_update_affinity_hint(vec, eqo->affinity_mask);
3499 }
3500
3501 return 0;
3502 err_msix:
3503 for (i--; i >= 0; i--) {
3504 eqo = &adapter->eq_obj[i];
3505 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3506 }
3507 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3508 status);
3509 be_msix_disable(adapter);
3510 return status;
3511 }
3512
3513 static int be_irq_register(struct be_adapter *adapter)
3514 {
3515 struct net_device *netdev = adapter->netdev;
3516 int status;
3517
3518 if (msix_enabled(adapter)) {
3519 status = be_msix_register(adapter);
3520 if (status == 0)
3521 goto done;
3522 /* INTx is not supported for VF */
3523 if (be_virtfn(adapter))
3524 return status;
3525 }
3526
3527 /* INTx: only the first EQ is used */
3528 netdev->irq = adapter->pdev->irq;
3529 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3530 &adapter->eq_obj[0]);
3531 if (status) {
3532 dev_err(&adapter->pdev->dev,
3533 "INTx request IRQ failed - err %d\n", status);
3534 return status;
3535 }
3536 done:
3537 adapter->isr_registered = true;
3538 return 0;
3539 }
3540
3541 static void be_irq_unregister(struct be_adapter *adapter)
3542 {
3543 struct net_device *netdev = adapter->netdev;
3544 struct be_eq_obj *eqo;
3545 int i, vec;
3546
3547 if (!adapter->isr_registered)
3548 return;
3549
3550 /* INTx */
3551 if (!msix_enabled(adapter)) {
3552 free_irq(netdev->irq, &adapter->eq_obj[0]);
3553 goto done;
3554 }
3555
3556 /* MSIx */
3557 for_all_evt_queues(adapter, eqo, i) {
3558 vec = be_msix_vec_get(adapter, eqo);
3559 irq_update_affinity_hint(vec, NULL);
3560 free_irq(vec, eqo);
3561 }
3562
3563 done:
3564 adapter->isr_registered = false;
3565 }
3566
3567 static void be_rx_qs_destroy(struct be_adapter *adapter)
3568 {
3569 struct rss_info *rss = &adapter->rss_info;
3570 struct be_queue_info *q;
3571 struct be_rx_obj *rxo;
3572 int i;
3573
3574 for_all_rx_queues(adapter, rxo, i) {
3575 q = &rxo->q;
3576 if (q->created) {
3577 /* If RXQs are destroyed while in an "out of buffer"
3578 * state, there is a possibility of an HW stall on
3579 * Lancer. So, post 64 buffers to each queue to relieve
3580 * the "out of buffer" condition.
3581 * Make sure there's space in the RXQ before posting.
3582 */
3583 if (lancer_chip(adapter)) {
3584 be_rx_cq_clean(rxo);
3585 if (atomic_read(&q->used) == 0)
3586 be_post_rx_frags(rxo, GFP_KERNEL,
3587 MAX_RX_POST);
3588 }
3589
3590 be_cmd_rxq_destroy(adapter, q);
3591 be_rx_cq_clean(rxo);
3592 be_rxq_clean(rxo);
3593 }
3594 be_queue_free(adapter, q);
3595 }
3596
3597 if (rss->rss_flags) {
3598 rss->rss_flags = RSS_ENABLE_NONE;
3599 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3600 128, rss->rss_hkey);
3601 }
3602 }
3603
3604 static void be_disable_if_filters(struct be_adapter *adapter)
3605 {
3606 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3607 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3608 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3609 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3610 eth_zero_addr(adapter->dev_mac);
3611 }
3612
3613 be_clear_uc_list(adapter);
3614 be_clear_mc_list(adapter);
3615
3616 /* The IFACE flags are enabled in the open path and cleared
3617 * in the close path. When a VF gets detached from the host and
3618 * assigned to a VM the following happens:
3619 * - VF's IFACE flags get cleared in the detach path
3620 * - IFACE create is issued by the VF in the attach path
3621 * Due to a bug in the BE3/Skyhawk-R FW
3622 * (Lancer FW doesn't have the bug), the IFACE capability flags
3623 * specified along with the IFACE create cmd issued by a VF are not
3624 * honoured by FW. As a consequence, if a *new* driver
3625 * (that enables/disables IFACE flags in open/close)
3626 * is loaded in the host and an *old* driver is used by a VM/VF,
3627 * the IFACE gets created *without* the needed flags.
3628 * To avoid this, disable RX-filter flags only for Lancer.
3629 */
3630 if (lancer_chip(adapter)) {
3631 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3632 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3633 }
3634 }
3635
3636 static int be_close(struct net_device *netdev)
3637 {
3638 struct be_adapter *adapter = netdev_priv(netdev);
3639 struct be_eq_obj *eqo;
3640 int i;
3641
3642 /* This protection is needed as be_close() may be called even when the
3643 * adapter is in cleared state (after eeh perm failure)
3644 */
3645 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3646 return 0;
3647
3648 /* Before attempting cleanup ensure all the pending cmds in the
3649 * config_wq have finished execution
3650 */
3651 flush_workqueue(be_wq);
3652
3653 be_disable_if_filters(adapter);
3654
3655 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3656 for_all_evt_queues(adapter, eqo, i) {
3657 napi_disable(&eqo->napi);
3658 }
3659 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3660 }
3661
3662 be_async_mcc_disable(adapter);
3663
3664 /* Wait for all pending tx completions to arrive so that
3665 * all tx skbs are freed.
3666 */
3667 netif_tx_disable(netdev);
3668 be_tx_compl_clean(adapter);
3669
3670 be_rx_qs_destroy(adapter);
3671
3672 for_all_evt_queues(adapter, eqo, i) {
3673 if (msix_enabled(adapter))
3674 synchronize_irq(be_msix_vec_get(adapter, eqo));
3675 else
3676 synchronize_irq(netdev->irq);
3677 be_eq_clean(eqo);
3678 }
3679
3680 be_irq_unregister(adapter);
3681
3682 return 0;
3683 }
3684
3685 static int be_rx_qs_create(struct be_adapter *adapter)
3686 {
3687 struct rss_info *rss = &adapter->rss_info;
3688 u8 rss_key[RSS_HASH_KEY_LEN];
3689 struct be_rx_obj *rxo;
3690 int rc, i, j;
3691
3692 for_all_rx_queues(adapter, rxo, i) {
3693 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3694 sizeof(struct be_eth_rx_d));
3695 if (rc)
3696 return rc;
3697 }
3698
3699 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3700 rxo = default_rxo(adapter);
3701 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3702 rx_frag_size, adapter->if_handle,
3703 false, &rxo->rss_id);
3704 if (rc)
3705 return rc;
3706 }
3707
3708 for_all_rss_queues(adapter, rxo, i) {
3709 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3710 rx_frag_size, adapter->if_handle,
3711 true, &rxo->rss_id);
3712 if (rc)
3713 return rc;
3714 }
3715
3716 if (be_multi_rxq(adapter)) {
3717 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3718 for_all_rss_queues(adapter, rxo, i) {
3719 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3720 break;
3721 rss->rsstable[j + i] = rxo->rss_id;
3722 rss->rss_queue[j + i] = i;
3723 }
3724 }
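/* For illustration (assuming RSS_INDIR_TABLE_LEN is 128): with 4 RSS queues
 * the loops above fill rsstable[]/rss_queue[] round-robin as
 * q0, q1, q2, q3, q0, q1, ... for all 128 indirection-table entries.
 */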
3725 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3726 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3727
3728 if (!BEx_chip(adapter))
3729 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3730 RSS_ENABLE_UDP_IPV6;
3731
3732 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3733 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3734 RSS_INDIR_TABLE_LEN, rss_key);
3735 if (rc) {
3736 rss->rss_flags = RSS_ENABLE_NONE;
3737 return rc;
3738 }
3739
3740 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3741 } else {
3742 /* Disable RSS, if only default RX Q is created */
3743 rss->rss_flags = RSS_ENABLE_NONE;
3744 }
3745
3746
3747 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3748 * which is a queue empty condition
3749 */
3750 for_all_rx_queues(adapter, rxo, i)
3751 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3752
3753 return 0;
3754 }
3755
3756 static int be_enable_if_filters(struct be_adapter *adapter)
3757 {
3758 int status;
3759
3760 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3761 if (status)
3762 return status;
3763
3764 /* Normally this condition is true as ->dev_mac is zeroed.
3765 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3766 * subsequent be_dev_mac_add() can fail (after fresh boot)
3767 */
3768 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3769 int old_pmac_id = -1;
3770
3771 /* Remember old programmed MAC if any - can happen on BE3 VF */
3772 if (!is_zero_ether_addr(adapter->dev_mac))
3773 old_pmac_id = adapter->pmac_id[0];
3774
3775 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3776 if (status)
3777 return status;
3778
3779 /* Delete the old programmed MAC as we successfully programmed
3780 * a new MAC
3781 */
3782 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3783 be_dev_mac_del(adapter, old_pmac_id);
3784
3785 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3786 }
3787
3788 if (adapter->vlans_added)
3789 be_vid_config(adapter);
3790
3791 __be_set_rx_mode(adapter);
3792
3793 return 0;
3794 }
3795
3796 static int be_open(struct net_device *netdev)
3797 {
3798 struct be_adapter *adapter = netdev_priv(netdev);
3799 struct be_eq_obj *eqo;
3800 struct be_rx_obj *rxo;
3801 struct be_tx_obj *txo;
3802 u8 link_status;
3803 int status, i;
3804
3805 status = be_rx_qs_create(adapter);
3806 if (status)
3807 goto err;
3808
3809 status = be_enable_if_filters(adapter);
3810 if (status)
3811 goto err;
3812
3813 status = be_irq_register(adapter);
3814 if (status)
3815 goto err;
3816
3817 for_all_rx_queues(adapter, rxo, i)
3818 be_cq_notify(adapter, rxo->cq.id, true, 0);
3819
3820 for_all_tx_queues(adapter, txo, i)
3821 be_cq_notify(adapter, txo->cq.id, true, 0);
3822
3823 be_async_mcc_enable(adapter);
3824
3825 for_all_evt_queues(adapter, eqo, i) {
3826 napi_enable(&eqo->napi);
3827 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3828 }
3829 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3830
3831 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3832 if (!status)
3833 be_link_status_update(adapter, link_status);
3834
3835 netif_tx_start_all_queues(netdev);
3836
3837 udp_tunnel_nic_reset_ntf(netdev);
3838
3839 return 0;
3840 err:
3841 be_close(adapter->netdev);
3842 return -EIO;
3843 }
3844
3845 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3846 {
3847 u32 addr;
3848
3849 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3850
3851 mac[5] = (u8)(addr & 0xFF);
3852 mac[4] = (u8)((addr >> 8) & 0xFF);
3853 mac[3] = (u8)((addr >> 16) & 0xFF);
3854 /* Use the OUI from the current MAC address */
3855 memcpy(mac, adapter->netdev->dev_addr, 3);
3856 }
3857
3858 /*
3859 * Generate a seed MAC address from the PF MAC Address using jhash.
3860 * MAC addresses for VFs are assigned incrementally starting from the seed.
3861 * These addresses are programmed in the ASIC by the PF and the VF driver
3862 * queries for the MAC address during its probe.
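 *
 * Illustration (hypothetical values): if jhash() of the PF MAC returns
 * 0x00a1b2c3, the seed MAC keeps the PF's 3-byte OUI and uses a1:b2:c3 as
 * its low three bytes; VF0 then gets a seed ending in c3, VF1 in c4, and so
 * on, since only mac[5] is incremented per VF.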
3863 */
3864 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3865 {
3866 u32 vf;
3867 int status = 0;
3868 u8 mac[ETH_ALEN];
3869 struct be_vf_cfg *vf_cfg;
3870
3871 be_vf_eth_addr_generate(adapter, mac);
3872
3873 for_all_vfs(adapter, vf_cfg, vf) {
3874 if (BEx_chip(adapter))
3875 status = be_cmd_pmac_add(adapter, mac,
3876 vf_cfg->if_handle,
3877 &vf_cfg->pmac_id, vf + 1);
3878 else
3879 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3880 vf + 1);
3881
3882 if (status)
3883 dev_err(&adapter->pdev->dev,
3884 "Mac address assignment failed for VF %d\n",
3885 vf);
3886 else
3887 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3888
3889 mac[5] += 1;
3890 }
3891 return status;
3892 }
3893
3894 static int be_vfs_mac_query(struct be_adapter *adapter)
3895 {
3896 int status, vf;
3897 u8 mac[ETH_ALEN];
3898 struct be_vf_cfg *vf_cfg;
3899
3900 for_all_vfs(adapter, vf_cfg, vf) {
3901 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3902 mac, vf_cfg->if_handle,
3903 false, vf+1);
3904 if (status)
3905 return status;
3906 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3907 }
3908 return 0;
3909 }
3910
3911 static void be_vf_clear(struct be_adapter *adapter)
3912 {
3913 struct be_vf_cfg *vf_cfg;
3914 u32 vf;
3915
3916 if (pci_vfs_assigned(adapter->pdev)) {
3917 dev_warn(&adapter->pdev->dev,
3918 "VFs are assigned to VMs: not disabling VFs\n");
3919 goto done;
3920 }
3921
3922 pci_disable_sriov(adapter->pdev);
3923
3924 for_all_vfs(adapter, vf_cfg, vf) {
3925 if (BEx_chip(adapter))
3926 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3927 vf_cfg->pmac_id, vf + 1);
3928 else
3929 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3930 vf + 1);
3931
3932 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3933 }
3934
3935 if (BE3_chip(adapter))
3936 be_cmd_set_hsw_config(adapter, 0, 0,
3937 adapter->if_handle,
3938 PORT_FWD_TYPE_PASSTHRU, 0);
3939 done:
3940 kfree(adapter->vf_cfg);
3941 adapter->num_vfs = 0;
3942 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3943 }
3944
3945 static void be_clear_queues(struct be_adapter *adapter)
3946 {
3947 be_mcc_queues_destroy(adapter);
3948 be_rx_cqs_destroy(adapter);
3949 be_tx_queues_destroy(adapter);
3950 be_evt_queues_destroy(adapter);
3951 }
3952
3953 static void be_cancel_worker(struct be_adapter *adapter)
3954 {
3955 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3956 cancel_delayed_work_sync(&adapter->work);
3957 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3958 }
3959 }
3960
3961 static void be_cancel_err_detection(struct be_adapter *adapter)
3962 {
3963 struct be_error_recovery *err_rec = &adapter->error_recovery;
3964
3965 if (!be_err_recovery_workq)
3966 return;
3967
3968 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3969 cancel_delayed_work_sync(&err_rec->err_detection_work);
3970 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3971 }
3972 }
3973
3974 /* VxLAN offload Notes:
3975 *
3976 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
3977 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
3978 * is expected to work across all types of IP tunnels once exported. Skyhawk
3979 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
3980 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
3981 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
3982 * those other tunnels are unexported on the fly through ndo_features_check().
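 *
 * For example, if a GRE-encapsulated skb is transmitted while VxLAN offloads
 * are enabled, be_features_check() below strips NETIF_F_CSUM_MASK and
 * NETIF_F_GSO_MASK for that skb so it is checksummed/segmented in software.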
3983 */
3984 static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
3985 unsigned int entry, struct udp_tunnel_info *ti)
3986 {
3987 struct be_adapter *adapter = netdev_priv(netdev);
3988 struct device *dev = &adapter->pdev->dev;
3989 int status;
3990
3991 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3992 OP_CONVERT_NORMAL_TO_TUNNEL);
3993 if (status) {
3994 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3995 return status;
3996 }
3997 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3998
3999 status = be_cmd_set_vxlan_port(adapter, ti->port);
4000 if (status) {
4001 dev_warn(dev, "Failed to add VxLAN port\n");
4002 return status;
4003 }
4004 adapter->vxlan_port = ti->port;
4005
4006 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4007 NETIF_F_TSO | NETIF_F_TSO6 |
4008 NETIF_F_GSO_UDP_TUNNEL;
4009
4010 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4011 be16_to_cpu(ti->port));
4012 return 0;
4013 }
4014
4015 static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4016 unsigned int entry, struct udp_tunnel_info *ti)
4017 {
4018 struct be_adapter *adapter = netdev_priv(netdev);
4019
4020 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4021 be_cmd_manage_iface(adapter, adapter->if_handle,
4022 OP_CONVERT_TUNNEL_TO_NORMAL);
4023
4024 if (adapter->vxlan_port)
4025 be_cmd_set_vxlan_port(adapter, 0);
4026
4027 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4028 adapter->vxlan_port = 0;
4029
4030 netdev->hw_enc_features = 0;
4031 return 0;
4032 }
4033
4034 static const struct udp_tunnel_nic_info be_udp_tunnels = {
4035 .set_port = be_vxlan_set_port,
4036 .unset_port = be_vxlan_unset_port,
4037 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
4038 .tables = {
4039 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
4040 },
4041 };
4042
4043 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4044 struct be_resources *vft_res)
4045 {
4046 struct be_resources res = adapter->pool_res;
4047 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4048 struct be_resources res_mod = {0};
4049 u16 num_vf_qs = 1;
4050
4051 /* Distribute the queue resources among the PF and its VFs */
4052 if (num_vfs) {
4053 /* Divide the rx queues evenly among the VFs and the PF, capped
4054 * at VF-EQ-count. Any remainder queues belong to the PF.
4055 */
4056 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4057 res.max_rss_qs / (num_vfs + 1));
4058
4059 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4060 * RSS Tables per port. Provide RSS on VFs, only if number of
4061 * VFs requested is less than its PF Pool's RSS Tables limit.
4062 */
4063 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
4064 num_vf_qs = 1;
4065 }
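/* Worked example (hypothetical numbers): with max_rss_qs = 32 and
 * num_vfs = 7, each function (PF + 7 VFs) is offered
 * min(SH_VF_MAX_NIC_EQS, 32 / 8) RX queues, i.e. 4 capped at the per-VF EQ
 * limit; if num_vfs exceeded the PF pool's RSS-table limit, every VF would
 * instead fall back to a single queue pair.
 */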
4066
4067 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4068 * which are modifiable using SET_PROFILE_CONFIG cmd.
4069 */
4070 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4071 RESOURCE_MODIFIABLE, 0);
4072
4073 /* If RSS IFACE capability flags are modifiable for a VF, set the
4074 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4075 * more than 1 RSSQ is available for a VF.
4076 * Otherwise, provision only 1 queue pair for VF.
4077 */
4078 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4079 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4080 if (num_vf_qs > 1) {
4081 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4082 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4083 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4084 } else {
4085 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4086 BE_IF_FLAGS_DEFQ_RSS);
4087 }
4088 } else {
4089 num_vf_qs = 1;
4090 }
4091
4092 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4093 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4094 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4095 }
4096
4097 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4098 vft_res->max_rx_qs = num_vf_qs;
4099 vft_res->max_rss_qs = num_vf_qs;
4100 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4101 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4102
4103 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4104 * among the PF and its VFs, if the fields are changeable
4105 */
4106 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4107 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4108
4109 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4110 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4111
4112 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4113 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4114
4115 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4116 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
4117 }
4118
4119 static void be_if_destroy(struct be_adapter *adapter)
4120 {
4121 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4122
4123 kfree(adapter->pmac_id);
4124 adapter->pmac_id = NULL;
4125
4126 kfree(adapter->mc_list);
4127 adapter->mc_list = NULL;
4128
4129 kfree(adapter->uc_list);
4130 adapter->uc_list = NULL;
4131 }
4132
4133 static int be_clear(struct be_adapter *adapter)
4134 {
4135 struct pci_dev *pdev = adapter->pdev;
4136 struct be_resources vft_res = {0};
4137
4138 be_cancel_worker(adapter);
4139
4140 flush_workqueue(be_wq);
4141
4142 if (sriov_enabled(adapter))
4143 be_vf_clear(adapter);
4144
4145 /* Re-configure FW to distribute resources evenly across max-supported
4146 * number of VFs, only when VFs are not already enabled.
4147 */
4148 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4149 !pci_vfs_assigned(pdev)) {
4150 be_calculate_vf_res(adapter,
4151 pci_sriov_get_totalvfs(pdev),
4152 &vft_res);
4153 be_cmd_set_sriov_config(adapter, adapter->pool_res,
4154 pci_sriov_get_totalvfs(pdev),
4155 &vft_res);
4156 }
4157
4158 be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
4159
4160 be_if_destroy(adapter);
4161
4162 be_clear_queues(adapter);
4163
4164 be_msix_disable(adapter);
4165 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
4166 return 0;
4167 }
4168
4169 static int be_vfs_if_create(struct be_adapter *adapter)
4170 {
4171 struct be_resources res = {0};
4172 u32 cap_flags, en_flags, vf;
4173 struct be_vf_cfg *vf_cfg;
4174 int status;
4175
4176 /* If a FW profile exists, then cap_flags are updated */
4177 cap_flags = BE_VF_IF_EN_FLAGS;
4178
4179 for_all_vfs(adapter, vf_cfg, vf) {
4180 if (!BE3_chip(adapter)) {
4181 status = be_cmd_get_profile_config(adapter, &res, NULL,
4182 ACTIVE_PROFILE_TYPE,
4183 RESOURCE_LIMITS,
4184 vf + 1);
4185 if (!status) {
4186 cap_flags = res.if_cap_flags;
4187 /* Prevent VFs from enabling VLAN promiscuous
4188 * mode
4189 */
4190 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4191 }
4192 }
4193
4194 /* PF should enable IF flags during proxy if_create call */
4195 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
4196 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4197 &vf_cfg->if_handle, vf + 1);
4198 if (status)
4199 return status;
4200 }
4201
4202 return 0;
4203 }
4204
4205 static int be_vf_setup_init(struct be_adapter *adapter)
4206 {
4207 struct be_vf_cfg *vf_cfg;
4208 int vf;
4209
4210 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4211 GFP_KERNEL);
4212 if (!adapter->vf_cfg)
4213 return -ENOMEM;
4214
4215 for_all_vfs(adapter, vf_cfg, vf) {
4216 vf_cfg->if_handle = -1;
4217 vf_cfg->pmac_id = -1;
4218 }
4219 return 0;
4220 }
4221
4222 static int be_vf_setup(struct be_adapter *adapter)
4223 {
4224 struct device *dev = &adapter->pdev->dev;
4225 struct be_vf_cfg *vf_cfg;
4226 int status, old_vfs, vf;
4227 bool spoofchk;
4228
4229 old_vfs = pci_num_vf(adapter->pdev);
4230
4231 status = be_vf_setup_init(adapter);
4232 if (status)
4233 goto err;
4234
4235 if (old_vfs) {
4236 for_all_vfs(adapter, vf_cfg, vf) {
4237 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4238 if (status)
4239 goto err;
4240 }
4241
4242 status = be_vfs_mac_query(adapter);
4243 if (status)
4244 goto err;
4245 } else {
4246 status = be_vfs_if_create(adapter);
4247 if (status)
4248 goto err;
4249
4250 status = be_vf_eth_addr_config(adapter);
4251 if (status)
4252 goto err;
4253 }
4254
4255 for_all_vfs(adapter, vf_cfg, vf) {
4256 /* Allow VFs to program MAC/VLAN filters */
4257 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4258 vf + 1);
4259 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
4260 status = be_cmd_set_fn_privileges(adapter,
4261 vf_cfg->privileges |
4262 BE_PRIV_FILTMGMT,
4263 vf + 1);
4264 if (!status) {
4265 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
4266 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4267 vf);
4268 }
4269 }
4270
4271 /* Allow full available bandwidth */
4272 if (!old_vfs)
4273 be_cmd_config_qos(adapter, 0, 0, vf + 1);
4274
4275 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4276 vf_cfg->if_handle, NULL,
4277 &spoofchk);
4278 if (!status)
4279 vf_cfg->spoofchk = spoofchk;
4280
4281 if (!old_vfs) {
4282 be_cmd_enable_vf(adapter, vf + 1);
4283 be_cmd_set_logical_link_config(adapter,
4284 IFLA_VF_LINK_STATE_AUTO,
4285 vf+1);
4286 }
4287 }
4288
4289 if (!old_vfs) {
4290 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4291 if (status) {
4292 dev_err(dev, "SRIOV enable failed\n");
4293 adapter->num_vfs = 0;
4294 goto err;
4295 }
4296 }
4297
4298 if (BE3_chip(adapter)) {
4299 /* On BE3, enable VEB only when SRIOV is enabled */
4300 status = be_cmd_set_hsw_config(adapter, 0, 0,
4301 adapter->if_handle,
4302 PORT_FWD_TYPE_VEB, 0);
4303 if (status)
4304 goto err;
4305 }
4306
4307 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4308 return 0;
4309 err:
4310 dev_err(dev, "VF setup failed\n");
4311 be_vf_clear(adapter);
4312 return status;
4313 }
4314
4315 /* Converting function_mode bits on BE3 to SH mc_type enums */
4316
4317 static u8 be_convert_mc_type(u32 function_mode)
4318 {
4319 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4320 return vNIC1;
4321 else if (function_mode & QNQ_MODE)
4322 return FLEX10;
4323 else if (function_mode & VNIC_MODE)
4324 return vNIC2;
4325 else if (function_mode & UMC_ENABLED)
4326 return UMC;
4327 else
4328 return MC_NONE;
4329 }
4330
4331 /* On BE2/BE3, FW does not report the supported limits */
4332 static void BEx_get_resources(struct be_adapter *adapter,
4333 struct be_resources *res)
4334 {
4335 bool use_sriov = adapter->num_vfs ? 1 : 0;
4336
4337 if (be_physfn(adapter))
4338 res->max_uc_mac = BE_UC_PMAC_COUNT;
4339 else
4340 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4341
4342 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4343
4344 if (be_is_mc(adapter)) {
4345 /* Assuming that there are 4 channels per port,
4346 * when multi-channel is enabled
4347 */
4348 if (be_is_qnq_mode(adapter))
4349 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4350 else
4351 /* In a non-qnq multichannel mode, the pvid
4352 * takes up one vlan entry
4353 */
4354 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4355 } else {
4356 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4357 }
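/* Illustration (assuming BE_NUM_VLANS_SUPPORTED is 64): a QnQ multi-channel
 * function gets 64 / 8 = 8 VLAN filter entries, a non-QnQ multi-channel
 * function gets 64 / 4 - 1 = 15 (one entry is consumed by the pvid), and a
 * single-channel function gets all 64.
 */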
4358
4359 res->max_mcast_mac = BE_MAX_MC;
4360
4361 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4362 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4363 * *only* if it is RSS-capable.
4364 */
4365 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4366 be_virtfn(adapter) ||
4367 (be_is_mc(adapter) &&
4368 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4369 res->max_tx_qs = 1;
4370 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4371 struct be_resources super_nic_res = {0};
4372
4373 /* On a SuperNIC profile, the driver needs to use the
4374 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4375 */
4376 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4377 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4378 0);
4379 /* Some old versions of BE3 FW don't report max_tx_qs value */
4380 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4381 } else {
4382 res->max_tx_qs = BE3_MAX_TX_QS;
4383 }
4384
4385 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4386 !use_sriov && be_physfn(adapter))
4387 res->max_rss_qs = (adapter->be3_native) ?
4388 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4389 res->max_rx_qs = res->max_rss_qs + 1;
4390
4391 if (be_physfn(adapter))
4392 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4393 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4394 else
4395 res->max_evt_qs = 1;
4396
4397 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4398 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4399 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4400 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4401 }
4402
4403 static void be_setup_init(struct be_adapter *adapter)
4404 {
4405 adapter->vlan_prio_bmap = 0xff;
4406 adapter->phy.link_speed = -1;
4407 adapter->if_handle = -1;
4408 adapter->be3_native = false;
4409 adapter->if_flags = 0;
4410 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4411 if (be_physfn(adapter))
4412 adapter->cmd_privileges = MAX_PRIVILEGES;
4413 else
4414 adapter->cmd_privileges = MIN_PRIVILEGES;
4415 }
4416
4417 /* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4418 * However, this HW limitation is not exposed to the host via any SLI cmd.
4419 * As a result, in the case of SRIOV and in particular multi-partition configs
4420 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4421 * for distribution between the VFs. This self-imposed limit will determine the
4422 * number of VFs for which RSS can be enabled.
4423 */
4424 static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4425 {
4426 struct be_port_resources port_res = {0};
4427 u8 rss_tables_on_port;
4428 u16 max_vfs = be_max_vfs(adapter);
4429
4430 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4431 RESOURCE_LIMITS, 0);
4432
4433 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4434
4435 /* Each PF Pool's RSS Tables limit =
4436 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4437 */
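/* Worked example (hypothetical numbers): if the port has 15 RSS tables left
 * after reserving one per NIC PF (rss_tables_on_port = 15), this PF's
 * max_vfs = 32 and the port-wide max_vfs = 64, the PF pool is allotted
 * 32 * 15 / 64 = 7 RSS tables (integer division).
 */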
4438 adapter->pool_res.max_rss_tables =
4439 max_vfs * rss_tables_on_port / port_res.max_vfs;
4440 }
4441
4442 static int be_get_sriov_config(struct be_adapter *adapter)
4443 {
4444 struct be_resources res = {0};
4445 int max_vfs, old_vfs;
4446
4447 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4448 RESOURCE_LIMITS, 0);
4449
4450 /* Some old versions of BE3 FW don't report max_vfs value */
4451 if (BE3_chip(adapter) && !res.max_vfs) {
4452 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4453 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4454 }
4455
4456 adapter->pool_res = res;
4457
4458 /* If during previous unload of the driver, the VFs were not disabled,
4459 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4460 * Instead use the TotalVFs value stored in the pci-dev struct.
4461 */
4462 old_vfs = pci_num_vf(adapter->pdev);
4463 if (old_vfs) {
4464 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4465 old_vfs);
4466
4467 adapter->pool_res.max_vfs =
4468 pci_sriov_get_totalvfs(adapter->pdev);
4469 adapter->num_vfs = old_vfs;
4470 }
4471
4472 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4473 be_calculate_pf_pool_rss_tables(adapter);
4474 dev_info(&adapter->pdev->dev,
4475 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4476 be_max_pf_pool_rss_tables(adapter));
4477 }
4478 return 0;
4479 }
4480
4481 static void be_alloc_sriov_res(struct be_adapter *adapter)
4482 {
4483 int old_vfs = pci_num_vf(adapter->pdev);
4484 struct be_resources vft_res = {0};
4485 int status;
4486
4487 be_get_sriov_config(adapter);
4488
4489 if (!old_vfs)
4490 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4491
4492 /* When the HW is in SRIOV capable configuration, the PF-pool
4493 * resources are given to PF during driver load, if there are no
4494 * old VFs. This facility is not available in BE3 FW.
4495 * Also, this is done by FW in Lancer chip.
4496 */
4497 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4498 be_calculate_vf_res(adapter, 0, &vft_res);
4499 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4500 &vft_res);
4501 if (status)
4502 dev_err(&adapter->pdev->dev,
4503 "Failed to optimize SRIOV resources\n");
4504 }
4505 }
4506
4507 static int be_get_resources(struct be_adapter *adapter)
4508 {
4509 struct device *dev = &adapter->pdev->dev;
4510 struct be_resources res = {0};
4511 int status;
4512
4513 /* For Lancer, SH etc., read per-function resource limits from FW.
4514 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4515 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
4516 */
4517 if (BEx_chip(adapter)) {
4518 BEx_get_resources(adapter, &res);
4519 } else {
4520 status = be_cmd_get_func_config(adapter, &res);
4521 if (status)
4522 return status;
4523
4524 /* If a default RXQ must be created, we'll use up one RSSQ */
4525 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4526 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4527 res.max_rss_qs -= 1;
4528 }
4529
4530 /* If RoCE is supported stash away half the EQs for RoCE */
4531 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4532 res.max_evt_qs / 2 : res.max_evt_qs;
4533 adapter->res = res;
4534
4535 /* If FW supports RSS default queue, then skip creating non-RSS
4536 * queue for non-IP traffic.
4537 */
4538 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4539 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4540
4541 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4542 be_max_txqs(adapter), be_max_rxqs(adapter),
4543 be_max_rss(adapter), be_max_nic_eqs(adapter),
4544 be_max_vfs(adapter));
4545 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4546 be_max_uc(adapter), be_max_mc(adapter),
4547 be_max_vlans(adapter));
4548
4549 /* Ensure RX and TX queues are created in pairs at init time */
4550 adapter->cfg_num_rx_irqs =
4551 min_t(u16, netif_get_num_default_rss_queues(),
4552 be_max_qp_irqs(adapter));
4553 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
4554 return 0;
4555 }
4556
4557 static int be_get_config(struct be_adapter *adapter)
4558 {
4559 int status, level;
4560 u16 profile_id;
4561
4562 status = be_cmd_get_cntl_attributes(adapter);
4563 if (status)
4564 return status;
4565
4566 status = be_cmd_query_fw_cfg(adapter);
4567 if (status)
4568 return status;
4569
4570 if (!lancer_chip(adapter) && be_physfn(adapter))
4571 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4572
4573 if (BEx_chip(adapter)) {
4574 level = be_cmd_get_fw_log_level(adapter);
4575 adapter->msg_enable =
4576 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4577 }
4578
4579 be_cmd_get_acpi_wol_cap(adapter);
4580 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4581 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
4582
4583 be_cmd_query_port_name(adapter);
4584
4585 if (be_physfn(adapter)) {
4586 status = be_cmd_get_active_profile(adapter, &profile_id);
4587 if (!status)
4588 dev_info(&adapter->pdev->dev,
4589 "Using profile 0x%x\n", profile_id);
4590 }
4591
4592 return 0;
4593 }
4594
4595 static int be_mac_setup(struct be_adapter *adapter)
4596 {
4597 u8 mac[ETH_ALEN];
4598 int status;
4599
4600 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4601 status = be_cmd_get_perm_mac(adapter, mac);
4602 if (status)
4603 return status;
4604
4605 eth_hw_addr_set(adapter->netdev, mac);
4606 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4607
4608 /* Initial MAC for BE3 VFs is already programmed by PF */
4609 if (BEx_chip(adapter) && be_virtfn(adapter))
4610 memcpy(adapter->dev_mac, mac, ETH_ALEN);
4611 }
4612
4613 return 0;
4614 }
4615
4616 static void be_schedule_worker(struct be_adapter *adapter)
4617 {
4618 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
4619 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4620 }
4621
4622 static void be_destroy_err_recovery_workq(void)
4623 {
4624 if (!be_err_recovery_workq)
4625 return;
4626
4627 destroy_workqueue(be_err_recovery_workq);
4628 be_err_recovery_workq = NULL;
4629 }
4630
4631 static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
4632 {
4633 struct be_error_recovery *err_rec = &adapter->error_recovery;
4634
4635 if (!be_err_recovery_workq)
4636 return;
4637
4638 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4639 msecs_to_jiffies(delay));
4640 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4641 }
4642
4643 static int be_setup_queues(struct be_adapter *adapter)
4644 {
4645 struct net_device *netdev = adapter->netdev;
4646 int status;
4647
4648 status = be_evt_queues_create(adapter);
4649 if (status)
4650 goto err;
4651
4652 status = be_tx_qs_create(adapter);
4653 if (status)
4654 goto err;
4655
4656 status = be_rx_cqs_create(adapter);
4657 if (status)
4658 goto err;
4659
4660 status = be_mcc_queues_create(adapter);
4661 if (status)
4662 goto err;
4663
4664 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4665 if (status)
4666 goto err;
4667
4668 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4669 if (status)
4670 goto err;
4671
4672 return 0;
4673 err:
4674 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4675 return status;
4676 }
4677
4678 static int be_if_create(struct be_adapter *adapter)
4679 {
4680 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4681 u32 cap_flags = be_if_cap_flags(adapter);
4682
4683 /* alloc required memory for other filtering fields */
4684 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4685 sizeof(*adapter->pmac_id), GFP_KERNEL);
4686 if (!adapter->pmac_id)
4687 return -ENOMEM;
4688
4689 adapter->mc_list = kcalloc(be_max_mc(adapter),
4690 sizeof(*adapter->mc_list), GFP_KERNEL);
4691 if (!adapter->mc_list)
4692 return -ENOMEM;
4693
4694 adapter->uc_list = kcalloc(be_max_uc(adapter),
4695 sizeof(*adapter->uc_list), GFP_KERNEL);
4696 if (!adapter->uc_list)
4697 return -ENOMEM;
4698
4699 if (adapter->cfg_num_rx_irqs == 1)
4700 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4701
4702 en_flags &= cap_flags;
4703 /* will enable all the needed filter flags in be_open() */
4704 return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4705 &adapter->if_handle, 0);
4706 }
4707
4708 int be_update_queues(struct be_adapter *adapter)
4709 {
4710 struct net_device *netdev = adapter->netdev;
4711 int status;
4712
4713 if (netif_running(netdev)) {
4714 /* be_tx_timeout() must not run concurrently with this
4715 * function, synchronize with an already-running dev_watchdog
4716 */
4717 netif_tx_lock_bh(netdev);
4718 /* device cannot transmit now, avoid dev_watchdog timeouts */
4719 netif_carrier_off(netdev);
4720 netif_tx_unlock_bh(netdev);
4721
4722 be_close(netdev);
4723 }
4724
4725 be_cancel_worker(adapter);
4726
4727 /* If any vectors have been shared with RoCE we cannot re-program
4728 * the MSIx table.
4729 */
4730 if (!adapter->num_msix_roce_vec)
4731 be_msix_disable(adapter);
4732
4733 be_clear_queues(adapter);
4734 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4735 if (status)
4736 return status;
4737
4738 if (!msix_enabled(adapter)) {
4739 status = be_msix_enable(adapter);
4740 if (status)
4741 return status;
4742 }
4743
4744 status = be_if_create(adapter);
4745 if (status)
4746 return status;
4747
4748 status = be_setup_queues(adapter);
4749 if (status)
4750 return status;
4751
4752 be_schedule_worker(adapter);
4753
4754 /* The IF was destroyed and re-created. We need to clear
4755 * all promiscuous flags valid for the destroyed IF.
4756 * Without this promisc mode is not restored during
4757 * be_open() because the driver thinks that it is
4758 * already enabled in HW.
4759 */
4760 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4761
4762 if (netif_running(netdev))
4763 status = be_open(netdev);
4764
4765 return status;
4766 }
4767
4768 static inline int fw_major_num(const char *fw_ver)
4769 {
4770 int fw_major = 0, i;
4771
4772 i = sscanf(fw_ver, "%d.", &fw_major);
4773 if (i != 1)
4774 return 0;
4775
4776 return fw_major;
4777 }
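/* Usage illustration (version strings are made up): fw_major_num("4.9.416.0")
 * returns 4, while a string that doesn't start with a number returns 0.
 */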
4778
4779 /* If it is error recovery, FLR the PF.
4780 * Else, if any VFs are already enabled, don't FLR the PF.
4781 */
4782 static bool be_reset_required(struct be_adapter *adapter)
4783 {
4784 if (be_error_recovering(adapter))
4785 return true;
4786 else
4787 return pci_num_vf(adapter->pdev) == 0;
4788 }
4789
4790 /* Wait for the FW to be ready and perform the required initialization */
4791 static int be_func_init(struct be_adapter *adapter)
4792 {
4793 int status;
4794
4795 status = be_fw_wait_ready(adapter);
4796 if (status)
4797 return status;
4798
4799 /* FW is now ready; clear errors to allow cmds/doorbell */
4800 be_clear_error(adapter, BE_CLEAR_ALL);
4801
4802 if (be_reset_required(adapter)) {
4803 status = be_cmd_reset_function(adapter);
4804 if (status)
4805 return status;
4806
4807 /* Wait for interrupts to quiesce after an FLR */
4808 msleep(100);
4809 }
4810
4811 /* Tell FW we're ready to fire cmds */
4812 status = be_cmd_fw_init(adapter);
4813 if (status)
4814 return status;
4815
4816 /* Allow interrupts for other ULPs running on NIC function */
4817 be_intr_set(adapter, true);
4818
4819 return 0;
4820 }
4821
4822 static int be_setup(struct be_adapter *adapter)
4823 {
4824 struct device *dev = &adapter->pdev->dev;
4825 int status;
4826
4827 status = be_func_init(adapter);
4828 if (status)
4829 return status;
4830
4831 be_setup_init(adapter);
4832
4833 if (!lancer_chip(adapter))
4834 be_cmd_req_native_mode(adapter);
4835
4836 /* invoke this cmd first to get pf_num and vf_num which are needed
4837 * for issuing profile related cmds
4838 */
4839 if (!BEx_chip(adapter)) {
4840 status = be_cmd_get_func_config(adapter, NULL);
4841 if (status)
4842 return status;
4843 }
4844
4845 status = be_get_config(adapter);
4846 if (status)
4847 goto err;
4848
4849 if (!BE2_chip(adapter) && be_physfn(adapter))
4850 be_alloc_sriov_res(adapter);
4851
4852 status = be_get_resources(adapter);
4853 if (status)
4854 goto err;
4855
4856 status = be_msix_enable(adapter);
4857 if (status)
4858 goto err;
4859
4860 /* will enable all the needed filter flags in be_open() */
4861 status = be_if_create(adapter);
4862 if (status)
4863 goto err;
4864
4865 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4866 rtnl_lock();
4867 status = be_setup_queues(adapter);
4868 rtnl_unlock();
4869 if (status)
4870 goto err;
4871
4872 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4873
4874 status = be_mac_setup(adapter);
4875 if (status)
4876 goto err;
4877
4878 be_cmd_get_fw_ver(adapter);
4879 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4880
4881 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4882 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
4883 adapter->fw_ver);
4884 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4885 }
4886
4887 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4888 adapter->rx_fc);
4889 if (status)
4890 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4891 &adapter->rx_fc);
4892
4893 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4894 adapter->tx_fc, adapter->rx_fc);
4895
4896 if (be_physfn(adapter))
4897 be_cmd_set_logical_link_config(adapter,
4898 IFLA_VF_LINK_STATE_AUTO, 0);
4899
4900 /* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
4901 * confusing a Linux bridge or OVS that it might be connected to.
4902 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4903 * when SRIOV is not enabled.
4904 */
4905 if (BE3_chip(adapter))
4906 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4907 PORT_FWD_TYPE_PASSTHRU, 0);
4908
4909 if (adapter->num_vfs)
4910 be_vf_setup(adapter);
4911
4912 status = be_cmd_get_phy_info(adapter);
4913 if (!status && be_pause_supported(adapter))
4914 adapter->phy.fc_autoneg = 1;
4915
4916 if (be_physfn(adapter) && !lancer_chip(adapter))
4917 be_cmd_set_features(adapter);
4918
4919 be_schedule_worker(adapter);
4920 adapter->flags |= BE_FLAGS_SETUP_DONE;
4921 return 0;
4922 err:
4923 be_clear(adapter);
4924 return status;
4925 }
4926
4927 #ifdef CONFIG_NET_POLL_CONTROLLER
4928 static void be_netpoll(struct net_device *netdev)
4929 {
4930 struct be_adapter *adapter = netdev_priv(netdev);
4931 struct be_eq_obj *eqo;
4932 int i;
4933
4934 for_all_evt_queues(adapter, eqo, i) {
4935 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4936 napi_schedule(&eqo->napi);
4937 }
4938 }
4939 #endif
4940
4941 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4942 {
4943 const struct firmware *fw;
4944 int status;
4945
4946 if (!netif_running(adapter->netdev)) {
4947 dev_err(&adapter->pdev->dev,
4948 "Firmware load not allowed (interface is down)\n");
4949 return -ENETDOWN;
4950 }
4951
4952 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4953 if (status)
4954 goto fw_exit;
4955
4956 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4957
4958 if (lancer_chip(adapter))
4959 status = lancer_fw_download(adapter, fw);
4960 else
4961 status = be_fw_download(adapter, fw);
4962
4963 if (!status)
4964 be_cmd_get_fw_ver(adapter);
4965
4966 fw_exit:
4967 release_firmware(fw);
4968 return status;
4969 }
4970
4971 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4972 u16 flags, struct netlink_ext_ack *extack)
4973 {
4974 struct be_adapter *adapter = netdev_priv(dev);
4975 struct nlattr *attr, *br_spec;
4976 int rem;
4977 int status = 0;
4978 u16 mode = 0;
4979
4980 if (!sriov_enabled(adapter))
4981 return -EOPNOTSUPP;
4982
4983 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4984 if (!br_spec)
4985 return -EINVAL;
4986
4987 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
4988 mode = nla_get_u16(attr);
4989 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4990 return -EOPNOTSUPP;
4991
4992 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4993 return -EINVAL;
4994
4995 status = be_cmd_set_hsw_config(adapter, 0, 0,
4996 adapter->if_handle,
4997 mode == BRIDGE_MODE_VEPA ?
4998 PORT_FWD_TYPE_VEPA :
4999 PORT_FWD_TYPE_VEB, 0);
5000 if (status)
5001 goto err;
5002
5003 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5004 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5005
5006 return status;
5007 }
5008 err:
5009 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5010 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5011
5012 return status;
5013 }
5014
5015 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5016 struct net_device *dev, u32 filter_mask,
5017 int nlflags)
5018 {
5019 struct be_adapter *adapter = netdev_priv(dev);
5020 int status = 0;
5021 u8 hsw_mode;
5022
5023 /* BE and Lancer chips support VEB mode only */
5024 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5025 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
5026 if (!pci_sriov_get_totalvfs(adapter->pdev))
5027 return 0;
5028 hsw_mode = PORT_FWD_TYPE_VEB;
5029 } else {
5030 status = be_cmd_get_hsw_config(adapter, NULL, 0,
5031 adapter->if_handle, &hsw_mode,
5032 NULL);
5033 if (status)
5034 return 0;
5035
5036 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5037 return 0;
5038 }
5039
5040 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5041 hsw_mode == PORT_FWD_TYPE_VEPA ?
5042 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
5043 0, 0, nlflags, filter_mask, NULL);
5044 }
5045
5046 static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5047 void (*func)(struct work_struct *))
5048 {
5049 struct be_cmd_work *work;
5050
5051 work = kzalloc(sizeof(*work), GFP_ATOMIC);
5052 if (!work) {
5053 dev_err(&adapter->pdev->dev,
5054 "be_work memory allocation failed\n");
5055 return NULL;
5056 }
5057
5058 INIT_WORK(&work->work, func);
5059 work->adapter = adapter;
5060 return work;
5061 }
5062
5063 static netdev_features_t be_features_check(struct sk_buff *skb,
5064 struct net_device *dev,
5065 netdev_features_t features)
5066 {
5067 struct be_adapter *adapter = netdev_priv(dev);
5068 u8 l4_hdr = 0;
5069
5070 if (skb_is_gso(skb)) {
5071 /* IPv6 TSO requests with extension hdrs are a problem
5072 * for Lancer and BE3 HW. Disable the TSO6 feature.
5073 */
5074 if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
5075 features &= ~NETIF_F_TSO6;
5076
5077 /* Lancer cannot handle a packet with MSS less than 256.
5078 * It also can't handle a TSO packet with a single segment.
5079 * Disable GSO support in such cases.
5080 */
5081 if (lancer_chip(adapter) &&
5082 (skb_shinfo(skb)->gso_size < 256 ||
5083 skb_shinfo(skb)->gso_segs == 1))
5084 features &= ~NETIF_F_GSO_MASK;
5085 }
5086
5087 /* The code below restricts offload features for some tunneled and
5088 * Q-in-Q packets.
5089 * Offload features for normal (non tunnel) packets are unchanged.
5090 */
5091 features = vlan_features_check(skb, features);
5092 if (!skb->encapsulation ||
5093 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5094 return features;
5095
5096 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5097 * should disable tunnel offload features if it's not a VxLAN packet,
5098 * as tunnel offloads have been enabled only for VxLAN. This is done to
5099 * allow other tunneled traffic like GRE work fine while VxLAN
5100 * offloads are configured in Skyhawk-R.
5101 */
5102 switch (vlan_get_protocol(skb)) {
5103 case htons(ETH_P_IP):
5104 l4_hdr = ip_hdr(skb)->protocol;
5105 break;
5106 case htons(ETH_P_IPV6):
5107 l4_hdr = ipv6_hdr(skb)->nexthdr;
5108 break;
5109 default:
5110 return features;
5111 }
5112
5113 if (l4_hdr != IPPROTO_UDP ||
5114 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5115 skb->inner_protocol != htons(ETH_P_TEB) ||
5116 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5117 sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5118 !adapter->vxlan_port ||
5119 udp_hdr(skb)->dest != adapter->vxlan_port)
5120 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
5121
5122 return features;
5123 }
5124
5125 static int be_get_phys_port_id(struct net_device *dev,
5126 struct netdev_phys_item_id *ppid)
5127 {
5128 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5129 struct be_adapter *adapter = netdev_priv(dev);
5130 u8 *id;
5131
5132 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5133 return -ENOSPC;
5134
5135 ppid->id[0] = adapter->hba_port_num + 1;
5136 id = &ppid->id[1];
5137 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5138 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5139 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5140
5141 ppid->id_len = id_len;
5142
5143 return 0;
5144 }
5145
5146 static void be_set_rx_mode(struct net_device *dev)
5147 {
5148 struct be_adapter *adapter = netdev_priv(dev);
5149 struct be_cmd_work *work;
5150
5151 work = be_alloc_work(adapter, be_work_set_rx_mode);
5152 if (work)
5153 queue_work(be_wq, &work->work);
5154 }
5155
5156 static const struct net_device_ops be_netdev_ops = {
5157 .ndo_open = be_open,
5158 .ndo_stop = be_close,
5159 .ndo_start_xmit = be_xmit,
5160 .ndo_set_rx_mode = be_set_rx_mode,
5161 .ndo_set_mac_address = be_mac_addr_set,
5162 .ndo_get_stats64 = be_get_stats64,
5163 .ndo_validate_addr = eth_validate_addr,
5164 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5165 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
5166 .ndo_set_vf_mac = be_set_vf_mac,
5167 .ndo_set_vf_vlan = be_set_vf_vlan,
5168 .ndo_set_vf_rate = be_set_vf_tx_rate,
5169 .ndo_get_vf_config = be_get_vf_config,
5170 .ndo_set_vf_link_state = be_set_vf_link_state,
5171 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
5172 .ndo_tx_timeout = be_tx_timeout,
5173 #ifdef CONFIG_NET_POLL_CONTROLLER
5174 .ndo_poll_controller = be_netpoll,
5175 #endif
5176 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5177 .ndo_bridge_getlink = be_ndo_bridge_getlink,
5178 .ndo_features_check = be_features_check,
5179 .ndo_get_phys_port_id = be_get_phys_port_id,
5180 };
5181
5182 static void be_netdev_init(struct net_device *netdev)
5183 {
5184 struct be_adapter *adapter = netdev_priv(netdev);
5185
5186 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5187 NETIF_F_GSO_UDP_TUNNEL |
5188 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5189 NETIF_F_HW_VLAN_CTAG_TX;
5190 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
5191 netdev->hw_features |= NETIF_F_RXHASH;
5192
5193 netdev->features |= netdev->hw_features |
5194 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
5195 NETIF_F_HIGHDMA;
5196
5197 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5198 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5199
5200 netdev->priv_flags |= IFF_UNICAST_FLT;
5201
5202 netdev->flags |= IFF_MULTICAST;
5203
5204 netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
5205
5206 netdev->netdev_ops = &be_netdev_ops;
5207
5208 netdev->ethtool_ops = &be_ethtool_ops;
5209
5210 if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
5211 netdev->udp_tunnel_nic_info = &be_udp_tunnels;
5212
5213 /* MTU range: 256 - 9000 */
5214 netdev->min_mtu = BE_MIN_MTU;
5215 netdev->max_mtu = BE_MAX_MTU;
5216 }
5217
5218 static void be_cleanup(struct be_adapter *adapter)
5219 {
5220 struct net_device *netdev = adapter->netdev;
5221
5222 rtnl_lock();
5223 netif_device_detach(netdev);
5224 if (netif_running(netdev))
5225 be_close(netdev);
5226 rtnl_unlock();
5227
5228 be_clear(adapter);
5229 }
5230
5231 static int be_resume(struct be_adapter *adapter)
5232 {
5233 struct net_device *netdev = adapter->netdev;
5234 int status;
5235
5236 status = be_setup(adapter);
5237 if (status)
5238 return status;
5239
5240 rtnl_lock();
5241 if (netif_running(netdev))
5242 status = be_open(netdev);
5243 rtnl_unlock();
5244
5245 if (status)
5246 return status;
5247
5248 netif_device_attach(netdev);
5249
5250 return 0;
5251 }
5252
5253 static void be_soft_reset(struct be_adapter *adapter)
5254 {
5255 u32 val;
5256
5257 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5258 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5259 val |= SLIPORT_SOFTRESET_SR_MASK;
5260 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5261 }
5262
5263 static bool be_err_is_recoverable(struct be_adapter *adapter)
5264 {
5265 struct be_error_recovery *err_rec = &adapter->error_recovery;
5266 unsigned long initial_idle_time =
5267 msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
5268 unsigned long recovery_interval =
5269 msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
5270 u16 ue_err_code;
5271 u32 val;
5272
5273 val = be_POST_stage_get(adapter);
5274 if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
5275 return false;
5276 ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
5277 if (ue_err_code == 0)
5278 return false;
5279
5280 dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
5281 ue_err_code);
5282
5283 if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
5284 dev_err(&adapter->pdev->dev,
5285 "Cannot recover within %lu sec from driver load\n",
5286 jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
5287 return false;
5288 }
5289
5290 if (err_rec->last_recovery_time && time_before_eq(
5291 jiffies - err_rec->last_recovery_time, recovery_interval)) {
5292 dev_err(&adapter->pdev->dev,
5293 "Cannot recover within %lu sec from last recovery\n",
5294 jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
5295 return false;
5296 }
5297
5298 if (ue_err_code == err_rec->last_err_code) {
5299 dev_err(&adapter->pdev->dev,
5300 "Cannot recover from a consecutive TPE error\n");
5301 return false;
5302 }
5303
5304 err_rec->last_recovery_time = jiffies;
5305 err_rec->last_err_code = ue_err_code;
5306 return true;
5307 }
5308
5309 static int be_tpe_recover(struct be_adapter *adapter)
5310 {
5311 struct be_error_recovery *err_rec = &adapter->error_recovery;
5312 int status = -EAGAIN;
5313 u32 val;
5314
5315 switch (err_rec->recovery_state) {
5316 case ERR_RECOVERY_ST_NONE:
5317 err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
5318 err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
5319 break;
5320
5321 case ERR_RECOVERY_ST_DETECT:
5322 val = be_POST_stage_get(adapter);
5323 if ((val & POST_STAGE_RECOVERABLE_ERR) !=
5324 POST_STAGE_RECOVERABLE_ERR) {
5325 dev_err(&adapter->pdev->dev,
5326 "Unrecoverable HW error detected: 0x%x\n", val);
5327 status = -EINVAL;
5328 err_rec->resched_delay = 0;
5329 break;
5330 }
5331
5332 dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
5333
5334 /* Only PF0 initiates a chip soft reset. But PF0 must wait UE2SR
5335 * milliseconds before it checks the final error status in
5336 * SLIPORT_SEMAPHORE to determine whether the recovery criteria
5337 * are met. If they are, PF0 initiates the soft reset.
5338 */
5339 if (adapter->pf_num == 0) {
5340 err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
5341 err_rec->resched_delay = err_rec->ue_to_reset_time -
5342 ERR_RECOVERY_UE_DETECT_DURATION;
5343 break;
5344 }
5345
5346 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5347 err_rec->resched_delay = err_rec->ue_to_poll_time -
5348 ERR_RECOVERY_UE_DETECT_DURATION;
5349 break;
5350
5351 case ERR_RECOVERY_ST_RESET:
5352 if (!be_err_is_recoverable(adapter)) {
5353 dev_err(&adapter->pdev->dev,
5354 "Failed to meet recovery criteria\n");
5355 status = -EIO;
5356 err_rec->resched_delay = 0;
5357 break;
5358 }
5359 be_soft_reset(adapter);
5360 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5361 err_rec->resched_delay = err_rec->ue_to_poll_time -
5362 err_rec->ue_to_reset_time;
5363 break;
5364
5365 case ERR_RECOVERY_ST_PRE_POLL:
5366 err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
5367 err_rec->resched_delay = 0;
5368 status = 0; /* done */
5369 break;
5370
5371 default:
5372 status = -EINVAL;
5373 err_rec->resched_delay = 0;
5374 break;
5375 }
5376
5377 return status;
5378 }
5379
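/* Full error-recovery sequence: run the TPE state machine on BEx/Skyhawk,
 * wait for the firmware to become ready, tear the function down with
 * be_cleanup() and bring it back up with be_resume().
 */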
5380 static int be_err_recover(struct be_adapter *adapter)
5381 {
5382 int status;
5383
5384 if (!lancer_chip(adapter)) {
5385 if (!adapter->error_recovery.recovery_supported ||
5386 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5387 return -EIO;
5388 status = be_tpe_recover(adapter);
5389 if (status)
5390 goto err;
5391 }
5392
5393 /* Wait for adapter to reach quiescent state before
5394 * destroying queues
5395 */
5396 status = be_fw_wait_ready(adapter);
5397 if (status)
5398 goto err;
5399
5400 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5401
5402 be_cleanup(adapter);
5403
5404 status = be_resume(adapter);
5405 if (status)
5406 goto err;
5407
5408 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5409
5410 err:
5411 return status;
5412 }
5413
5414 static void be_err_detection_task(struct work_struct *work)
5415 {
5416 struct be_error_recovery *err_rec =
5417 container_of(work, struct be_error_recovery,
5418 err_detection_work.work);
5419 struct be_adapter *adapter =
5420 container_of(err_rec, struct be_adapter,
5421 error_recovery);
5422 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
5423 struct device *dev = &adapter->pdev->dev;
5424 int recovery_status;
5425
5426 be_detect_error(adapter);
5427 if (!be_check_error(adapter, BE_ERROR_HW))
5428 goto reschedule_task;
5429
5430 recovery_status = be_err_recover(adapter);
5431 if (!recovery_status) {
5432 err_rec->recovery_retries = 0;
5433 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
5434 dev_info(dev, "Adapter recovery successful\n");
5435 goto reschedule_task;
5436 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5437 /* BEx/SH recovery state machine */
5438 if (adapter->pf_num == 0 &&
5439 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5440 dev_err(&adapter->pdev->dev,
5441 "Adapter recovery in progress\n");
5442 resched_delay = err_rec->resched_delay;
5443 goto reschedule_task;
5444 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
5445 /* For VFs, check every second whether the PF has
5446 * allocated resources.
5447 */
5448 dev_err(dev, "Re-trying adapter recovery\n");
5449 goto reschedule_task;
5450 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5451 ERR_RECOVERY_MAX_RETRY_COUNT) {
5452 /* If another error occurs during recovery, the adapter takes
5453 * 30 seconds to come out of the error state. Retry error
5454 * recovery after this interval.
5455 */
5456 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5457 resched_delay = ERR_RECOVERY_RETRY_DELAY;
5458 goto reschedule_task;
5459 } else {
5460 dev_err(dev, "Adapter recovery failed\n");
5461 dev_err(dev, "Please reboot server to recover\n");
5462 }
5463
5464 return;
5465
5466 reschedule_task:
5467 be_schedule_err_detection(adapter, resched_delay);
5468 }
5469
5470 static void be_log_sfp_info(struct be_adapter *adapter)
5471 {
5472 int status;
5473
5474 status = be_cmd_query_sfp_info(adapter);
5475 if (!status) {
5476 dev_err(&adapter->pdev->dev,
5477 "Port %c: %s Vendor: %s part no: %s",
5478 adapter->port_name,
5479 be_misconfig_evt_port_state[adapter->phy_state],
5480 adapter->phy.vendor_name,
5481 adapter->phy.vendor_pn);
5482 }
5483 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
5484 }
5485
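/* Periodic (1 second) housekeeping: query the die temperature on the PF,
 * reap MCC completions while the interface is down, refresh HW stats,
 * replenish RX queues that starved on memory allocation, update EQ delays
 * (except on Skyhawk) and log SFP info after a misconfiguration event.
 */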
5486 static void be_worker(struct work_struct *work)
5487 {
5488 struct be_adapter *adapter =
5489 container_of(work, struct be_adapter, work.work);
5490 struct be_rx_obj *rxo;
5491 int i;
5492
5493 if (be_physfn(adapter) &&
5494 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5495 be_cmd_get_die_temperature(adapter);
5496
5497 /* when interrupts are not yet enabled, just reap any pending
5498 * mcc completions
5499 */
5500 if (!netif_running(adapter->netdev)) {
5501 local_bh_disable();
5502 be_process_mcc(adapter);
5503 local_bh_enable();
5504 goto reschedule;
5505 }
5506
5507 if (!adapter->stats_cmd_sent) {
5508 if (lancer_chip(adapter))
5509 lancer_cmd_get_pport_stats(adapter,
5510 &adapter->stats_cmd);
5511 else
5512 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5513 }
5514
5515 for_all_rx_queues(adapter, rxo, i) {
5516 /* Replenish RX-queues starved due to memory
5517 * allocation failures.
5518 */
5519 if (rxo->rx_post_starved)
5520 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5521 }
5522
5523 /* EQ-delay update for Skyhawk is done while notifying EQ */
5524 if (!skyhawk_chip(adapter))
5525 be_eqd_update(adapter, false);
5526
5527 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
5528 be_log_sfp_info(adapter);
5529
5530 reschedule:
5531 adapter->work_counter++;
5532 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
5533 }
5534
5535 static void be_unmap_pci_bars(struct be_adapter *adapter)
5536 {
5537 if (adapter->csr)
5538 pci_iounmap(adapter->pdev, adapter->csr);
5539 if (adapter->db)
5540 pci_iounmap(adapter->pdev, adapter->db);
5541 if (adapter->pcicfg && adapter->pcicfg_mapped)
5542 pci_iounmap(adapter->pdev, adapter->pcicfg);
5543 }
5544
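/* Doorbell BAR selection: Lancer chips and VFs use BAR 0; BE2/BE3/Skyhawk
 * PFs use BAR 4.
 */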
5545 static int db_bar(struct be_adapter *adapter)
5546 {
5547 if (lancer_chip(adapter) || be_virtfn(adapter))
5548 return 0;
5549 else
5550 return 4;
5551 }
5552
5553 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5554 {
5555 if (skyhawk_chip(adapter)) {
5556 adapter->roce_db.size = 4096;
5557 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5558 db_bar(adapter));
5559 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5560 db_bar(adapter));
5561 }
5562 return 0;
5563 }
5564
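/* Map the PCI BARs used by the driver. The SLI_INTF register identifies
 * the SLI family and whether this is a VF. The CSR BAR (2) is mapped only
 * on BEx PFs, the doorbell BAR on all functions, and on Skyhawk/BEx the
 * PCICFG space comes from its own BAR on PFs (BAR 1 on BE2, BAR 0
 * otherwise) or from a fixed offset within the doorbell BAR on VFs.
 */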
5565 static int be_map_pci_bars(struct be_adapter *adapter)
5566 {
5567 struct pci_dev *pdev = adapter->pdev;
5568 u8 __iomem *addr;
5569 u32 sli_intf;
5570
5571 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5572 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5573 SLI_INTF_FAMILY_SHIFT;
5574 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5575
5576 if (BEx_chip(adapter) && be_physfn(adapter)) {
5577 adapter->csr = pci_iomap(pdev, 2, 0);
5578 if (!adapter->csr)
5579 return -ENOMEM;
5580 }
5581
5582 addr = pci_iomap(pdev, db_bar(adapter), 0);
5583 if (!addr)
5584 goto pci_map_err;
5585 adapter->db = addr;
5586
5587 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5588 if (be_physfn(adapter)) {
5589 /* PCICFG is the 2nd BAR in BE2 */
5590 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5591 if (!addr)
5592 goto pci_map_err;
5593 adapter->pcicfg = addr;
5594 adapter->pcicfg_mapped = true;
5595 } else {
5596 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5597 adapter->pcicfg_mapped = false;
5598 }
5599 }
5600
5601 be_roce_map_pci_bars(adapter);
5602 return 0;
5603
5604 pci_map_err:
5605 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5606 be_unmap_pci_bars(adapter);
5607 return -ENOMEM;
5608 }
5609
5610 static void be_drv_cleanup(struct be_adapter *adapter)
5611 {
5612 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5613 struct device *dev = &adapter->pdev->dev;
5614
5615 if (mem->va)
5616 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5617
5618 mem = &adapter->rx_filter;
5619 if (mem->va)
5620 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5621
5622 mem = &adapter->stats_cmd;
5623 if (mem->va)
5624 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5625 }
5626
5627 /* Allocate and initialize various fields in be_adapter struct */
5628 static int be_drv_init(struct be_adapter *adapter)
5629 {
5630 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5631 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5632 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5633 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5634 struct device *dev = &adapter->pdev->dev;
5635 int status = 0;
5636
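/* The MCC mailbox must be 16-byte aligned: allocate 16 extra bytes of
 * slack and derive an aligned virtual/DMA address pair from the raw
 * allocation below.
 */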
5637 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5638 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5639 &mbox_mem_alloc->dma,
5640 GFP_KERNEL);
5641 if (!mbox_mem_alloc->va)
5642 return -ENOMEM;
5643
5644 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5645 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5646 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5647
5648 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5649 rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
5650 &rx_filter->dma, GFP_KERNEL);
5651 if (!rx_filter->va) {
5652 status = -ENOMEM;
5653 goto free_mbox;
5654 }
5655
5656 if (lancer_chip(adapter))
5657 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5658 else if (BE2_chip(adapter))
5659 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5660 else if (BE3_chip(adapter))
5661 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5662 else
5663 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5664 stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
5665 &stats_cmd->dma, GFP_KERNEL);
5666 if (!stats_cmd->va) {
5667 status = -ENOMEM;
5668 goto free_rx_filter;
5669 }
5670
5671 mutex_init(&adapter->mbox_lock);
5672 mutex_init(&adapter->rx_filter_lock);
5673 spin_lock_init(&adapter->mcc_lock);
5674 spin_lock_init(&adapter->mcc_cq_lock);
5675 init_completion(&adapter->et_cmd_compl);
5676
5677 pci_save_state(adapter->pdev);
5678
5679 INIT_DELAYED_WORK(&adapter->work, be_worker);
5680
5681 adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
5682 adapter->error_recovery.resched_delay = 0;
5683 INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
5684 be_err_detection_task);
5685
5686 adapter->rx_fc = true;
5687 adapter->tx_fc = true;
5688
5689 /* Must be a power of 2 or else MODULO will BUG_ON */
5690 adapter->be_get_temp_freq = 64;
5691
5692 return 0;
5693
5694 free_rx_filter:
5695 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5696 free_mbox:
5697 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5698 mbox_mem_alloc->dma);
5699 return status;
5700 }
5701
5702 static void be_remove(struct pci_dev *pdev)
5703 {
5704 struct be_adapter *adapter = pci_get_drvdata(pdev);
5705
5706 if (!adapter)
5707 return;
5708
5709 be_roce_dev_remove(adapter);
5710 be_intr_set(adapter, false);
5711
5712 be_cancel_err_detection(adapter);
5713
5714 unregister_netdev(adapter->netdev);
5715
5716 be_clear(adapter);
5717
5718 if (!pci_vfs_assigned(adapter->pdev))
5719 be_cmd_reset_function(adapter);
5720
5721 /* tell fw we're done with firing cmds */
5722 be_cmd_fw_clean(adapter);
5723
5724 be_unmap_pci_bars(adapter);
5725 be_drv_cleanup(adapter);
5726
5727 pci_release_regions(pdev);
5728 pci_disable_device(pdev);
5729
5730 free_netdev(adapter->netdev);
5731 }
5732
5733 static ssize_t be_hwmon_show_temp(struct device *dev,
5734 struct device_attribute *dev_attr,
5735 char *buf)
5736 {
5737 struct be_adapter *adapter = dev_get_drvdata(dev);
5738
5739 /* Unit: millidegree Celsius */
5740 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5741 return -EIO;
5742 else
5743 return sprintf(buf, "%u\n",
5744 adapter->hwmon_info.be_on_die_temp * 1000);
5745 }
5746
5747 static SENSOR_DEVICE_ATTR(temp1_input, 0444,
5748 be_hwmon_show_temp, NULL, 1);
5749
5750 static struct attribute *be_hwmon_attrs[] = {
5751 &sensor_dev_attr_temp1_input.dev_attr.attr,
5752 NULL
5753 };
5754
5755 ATTRIBUTE_GROUPS(be_hwmon);
5756
5757 static char *mc_name(struct be_adapter *adapter)
5758 {
5759 char *str = ""; /* default */
5760
5761 switch (adapter->mc_type) {
5762 case UMC:
5763 str = "UMC";
5764 break;
5765 case FLEX10:
5766 str = "FLEX10";
5767 break;
5768 case vNIC1:
5769 str = "vNIC-1";
5770 break;
5771 case nPAR:
5772 str = "nPAR";
5773 break;
5774 case UFP:
5775 str = "UFP";
5776 break;
5777 case vNIC2:
5778 str = "vNIC-2";
5779 break;
5780 default:
5781 str = "";
5782 }
5783
5784 return str;
5785 }
5786
5787 static inline char *func_name(struct be_adapter *adapter)
5788 {
5789 return be_physfn(adapter) ? "PF" : "VF";
5790 }
5791
5792 static inline char *nic_name(struct pci_dev *pdev)
5793 {
5794 switch (pdev->device) {
5795 case OC_DEVICE_ID1:
5796 return OC_NAME;
5797 case OC_DEVICE_ID2:
5798 return OC_NAME_BE;
5799 case OC_DEVICE_ID3:
5800 case OC_DEVICE_ID4:
5801 return OC_NAME_LANCER;
5802 case BE_DEVICE_ID2:
5803 return BE3_NAME;
5804 case OC_DEVICE_ID5:
5805 case OC_DEVICE_ID6:
5806 return OC_NAME_SH;
5807 default:
5808 return BE_NAME;
5809 }
5810 }
5811
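/* PCI probe: enable the device, claim its regions, allocate the netdev,
 * set up 64-bit DMA, map the BARs, initialize driver state, configure the
 * adapter via be_setup(), register the netdev, and finally hook up RoCE,
 * error detection and (on PFs) the hwmon temperature sensor.
 */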
5812 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5813 {
5814 struct be_adapter *adapter;
5815 struct net_device *netdev;
5816 int status = 0;
5817
5818 status = pci_enable_device(pdev);
5819 if (status)
5820 goto do_none;
5821
5822 status = pci_request_regions(pdev, DRV_NAME);
5823 if (status)
5824 goto disable_dev;
5825 pci_set_master(pdev);
5826
5827 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5828 if (!netdev) {
5829 status = -ENOMEM;
5830 goto rel_reg;
5831 }
5832 adapter = netdev_priv(netdev);
5833 adapter->pdev = pdev;
5834 pci_set_drvdata(pdev, adapter);
5835 adapter->netdev = netdev;
5836 SET_NETDEV_DEV(netdev, &pdev->dev);
5837
5838 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5839 if (status) {
5840 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5841 goto free_netdev;
5842 }
5843
5844 status = be_map_pci_bars(adapter);
5845 if (status)
5846 goto free_netdev;
5847
5848 status = be_drv_init(adapter);
5849 if (status)
5850 goto unmap_bars;
5851
5852 status = be_setup(adapter);
5853 if (status)
5854 goto drv_cleanup;
5855
5856 be_netdev_init(netdev);
5857 status = register_netdev(netdev);
5858 if (status != 0)
5859 goto unsetup;
5860
5861 be_roce_dev_add(adapter);
5862
5863 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5864 adapter->error_recovery.probe_time = jiffies;
5865
5866 /* On-die temperature is not supported on VFs. */
5867 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5868 adapter->hwmon_info.hwmon_dev =
5869 devm_hwmon_device_register_with_groups(&pdev->dev,
5870 DRV_NAME,
5871 adapter,
5872 be_hwmon_groups);
5873 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5874 }
5875
5876 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5877 func_name(adapter), mc_name(adapter), adapter->port_name);
5878
5879 return 0;
5880
5881 unsetup:
5882 be_clear(adapter);
5883 drv_cleanup:
5884 be_drv_cleanup(adapter);
5885 unmap_bars:
5886 be_unmap_pci_bars(adapter);
5887 free_netdev:
5888 free_netdev(netdev);
5889 rel_reg:
5890 pci_release_regions(pdev);
5891 disable_dev:
5892 pci_disable_device(pdev);
5893 do_none:
5894 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5895 return status;
5896 }
5897
5898 static int __maybe_unused be_suspend(struct device *dev_d)
5899 {
5900 struct be_adapter *adapter = dev_get_drvdata(dev_d);
5901
5902 be_intr_set(adapter, false);
5903 be_cancel_err_detection(adapter);
5904
5905 be_cleanup(adapter);
5906
5907 return 0;
5908 }
5909
5910 static int __maybe_unused be_pci_resume(struct device *dev_d)
5911 {
5912 struct be_adapter *adapter = dev_get_drvdata(dev_d);
5913 int status = 0;
5914
5915 status = be_resume(adapter);
5916 if (status)
5917 return status;
5918
5919 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5920
5921 return 0;
5922 }
5923
5924 /*
5925 * An FLR will stop BE from DMAing any data.
5926 */
5927 static void be_shutdown(struct pci_dev *pdev)
5928 {
5929 struct be_adapter *adapter = pci_get_drvdata(pdev);
5930
5931 if (!adapter)
5932 return;
5933
5934 be_roce_dev_shutdown(adapter);
5935 cancel_delayed_work_sync(&adapter->work);
5936 be_cancel_err_detection(adapter);
5937
5938 netif_device_detach(adapter->netdev);
5939
5940 be_cmd_reset_function(adapter);
5941
5942 pci_disable_device(pdev);
5943 }
5944
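/* PCI EEH (error recovery) handlers: on error detection the function is
 * quiesced and the device disabled; slot_reset re-enables the device and
 * waits for the firmware to become ready; resume re-initializes the
 * function and restarts error detection.
 */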
5945 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5946 pci_channel_state_t state)
5947 {
5948 struct be_adapter *adapter = pci_get_drvdata(pdev);
5949
5950 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5951
5952 be_roce_dev_remove(adapter);
5953
5954 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5955 be_set_error(adapter, BE_ERROR_EEH);
5956
5957 be_cancel_err_detection(adapter);
5958
5959 be_cleanup(adapter);
5960 }
5961
5962 if (state == pci_channel_io_perm_failure)
5963 return PCI_ERS_RESULT_DISCONNECT;
5964
5965 pci_disable_device(pdev);
5966
5967 /* The error could cause the FW to trigger a flash debug dump.
5968 * Resetting the card while flash dump is in progress
5969 * can cause it not to recover; wait for it to finish.
5970 * Wait only on the first function, since the wait is needed
5971 * only once per adapter.
5972 */
5973 if (pdev->devfn == 0)
5974 ssleep(30);
5975
5976 return PCI_ERS_RESULT_NEED_RESET;
5977 }
5978
5979 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5980 {
5981 struct be_adapter *adapter = pci_get_drvdata(pdev);
5982 int status;
5983
5984 dev_info(&adapter->pdev->dev, "EEH reset\n");
5985
5986 status = pci_enable_device(pdev);
5987 if (status)
5988 return PCI_ERS_RESULT_DISCONNECT;
5989
5990 pci_set_master(pdev);
5991 pci_restore_state(pdev);
5992
5993 /* Check if card is ok and fw is ready */
5994 dev_info(&adapter->pdev->dev,
5995 "Waiting for FW to be ready after EEH reset\n");
5996 status = be_fw_wait_ready(adapter);
5997 if (status)
5998 return PCI_ERS_RESULT_DISCONNECT;
5999
6000 be_clear_error(adapter, BE_CLEAR_ALL);
6001 return PCI_ERS_RESULT_RECOVERED;
6002 }
6003
6004 static void be_eeh_resume(struct pci_dev *pdev)
6005 {
6006 int status = 0;
6007 struct be_adapter *adapter = pci_get_drvdata(pdev);
6008
6009 dev_info(&adapter->pdev->dev, "EEH resume\n");
6010
6011 pci_save_state(pdev);
6012
6013 status = be_resume(adapter);
6014 if (status)
6015 goto err;
6016
6017 be_roce_dev_add(adapter);
6018
6019 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
6020 return;
6021 err:
6022 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
6023 }
6024
6025 static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6026 {
6027 struct be_adapter *adapter = pci_get_drvdata(pdev);
6028 struct be_resources vft_res = {0};
6029 int status;
6030
6031 if (!num_vfs)
6032 be_vf_clear(adapter);
6033
6034 adapter->num_vfs = num_vfs;
6035
6036 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6037 dev_warn(&pdev->dev,
6038 "Cannot disable VFs while they are assigned\n");
6039 return -EBUSY;
6040 }
6041
6042 /* When the HW is in SRIOV capable configuration, the PF-pool resources
6043 * are equally distributed across the max-number of VFs. The user may
6044 * request only a subset of the max-vfs to be enabled.
6045 * Based on num_vfs, redistribute the resources across num_vfs so that
6046 * each VF gets a larger share of the resources.
6047 * This facility is not available in BE3 FW.
6048 * Also, this is done by FW in Lancer chip.
6049 */
6050 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6051 be_calculate_vf_res(adapter, adapter->num_vfs,
6052 &vft_res);
6053 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6054 adapter->num_vfs, &vft_res);
6055 if (status)
6056 dev_err(&pdev->dev,
6057 "Failed to optimize SR-IOV resources\n");
6058 }
6059
6060 status = be_get_resources(adapter);
6061 if (status)
6062 return be_cmd_status(status);
6063
6064 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6065 rtnl_lock();
6066 status = be_update_queues(adapter);
6067 rtnl_unlock();
6068 if (status)
6069 return be_cmd_status(status);
6070
6071 if (adapter->num_vfs)
6072 status = be_vf_setup(adapter);
6073
6074 if (!status)
6075 return adapter->num_vfs;
6076
6077 return 0;
6078 }
6079
6080 static const struct pci_error_handlers be_eeh_handlers = {
6081 .error_detected = be_eeh_err_detected,
6082 .slot_reset = be_eeh_reset,
6083 .resume = be_eeh_resume,
6084 };
6085
6086 static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
6087
6088 static struct pci_driver be_driver = {
6089 .name = DRV_NAME,
6090 .id_table = be_dev_ids,
6091 .probe = be_probe,
6092 .remove = be_remove,
6093 .driver.pm = &be_pci_pm_ops,
6094 .shutdown = be_shutdown,
6095 .sriov_configure = be_pci_sriov_configure,
6096 .err_handler = &be_eeh_handlers
6097 };
6098
6099 static int __init be_init_module(void)
6100 {
6101 int status;
6102
6103 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6104 rx_frag_size != 2048) {
6105 printk(KERN_WARNING DRV_NAME
6106 " : Module param rx_frag_size must be 2048/4096/8192."
6107 " Using 2048\n");
6108 rx_frag_size = 2048;
6109 }
6110
6111 if (num_vfs > 0) {
6112 pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
6113 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6114 }
6115
6116 be_wq = create_singlethread_workqueue("be_wq");
6117 if (!be_wq) {
6118 pr_warn(DRV_NAME " : workqueue creation failed\n");
6119 return -ENOMEM;
6120 }
6121
6122 be_err_recovery_workq =
6123 create_singlethread_workqueue("be_err_recover");
6124 if (!be_err_recovery_workq)
6125 pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");
6126
6127 status = pci_register_driver(&be_driver);
6128 if (status) {
6129 destroy_workqueue(be_wq);
6130 be_destroy_err_recovery_workq();
6131 }
6132 return status;
6133 }
6134 module_init(be_init_module);
6135
6136 static void __exit be_exit_module(void)
6137 {
6138 pci_unregister_driver(&be_driver);
6139
6140 be_destroy_err_recovery_workq();
6141
6142 if (be_wq)
6143 destroy_workqueue(be_wq);
6144 }
6145 module_exit(be_exit_module);
6146