// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
const char *const ef4_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
const char *const ef4_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
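/* i.e. give up after BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS
 * = 10 seconds in total.
 */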

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool ef4_separate_tx_channels;
module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int ef4_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
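/* (Illustrative arithmetic for the 205 usec figure above: half of a
 * 1024-entry ring is 512 descriptors, i.e. ~170 packets at a worst
 * case of 3 descriptors each; ~1.2 usec is roughly the 10G wire time
 * of a full-sized ~1500-byte frame.)
 */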

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
static void ef4_remove_channel(struct ef4_channel *channel);
static void ef4_remove_channels(struct ef4_nic *efx);
static const struct ef4_channel_type ef4_default_channel_type;
static void ef4_remove_port(struct ef4_nic *efx);
static void ef4_init_napi_channel(struct ef4_channel *channel);
static void ef4_fini_napi(struct ef4_nic *efx);
static void ef4_fini_napi_channel(struct ef4_channel *channel);
static void ef4_fini_struct(struct ef4_nic *efx);
static void ef4_start_all(struct ef4_nic *efx);
static void ef4_stop_all(struct ef4_nic *efx);

#define EF4_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int ef4_check_disabled(struct ef4_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int ef4_process_channel(struct ef4_channel *channel, int budget)
{
	struct ef4_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = ef4_nic_process_eventq(channel, budget);
	if (spent && ef4_channel_has_rx_queue(channel)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_rx_flush_packet(channel);
		ef4_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by ef4_process_channel().
 */
static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

static int ef4_poll(struct napi_struct *napi, int budget)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	struct ef4_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = ef4_process_channel(channel, budget);

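	/* NAPI contract: if we consumed the whole budget the core will
	 * poll us again without re-enabling interrupts; only on a
	 * partial poll may we complete and re-arm the event queue.
	 */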
	if (spent < budget) {
		if (ef4_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			ef4_update_irq_mod(efx, channel);
		}

		ef4_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since ef4_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete_done(napi, spent);
		ef4_nic_eventq_read_ack(channel);
	}

	return spent;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int ef4_probe_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;

	return ef4_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int ef4_init_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	int rc;

	EF4_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = ef4_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void ef4_start_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();
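	/* The barrier pairs with the channel->enabled check at the top
	 * of ef4_process_channel().
	 */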

	napi_enable(&channel->napi_str);
	ef4_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void ef4_stop_eventq(struct ef4_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void ef4_fini_eventq(struct ef4_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	ef4_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void ef4_remove_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	ef4_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct ef4_channel *
ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &ef4_default_channel_type;

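	/* TX queues are numbered flat across channels: channel i owns
	 * queues i * EF4_TXQ_TYPES .. (i + 1) * EF4_TXQ_TYPES - 1.
	 */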
	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EF4_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct ef4_channel *
ef4_copy_channel(const struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);

	return channel;
}

static int ef4_probe_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = ef4_probe_eventq(channel);
	if (rc)
		goto fail;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		rc = ef4_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	ef4_for_each_channel_rx_queue(rx_queue, channel) {
		rc = ef4_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	ef4_remove_channel(channel);
	return rc;
}

static void
ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
{
	struct ef4_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void ef4_set_channel_names(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int ef4_probe_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	ef4_for_each_channel_rev(channel, efx) {
		rc = ef4_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	ef4_set_channel_names(efx);

	return 0;

fail:
	ef4_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void ef4_start_datapath(struct ef4_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	struct ef4_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
				       EF4_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	ef4_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
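	/* For example (illustrative): with 1024 entries and
	 * ef4_tx_max_skb_descs() == 3, we stop with 1021 descriptors
	 * in use and wake at 510.
	 */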

	/* Initialise the channels */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			ef4_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			ef4_stop_eventq(channel);
			ef4_fast_push_rx_descriptors(rx_queue, false);
			ef4_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void ef4_stop_datapath(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Stop RX refill */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	ef4_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (ef4_channel_has_rx_queue(channel)) {
			ef4_stop_eventq(channel);
			ef4_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EF4_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		ef4_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			ef4_fini_rx_queue(rx_queue);
		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
			ef4_fini_tx_queue(tx_queue);
	}
}

static void ef4_remove_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	ef4_for_each_channel_rx_queue(rx_queue, channel)
		ef4_remove_rx_queue(rx_queue);
	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
		ef4_remove_tx_queue(tx_queue);
	ef4_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void ef4_remove_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_remove_channel(channel);
}

int
ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	ef4_for_each_channel(channel, efx) {
		struct ef4_rx_queue *rx_queue;
		struct ef4_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);
	ef4_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		swap(efx->channel[i], other_channel[i]);
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = ef4_probe_channel(channel);
		if (rc)
			goto rollback;
		ef4_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			ef4_fini_napi_channel(channel);
			ef4_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = ef4_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		ef4_start_all(efx);
		netif_device_attach(efx->net_dev);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		swap(efx->channel[i], other_channel[i]);
	}
	goto out;
}

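/* Re-arm the RX refill timer: when buffer allocation fails on the fast
 * path, the refill is retried 100 ms later via ef4_rx_slow_fill().
 */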
void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct ef4_channel_type ef4_default_channel_type = {
	.pre_probe		= ef4_channel_dummy_op_int,
	.post_remove		= ef4_channel_dummy_op_void,
	.get_name		= ef4_get_channel_name,
	.copy			= ef4_copy_channel,
	.keep_eventq		= false,
};

int ef4_channel_dummy_op_int(struct ef4_channel *channel)
{
	return 0;
}

void ef4_channel_dummy_op_void(struct ef4_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; the carrier state in turn
 * determines whether the networking core will use the port's TX queue.
 */
void ef4_link_status_changed(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
		else
			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EF4_FC_TX;
	}
}
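
/* Both ef4_link_set_advertising() above and ef4_link_set_wanted_fc()
 * below use the standard IEEE 802.3 pause advertisement encoding:
 *
 *	wanted_fc	ADVERTISED_Pause	ADVERTISED_Asym_Pause
 *	RX and TX	1			0
 *	RX only		1			1
 *	TX only		0			1
 *	neither		0			0
 */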

void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EF4_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EF4_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void ef4_fini_port(struct ef4_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void ef4_mac_reconfigure(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 * through ef4_monitor().
 *
 * Callers must hold the mac_lock
 */
int __ef4_reconfigure_port(struct ef4_nic *efx)
{
	enum ef4_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int ef4_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void ef4_mac_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int ef4_probe_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);

	return 0;
}

static int ef4_init_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	ef4_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_start_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	ef4_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void ef4_stop_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against ef4_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	ef4_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void ef4_fini_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	ef4_link_status_changed(efx);
}

static void ef4_remove_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(ef4_primary_list);
static LIST_HEAD(ef4_unassociated_list);

static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}

static void ef4_associate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &ef4_primary_list);

		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
					 node) {
			if (ef4_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &ef4_primary_list, node) {
			if (ef4_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &ef4_unassociated_list);
	}
}

static void ef4_dissociate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &ef4_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int ef4_init_io(struct ef4_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar;

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our genuine mask
	 * down to 32 bits, because some architectures will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
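	/* For example (illustrative): starting from a 46-bit mask we
	 * try 46, 45, ... bits until dma_set_mask_and_coherent()
	 * succeeds, giving up below the 32-bit mask.
	 */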
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, bar);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

static void ef4_fini_io(struct ef4_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar;
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
{
	size_t i;

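	/* ethtool_rxfh_indir_default(i, n) spreads entries round-robin
	 * (i % n) across the RSS channels.
	 */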
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

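/* By default count one potential RSS channel per physical core, not per
 * logical CPU: hyperthread siblings are collapsed by OR-ing each CPU's
 * topology_sibling_cpumask() into the accumulated mask below.
 */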
static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EF4_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EF4_MAX_RX_QUEUES);
		count = EF4_MAX_RX_QUEUES;
	}

	return count;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int ef4_probe_interrupts(struct ef4_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
		struct msix_entry xentries[EF4_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = ef4_wanted_parallelism(efx);
		if (ef4_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EF4_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (ef4_separate_tx_channels) {
				efx->n_tx_channels = clamp(n_channels / 2, 1U,
							   efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				ef4_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			ef4_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	efx->rss_spread = efx->n_rx_channels;

	return 0;
}

static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	ef4_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
		ef4_start_eventq(channel);
	}

	return 0;
fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	return rc;
}

static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	ef4_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}
}

static int ef4_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = ef4_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void ef4_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_soft_disable_interrupts(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void ef4_remove_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	ef4_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void ef4_set_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	efx->tx_channel_offset =
		ef4_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	ef4_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		ef4_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EF4_TXQ_TYPES);
	}
}

static int ef4_probe_nic(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = ef4_probe_interrupts(efx);
		if (rc)
			goto fail1;

		ef4_set_channels(efx);

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			ef4_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(&efx->rx_hash_key,
				    sizeof(efx->rx_hash_key));
	ef4_set_default_rx_indir_table(efx);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	ef4_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void ef4_remove_nic(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	ef4_remove_interrupts(efx);
	efx->type->remove(efx);
}

static int ef4_probe_filters(struct ef4_nic *efx)
{
	int rc;

	spin_lock_init(&efx->filter_lock);
	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct ef4_channel *channel;
		int i, success = 1;

		ef4_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			ef4_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_remove_filters(struct ef4_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

static void ef4_restore_filters(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->filter_table_restore(efx);
	up_read(&efx->filter_sem);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int ef4_probe_all(struct ef4_nic *efx)
{
	int rc;

	rc = ef4_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = ef4_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;

	rc = ef4_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = ef4_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

fail5:
	ef4_remove_filters(efx);
fail4:
fail3:
	ef4_remove_port(efx);
fail2:
	ef4_remove_nic(efx);
fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void ef4_start_all(struct ef4_nic *efx)
{
	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	ef4_start_port(efx);
	ef4_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   ef4_monitor_interval);

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void ef4_stop_all(struct ef4_nic *efx)
{
	EF4_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* update stats before we go down so we can accurately count
	 * rx_nodesc_drops
	 */
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	ef4_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	ef4_stop_datapath(efx);
}

static void ef4_remove_all(struct ef4_nic *efx)
{
	ef4_remove_channels(efx);
	ef4_remove_filters(efx);
	ef4_remove_port(efx);
	ef4_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
ef4_usecs_to_ticks(struct ef4_nic * efx,unsigned int usecs)1879 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1880 {
1881 if (usecs == 0)
1882 return 0;
1883 if (usecs * 1000 < efx->timer_quantum_ns)
1884 return 1; /* never round down to 0 */
1885 return usecs * 1000 / efx->timer_quantum_ns;
1886 }
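
/* Worked example (illustrative, not part of the driver): assuming a NIC
 * whose timer quantum is 5000 ns (the real value is set by the NIC type
 * at probe time):
 *
 *	ef4_usecs_to_ticks(efx, 0)  returns 0 (moderation disabled)
 *	ef4_usecs_to_ticks(efx, 3)  returns 1 (3000 ns < 5000 ns quantum)
 *	ef4_usecs_to_ticks(efx, 20) returns 4 (20000 / 5000)
 */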

/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct ef4_channel *channel;
	unsigned int timer_max_us;

	EF4_ASSERT_RESET_SERIALISED(efx);

	timer_max_us = efx->timer_max_ns / 1000;

	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
		return -EINVAL;

	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation_us = rx_usecs;
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_rx_queue(channel))
			channel->irq_moderation_us = rx_usecs;
		else if (ef4_channel_has_tx_queues(channel))
			channel->irq_moderation_us = tx_usecs;
	}

	return 0;
}
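
/* Usage sketch (illustrative): request 20 us TX and 60 us RX moderation
 * with adaptive RX enabled, letting the RX value win on any channel that
 * carries both queue types. The numbers are example assumptions only.
 *
 *	int rc = ef4_init_irq_moderation(efx, 20, 60, true, true);
 *	if (rc)
 *		return rc;	(a requested value exceeded timer_max_us)
 */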

void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = efx->irq_rx_moderation_us;

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0) {
		*tx_usecs = *rx_usecs;
	} else {
		struct ef4_channel *tx_channel;

		tx_channel = efx->channel[efx->tx_channel_offset];
		*tx_usecs = tx_channel->irq_moderation_us;
	}
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void ef4_monitor(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in progress, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   ef4_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void ef4_init_napi_channel(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
}

static void ef4_init_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_init_napi_channel(channel);
}

static void ef4_fini_napi_channel(struct ef4_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

static void ef4_fini_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int ef4_net_open(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	ef4_link_status_changed(efx);

	ef4_start_all(efx);
	ef4_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really return void.
 */
int ef4_net_stop(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	ef4_stop_all(efx);

	return 0;
}

/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
			  struct rtnl_link_stats64 *stats)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);
}

/* Context: netif_tx_lock held, BHs disabled. */
static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}


/* Context: process, rtnl_lock() held. */
static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	WRITE_ONCE(net_dev->mtu, new_mtu);
	ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}

static int ef4_set_mac_address(struct net_device *net_dev, void *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	eth_hw_addr_set(net_dev, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			eth_hw_addr_set(net_dev, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void ef4_set_rx_mode(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise ef4_start_port() will do this */
}

static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
		if (rc)
			return rc;
	}

	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
		/* ef4_set_rx_mode() will schedule MAC work to update filters
		 * when the new features are finally set in net_dev.
		 */
		ef4_set_rx_mode(net_dev);
	}

	return 0;
}

static const struct net_device_ops ef4_netdev_ops = {
	.ndo_open		= ef4_net_open,
	.ndo_stop		= ef4_net_stop,
	.ndo_get_stats64	= ef4_net_stats,
	.ndo_tx_timeout		= ef4_watchdog,
	.ndo_start_xmit		= ef4_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= ef4_ioctl,
	.ndo_change_mtu		= ef4_change_mtu,
	.ndo_set_mac_address	= ef4_set_mac_address,
	.ndo_set_rx_mode	= ef4_set_rx_mode,
	.ndo_set_features	= ef4_set_features,
	.ndo_setup_tc		= ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= ef4_filter_rfs,
#endif
};

static void ef4_update_name(struct ef4_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	ef4_mtd_rename(efx);
	ef4_set_channel_names(efx);
}

static int ef4_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		ef4_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block ef4_netdev_notifier = {
	.notifier_call = ef4_netdev_event,
};

static ssize_t
phy_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ef4_nic *efx = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR_RO(phy_type);
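
/* The attribute appears in the PCI device's sysfs directory; e.g.
 * (path and value are illustrative):
 *
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/phy_type
 *	3
 */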

static int ef4_register_netdev(struct ef4_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct ef4_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef4_netdev_ops;
	net_dev->ethtool_ops = &ef4_ethtool_ops;
	netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
	net_dev->min_mtu = EF4_MIN_MTU;
	net_dev->max_mtu = EF4_MAX_MTU;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested. If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef4_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	ef4_for_each_channel(channel, efx) {
		struct ef4_tx_queue *tx_queue;
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			ef4_init_tx_queue_core_txq(tx_queue);
	}

	ef4_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}
	return 0;

fail_registered:
	rtnl_lock();
	ef4_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void ef4_unregister_netdev(struct ef4_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	if (ef4_dev_registered(efx)) {
		strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
{
	EF4_ASSERT_RESET_SERIALISED(efx);

	ef4_stop_all(efx);
	ef4_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * ef4_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		rc = efx->phy_op->reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = ef4_enable_interrupts(efx);
	if (rc)
		goto fail;

	down_read(&efx->filter_sem);
	ef4_restore_filters(efx);
	up_read(&efx->filter_sem);

	mutex_unlock(&efx->mac_lock);

	ef4_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int ef4_reset(struct ef4_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	ef4_device_detach_sync(efx);
	ef4_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);
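	/* Worked example (illustrative): assuming RESET_TYPE_INVISIBLE == 0
	 * and RESET_TYPE_ALL == 1, performing an ALL reset gives
	 * -(1 << 2) == ~0x3, clearing the INVISIBLE and ALL bits while any
	 * wider-scoped requests (WORLD, DISABLE, ...) remain pending.
	 */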

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = ef4_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int ef4_try_recovery(struct ef4_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void ef4_reset_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = READ_ONCE(efx->reset_pending);
	method = fls(pending) - 1;
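	/* fls() returns the 1-based index of the highest set bit, so the
	 * widest-scoped pending reset wins. E.g. (illustrative) pending ==
	 * 0x5 gives fls() == 3 and method == 2. For an empty mask fls()
	 * returns 0, which is why the !pending check below must still run.
	 */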

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    ef4_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in ef4_schedule_reset() but it may
	 * have changed by now. Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)ef4_reset(efx, method);

	rtnl_unlock();
}

void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (READ_ONCE(efx->state) != STATE_READY)
		return;

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id ef4_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{0}	/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int ef4_port_dummy_op_int(struct ef4_nic *efx)
{
	return 0;
}
void ef4_port_dummy_op_void(struct ef4_nic *efx) {}

static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
{
	return false;
}

static const struct ef4_phy_operations ef4_dummy_phy_operations = {
	.init		= ef4_port_dummy_op_int,
	.reconfigure	= ef4_port_dummy_op_int,
	.poll		= ef4_port_dummy_op_poll,
	.fini		= ef4_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * ef4_nic (including all sub-structures).
 */
static int ef4_init_struct(struct ef4_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_FALCON_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, ef4_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
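	/* e.g. (illustrative) with NET_IP_ALIGN == 2 and a 16-byte RX
	 * prefix, (16 + 2) % 4 == 2 bytes of extra offset keep the IP
	 * header aligned once the prefix has been accounted for.
	 */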
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &ef4_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, ef4_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	ef4_fini_struct(efx);
	return -ENOMEM;
}

static void ef4_fini_struct(struct ef4_nic *efx)
{
	int i;

	for (i = 0; i < EF4_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void ef4_pci_remove_main(struct ef4_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	ef4_disable_interrupts(efx);
	ef4_nic_fini_interrupt(efx);
	ef4_fini_port(efx);
	efx->type->fini(efx);
	ef4_fini_napi(efx);
	ef4_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void ef4_pci_remove(struct pci_dev *pci_dev)
{
	struct ef4_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	ef4_dissociate(efx);
	dev_close(efx->net_dev);
	ef4_disable_interrupts(efx);
	efx->state = STATE_UNINIT;
	rtnl_unlock();

	ef4_unregister_netdev(efx);

	ef4_mtd_remove(efx);

	ef4_pci_remove_main(efx);

	ef4_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	ef4_fini_struct(efx);
	free_netdev(efx->net_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the installed NIC.
 */
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	unsigned int vpd_size, kw_len;
	u8 *vpd_data;
	int start;

	vpd_data = pci_vpd_alloc(dev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(dev, "Unable to read VPD\n");
		return;
	}

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (start < 0)
		pci_warn(dev, "Part number not found or incomplete\n");
	else
		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (start < 0)
		pci_warn(dev, "Serial number not found or incomplete\n");
	else
		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);

	kfree(vpd_data);
}


/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int ef4_pci_probe_main(struct ef4_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = ef4_probe_all(efx);
	if (rc)
		goto fail1;

	ef4_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = ef4_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = ef4_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	rc = ef4_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

fail6:
	ef4_nic_fini_interrupt(efx);
fail5:
	ef4_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	ef4_fini_napi(efx);
	ef4_remove_all(efx);
fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. ef4_net_open).
 */
static int ef4_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct ef4_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct ef4_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
				     EF4_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct ef4_nic_type *) entry->driver_data;
	efx->fixed_features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = ef4_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	ef4_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = ef4_init_io(efx);
	if (rc)
		goto fail2;

	rc = ef4_pci_probe_main(efx);
	if (rc)
		goto fail3;

	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_RXCSUM);
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);

	net_dev->hw_features = net_dev->features & ~efx->fixed_features;

	/* Disable VLAN filtering by default. It may be enforced if
	 * the feature is fixed (i.e. VLAN filters are required to
	 * receive VLAN tagged packets due to vPort restrictions).
	 */
	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	net_dev->features |= efx->fixed_features;

	rc = ef4_register_netdev(efx);
	if (rc)
		goto fail4;

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = ef4_mtd_probe(efx);
	rtnl_unlock();
	if (rc && rc != -EPERM)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	return 0;

fail4:
	ef4_pci_remove_main(efx);
fail3:
	ef4_fini_io(efx);
fail2:
	ef4_fini_struct(efx);
fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static int ef4_pm_freeze(struct device *dev)
{
	struct ef4_nic *efx = dev_get_drvdata(dev);

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		ef4_device_detach_sync(efx);

		ef4_stop_all(efx);
		ef4_disable_interrupts(efx);
	}

	rtnl_unlock();

	return 0;
}

static int ef4_pm_thaw(struct device *dev)
{
	int rc;
	struct ef4_nic *efx = dev_get_drvdata(dev);

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		rc = ef4_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		ef4_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}

static int ef4_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct ef4_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int ef4_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	rc = ef4_pm_thaw(dev);
	return rc;
}

static int ef4_pm_suspend(struct device *dev)
{
	int rc;

	ef4_pm_freeze(dev);
	rc = ef4_pm_poweroff(dev);
	if (rc)
		ef4_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops ef4_pm_ops = {
	.suspend	= ef4_pm_suspend,
	.resume		= ef4_pm_resume,
	.freeze		= ef4_pm_freeze,
	.thaw		= ef4_pm_thaw,
	.poweroff	= ef4_pm_poweroff,
	.restore	= ef4_pm_resume,
};

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct ef4_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		ef4_device_detach_sync(efx);

		ef4_stop_all(efx);
		ef4_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in ef4_io_resume. */
static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
	struct ef4_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void ef4_io_resume(struct pci_dev *pdev)
{
	struct ef4_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = ef4_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "ef4_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 */
static const struct pci_error_handlers ef4_err_handlers = {
	.error_detected	= ef4_io_error_detected,
	.slot_reset	= ef4_io_slot_reset,
	.resume		= ef4_io_resume,
};

static struct pci_driver ef4_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= ef4_pci_table,
	.probe		= ef4_pci_probe,
	.remove		= ef4_pci_remove,
	.driver.pm	= &ef4_pm_ops,
	.err_handler	= &ef4_err_handlers,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init ef4_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&ef4_netdev_notifier);
	if (rc)
		goto err_notifier;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&ef4_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	unregister_netdevice_notifier(&ef4_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit ef4_exit_module(void)
{
	printk(KERN_INFO "Solarflare Falcon driver unloading\n");

	pci_unregister_driver(&ef4_pci_driver);
	destroy_workqueue(reset_workqueue);
	unregister_netdevice_notifier(&ef4_netdev_notifier);
}

module_init(ef4_init_module);
module_exit(ef4_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Falcon network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ef4_pci_table);
MODULE_VERSION(EF4_DRIVER_VERSION);