xref: /linux/drivers/net/ethernet/sfc/selftest.c (revision 800c5eb7b5eba6cb2a32738d763fd59f0fbcdde4)
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2006-2010 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10 
11 #include <linux/netdevice.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/pci.h>
16 #include <linux/ethtool.h>
17 #include <linux/ip.h>
18 #include <linux/in.h>
19 #include <linux/udp.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/slab.h>
22 #include "net_driver.h"
23 #include "efx.h"
24 #include "nic.h"
25 #include "selftest.h"
26 #include "workarounds.h"
27 
28 /*
29  * Loopback test packet structure
30  *
31  * The self-test should stress every RSS vector, and unfortunately
32  * Falcon only performs RSS on TCP/UDP packets.
33  */
struct efx_loopback_payload {
	struct ethhdr header;	/* Ethernet header; dest = NIC's own MAC */
	struct iphdr ip;	/* IPv4 header; saddr doubles as packet index */
	struct udphdr udp;	/* UDP header; checksum deliberately unused */
	__be16 iteration;	/* Test iteration number, network byte order */
	const char msg[64];	/* NOTE(review): written via memcpy() in
				 * efx_iterate_state() despite the const
				 * qualifier — consider dropping const */
} __packed;
41 
/* Loopback test source MAC address (Solarflare OUI 00:0f:53) */
static const unsigned char payload_source[ETH_ALEN] = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

/* Message copied into the payload of every loopback test packet */
static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names (indexed by enum efx_int_mode) used in log output */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
/* Human-readable name of the NIC's current interrupt mode */
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
59 
/**
 * efx_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in efx_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 *
 * Pointed to by efx->loopback_selftest for the duration of a test; the
 * RX path reads it concurrently, hence the atomic packet counters.
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;	/* set per TX queue in efx_test_loopbacks() */
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};
79 
80 /**************************************************************************
81  *
82  * MII, NVRAM and register tests
83  *
84  **************************************************************************/
85 
86 static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
87 {
88 	int rc = 0;
89 
90 	if (efx->phy_op->test_alive) {
91 		rc = efx->phy_op->test_alive(efx);
92 		tests->phy_alive = rc ? -1 : 1;
93 	}
94 
95 	return rc;
96 }
97 
98 static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
99 {
100 	int rc = 0;
101 
102 	if (efx->type->test_nvram) {
103 		rc = efx->type->test_nvram(efx);
104 		tests->nvram = rc ? -1 : 1;
105 	}
106 
107 	return rc;
108 }
109 
110 static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
111 {
112 	int rc = 0;
113 
114 	/* Test register access */
115 	if (efx->type->test_registers) {
116 		rc = efx->type->test_registers(efx);
117 		tests->registers = rc ? -1 : 1;
118 	}
119 
120 	return rc;
121 }
122 
123 /**************************************************************************
124  *
125  * Interrupt and event queue testing
126  *
127  **************************************************************************/
128 
129 /* Test generation and receipt of interrupts */
130 static int efx_test_interrupts(struct efx_nic *efx,
131 			       struct efx_self_tests *tests)
132 {
133 	int cpu;
134 
135 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
136 	tests->interrupt = -1;
137 
138 	/* Reset interrupt flag */
139 	efx->last_irq_cpu = -1;
140 	smp_wmb();
141 
142 	efx_nic_generate_interrupt(efx);
143 
144 	/* Wait for arrival of test interrupt. */
145 	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
146 	schedule_timeout_uninterruptible(HZ / 10);
147 	cpu = ACCESS_ONCE(efx->last_irq_cpu);
148 	if (cpu >= 0)
149 		goto success;
150 
151 	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
152 	return -ETIMEDOUT;
153 
154  success:
155 	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
156 		  INT_MODE(efx), cpu);
157 	tests->interrupt = 1;
158 	return 0;
159 }
160 
/* Test generation and receipt of interrupting events.
 * Generates a test event on the channel, then checks (a) that the event
 * was delivered by DMA and (b) that it raised an interrupt.  Results go
 * into tests->eventq_dma[] and tests->eventq_int[] (1 pass / -1 fail).
 */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	bool napi_ran, dma_seen, int_seen;

	/* Snapshot the read pointer so we can tell later whether NAPI
	 * consumed the test event; reset the per-channel IRQ marker and
	 * publish both before generating the event. */
	read_ptr = channel->eventq_read_ptr;
	channel->last_irq_cpu = -1;
	smp_wmb();

	efx_nic_generate_test_event(channel);

	/* Wait for arrival of interrupt.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	msleep(10);
	/* Freeze NAPI so the state inspected below cannot change under us */
	napi_disable(&channel->napi_str);
	if (channel->eventq_read_ptr != read_ptr) {
		/* NAPI advanced the queue: the event arrived by DMA and an
		 * interrupt must have scheduled NAPI. */
		napi_ran = true;
		dma_seen = true;
		int_seen = true;
	} else {
		/* NAPI did not run; probe the event queue and the IRQ
		 * marker directly to see which stage (if any) worked. */
		napi_ran = false;
		dma_seen = efx_nic_event_present(channel);
		int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
	}
	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);

	tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
	tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

	if (dma_seen && int_seen) {
		netif_dbg(efx, drv, efx->net_dev,
			  "channel %d event queue passed (with%s NAPI)\n",
			  channel->channel, napi_ran ? "" : "out");
		return 0;
	} else {
		/* Report failure and whether either interrupt or DMA worked */
		netif_err(efx, drv, efx->net_dev,
			  "channel %d timed out waiting for event queue\n",
			  channel->channel);
		if (int_seen)
			netif_err(efx, drv, efx->net_dev,
				  "channel %d saw interrupt "
				  "during event queue test\n",
				  channel->channel);
		if (dma_seen)
			netif_err(efx, drv, efx->net_dev,
				  "channel %d event was generated, but "
				  "failed to trigger an interrupt\n",
				  channel->channel);
		return -ETIMEDOUT;
	}
}
218 
219 static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
220 			unsigned flags)
221 {
222 	int rc;
223 
224 	if (!efx->phy_op->run_tests)
225 		return 0;
226 
227 	mutex_lock(&efx->mac_lock);
228 	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
229 	mutex_unlock(&efx->mac_lock);
230 	return rc;
231 }
232 
233 /**************************************************************************
234  *
235  * Loopback testing
236  * NB Only one loopback test can be executing concurrently.
237  *
238  **************************************************************************/
239 
240 /* Loopback test RX callback
241  * This is called for each received packet during loopback testing.
242  */
243 void efx_loopback_rx_packet(struct efx_nic *efx,
244 			    const char *buf_ptr, int pkt_len)
245 {
246 	struct efx_loopback_state *state = efx->loopback_selftest;
247 	struct efx_loopback_payload *received;
248 	struct efx_loopback_payload *payload;
249 
250 	BUG_ON(!buf_ptr);
251 
252 	/* If we are just flushing, then drop the packet */
253 	if ((state == NULL) || state->flush)
254 		return;
255 
256 	payload = &state->payload;
257 
258 	received = (struct efx_loopback_payload *) buf_ptr;
259 	received->ip.saddr = payload->ip.saddr;
260 	if (state->offload_csum)
261 		received->ip.check = payload->ip.check;
262 
263 	/* Check that header exists */
264 	if (pkt_len < sizeof(received->header)) {
265 		netif_err(efx, drv, efx->net_dev,
266 			  "saw runt RX packet (length %d) in %s loopback "
267 			  "test\n", pkt_len, LOOPBACK_MODE(efx));
268 		goto err;
269 	}
270 
271 	/* Check that the ethernet header exists */
272 	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
273 		netif_err(efx, drv, efx->net_dev,
274 			  "saw non-loopback RX packet in %s loopback test\n",
275 			  LOOPBACK_MODE(efx));
276 		goto err;
277 	}
278 
279 	/* Check packet length */
280 	if (pkt_len != sizeof(*payload)) {
281 		netif_err(efx, drv, efx->net_dev,
282 			  "saw incorrect RX packet length %d (wanted %d) in "
283 			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
284 			  LOOPBACK_MODE(efx));
285 		goto err;
286 	}
287 
288 	/* Check that IP header matches */
289 	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
290 		netif_err(efx, drv, efx->net_dev,
291 			  "saw corrupted IP header in %s loopback test\n",
292 			  LOOPBACK_MODE(efx));
293 		goto err;
294 	}
295 
296 	/* Check that msg and padding matches */
297 	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
298 		netif_err(efx, drv, efx->net_dev,
299 			  "saw corrupted RX packet in %s loopback test\n",
300 			  LOOPBACK_MODE(efx));
301 		goto err;
302 	}
303 
304 	/* Check that iteration matches */
305 	if (received->iteration != payload->iteration) {
306 		netif_err(efx, drv, efx->net_dev,
307 			  "saw RX packet from iteration %d (wanted %d) in "
308 			  "%s loopback test\n", ntohs(received->iteration),
309 			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
310 		goto err;
311 	}
312 
313 	/* Increase correct RX count */
314 	netif_vdbg(efx, drv, efx->net_dev,
315 		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
316 
317 	atomic_inc(&state->rx_good);
318 	return;
319 
320  err:
321 #ifdef DEBUG
322 	if (atomic_read(&state->rx_bad) == 0) {
323 		netif_err(efx, drv, efx->net_dev, "received packet:\n");
324 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
325 			       buf_ptr, pkt_len, 0);
326 		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
327 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
328 			       &state->payload, sizeof(state->payload), 0);
329 	}
330 #endif
331 	atomic_inc(&state->rx_bad);
332 }
333 
334 /* Initialise an efx_selftest_state for a new iteration */
335 static void efx_iterate_state(struct efx_nic *efx)
336 {
337 	struct efx_loopback_state *state = efx->loopback_selftest;
338 	struct net_device *net_dev = efx->net_dev;
339 	struct efx_loopback_payload *payload = &state->payload;
340 
341 	/* Initialise the layerII header */
342 	memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
343 	memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
344 	payload->header.h_proto = htons(ETH_P_IP);
345 
346 	/* saddr set later and used as incrementing count */
347 	payload->ip.daddr = htonl(INADDR_LOOPBACK);
348 	payload->ip.ihl = 5;
349 	payload->ip.check = htons(0xdead);
350 	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
351 	payload->ip.version = IPVERSION;
352 	payload->ip.protocol = IPPROTO_UDP;
353 
354 	/* Initialise udp header */
355 	payload->udp.source = 0;
356 	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
357 				 sizeof(struct iphdr));
358 	payload->udp.check = 0;	/* checksum ignored */
359 
360 	/* Fill out payload */
361 	payload->iteration = htons(ntohs(payload->iteration) + 1);
362 	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
363 
364 	/* Fill out remaining state members */
365 	atomic_set(&state->rx_good, 0);
366 	atomic_set(&state->rx_bad, 0);
367 	smp_wmb();
368 }
369 
370 static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
371 {
372 	struct efx_nic *efx = tx_queue->efx;
373 	struct efx_loopback_state *state = efx->loopback_selftest;
374 	struct efx_loopback_payload *payload;
375 	struct sk_buff *skb;
376 	int i;
377 	netdev_tx_t rc;
378 
379 	/* Transmit N copies of buffer */
380 	for (i = 0; i < state->packet_count; i++) {
381 		/* Allocate an skb, holding an extra reference for
382 		 * transmit completion counting */
383 		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
384 		if (!skb)
385 			return -ENOMEM;
386 		state->skbs[i] = skb;
387 		skb_get(skb);
388 
389 		/* Copy the payload in, incrementing the source address to
390 		 * exercise the rss vectors */
391 		payload = ((struct efx_loopback_payload *)
392 			   skb_put(skb, sizeof(state->payload)));
393 		memcpy(payload, &state->payload, sizeof(state->payload));
394 		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
395 
396 		/* Ensure everything we've written is visible to the
397 		 * interrupt handler. */
398 		smp_wmb();
399 
400 		netif_tx_lock_bh(efx->net_dev);
401 		rc = efx_enqueue_skb(tx_queue, skb);
402 		netif_tx_unlock_bh(efx->net_dev);
403 
404 		if (rc != NETDEV_TX_OK) {
405 			netif_err(efx, drv, efx->net_dev,
406 				  "TX queue %d could not transmit packet %d of "
407 				  "%d in %s loopback test\n", tx_queue->queue,
408 				  i + 1, state->packet_count,
409 				  LOOPBACK_MODE(efx));
410 
411 			/* Defer cleaning up the other skbs for the caller */
412 			kfree_skb(skb);
413 			return -EPIPE;
414 		}
415 	}
416 
417 	return 0;
418 }
419 
420 static int efx_poll_loopback(struct efx_nic *efx)
421 {
422 	struct efx_loopback_state *state = efx->loopback_selftest;
423 	struct efx_channel *channel;
424 
425 	/* NAPI polling is not enabled, so process channels
426 	 * synchronously */
427 	efx_for_each_channel(channel, efx) {
428 		if (channel->work_pending)
429 			efx_process_channel_now(channel);
430 	}
431 	return atomic_read(&state->rx_good) == state->packet_count;
432 }
433 
434 static int efx_end_loopback(struct efx_tx_queue *tx_queue,
435 			    struct efx_loopback_self_tests *lb_tests)
436 {
437 	struct efx_nic *efx = tx_queue->efx;
438 	struct efx_loopback_state *state = efx->loopback_selftest;
439 	struct sk_buff *skb;
440 	int tx_done = 0, rx_good, rx_bad;
441 	int i, rc = 0;
442 
443 	netif_tx_lock_bh(efx->net_dev);
444 
445 	/* Count the number of tx completions, and decrement the refcnt. Any
446 	 * skbs not already completed will be free'd when the queue is flushed */
447 	for (i = 0; i < state->packet_count; i++) {
448 		skb = state->skbs[i];
449 		if (skb && !skb_shared(skb))
450 			++tx_done;
451 		dev_kfree_skb_any(skb);
452 	}
453 
454 	netif_tx_unlock_bh(efx->net_dev);
455 
456 	/* Check TX completion and received packet counts */
457 	rx_good = atomic_read(&state->rx_good);
458 	rx_bad = atomic_read(&state->rx_bad);
459 	if (tx_done != state->packet_count) {
460 		/* Don't free the skbs; they will be picked up on TX
461 		 * overflow or channel teardown.
462 		 */
463 		netif_err(efx, drv, efx->net_dev,
464 			  "TX queue %d saw only %d out of an expected %d "
465 			  "TX completion events in %s loopback test\n",
466 			  tx_queue->queue, tx_done, state->packet_count,
467 			  LOOPBACK_MODE(efx));
468 		rc = -ETIMEDOUT;
469 		/* Allow to fall through so we see the RX errors as well */
470 	}
471 
472 	/* We may always be up to a flush away from our desired packet total */
473 	if (rx_good != state->packet_count) {
474 		netif_dbg(efx, drv, efx->net_dev,
475 			  "TX queue %d saw only %d out of an expected %d "
476 			  "received packets in %s loopback test\n",
477 			  tx_queue->queue, rx_good, state->packet_count,
478 			  LOOPBACK_MODE(efx));
479 		rc = -ETIMEDOUT;
480 		/* Fall through */
481 	}
482 
483 	/* Update loopback test structure */
484 	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
485 	lb_tests->tx_done[tx_queue->queue] += tx_done;
486 	lb_tests->rx_good += rx_good;
487 	lb_tests->rx_bad += rx_bad;
488 
489 	return rc;
490 }
491 
492 static int
493 efx_test_loopback(struct efx_tx_queue *tx_queue,
494 		  struct efx_loopback_self_tests *lb_tests)
495 {
496 	struct efx_nic *efx = tx_queue->efx;
497 	struct efx_loopback_state *state = efx->loopback_selftest;
498 	int i, begin_rc, end_rc;
499 
500 	for (i = 0; i < 3; i++) {
501 		/* Determine how many packets to send */
502 		state->packet_count = efx->txq_entries / 3;
503 		state->packet_count = min(1 << (i << 2), state->packet_count);
504 		state->skbs = kcalloc(state->packet_count,
505 				      sizeof(state->skbs[0]), GFP_KERNEL);
506 		if (!state->skbs)
507 			return -ENOMEM;
508 		state->flush = false;
509 
510 		netif_dbg(efx, drv, efx->net_dev,
511 			  "TX queue %d testing %s loopback with %d packets\n",
512 			  tx_queue->queue, LOOPBACK_MODE(efx),
513 			  state->packet_count);
514 
515 		efx_iterate_state(efx);
516 		begin_rc = efx_begin_loopback(tx_queue);
517 
518 		/* This will normally complete very quickly, but be
519 		 * prepared to wait up to 100 ms. */
520 		msleep(1);
521 		if (!efx_poll_loopback(efx)) {
522 			msleep(100);
523 			efx_poll_loopback(efx);
524 		}
525 
526 		end_rc = efx_end_loopback(tx_queue, lb_tests);
527 		kfree(state->skbs);
528 
529 		if (begin_rc || end_rc) {
530 			/* Wait a while to ensure there are no packets
531 			 * floating around after a failure. */
532 			schedule_timeout_uninterruptible(HZ / 10);
533 			return begin_rc ? begin_rc : end_rc;
534 		}
535 	}
536 
537 	netif_dbg(efx, drv, efx->net_dev,
538 		  "TX queue %d passed %s loopback test with a burst length "
539 		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
540 		  state->packet_count);
541 
542 	return 0;
543 }
544 
545 /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
546  * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
547  * to delay and retry. Therefore, it's safer to just poll directly. Wait
548  * for link up and any faults to dissipate. */
549 static int efx_wait_for_link(struct efx_nic *efx)
550 {
551 	struct efx_link_state *link_state = &efx->link_state;
552 	int count, link_up_count = 0;
553 	bool link_up;
554 
555 	for (count = 0; count < 40; count++) {
556 		schedule_timeout_uninterruptible(HZ / 10);
557 
558 		if (efx->type->monitor != NULL) {
559 			mutex_lock(&efx->mac_lock);
560 			efx->type->monitor(efx);
561 			mutex_unlock(&efx->mac_lock);
562 		} else {
563 			struct efx_channel *channel = efx_get_channel(efx, 0);
564 			if (channel->work_pending)
565 				efx_process_channel_now(channel);
566 		}
567 
568 		mutex_lock(&efx->mac_lock);
569 		link_up = link_state->up;
570 		if (link_up)
571 			link_up = !efx->type->check_mac_fault(efx);
572 		mutex_unlock(&efx->mac_lock);
573 
574 		if (link_up) {
575 			if (++link_up_count == 2)
576 				return 0;
577 		} else {
578 			link_up_count = 0;
579 		}
580 	}
581 
582 	return -ETIMEDOUT;
583 }
584 
585 static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
586 			      unsigned int loopback_modes)
587 {
588 	enum efx_loopback_mode mode;
589 	struct efx_loopback_state *state;
590 	struct efx_channel *channel = efx_get_channel(efx, 0);
591 	struct efx_tx_queue *tx_queue;
592 	int rc = 0;
593 
594 	/* Set the port loopback_selftest member. From this point on
595 	 * all received packets will be dropped. Mark the state as
596 	 * "flushing" so all inflight packets are dropped */
597 	state = kzalloc(sizeof(*state), GFP_KERNEL);
598 	if (state == NULL)
599 		return -ENOMEM;
600 	BUG_ON(efx->loopback_selftest);
601 	state->flush = true;
602 	efx->loopback_selftest = state;
603 
604 	/* Test all supported loopback modes */
605 	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
606 		if (!(loopback_modes & (1 << mode)))
607 			continue;
608 
609 		/* Move the port into the specified loopback mode. */
610 		state->flush = true;
611 		mutex_lock(&efx->mac_lock);
612 		efx->loopback_mode = mode;
613 		rc = __efx_reconfigure_port(efx);
614 		mutex_unlock(&efx->mac_lock);
615 		if (rc) {
616 			netif_err(efx, drv, efx->net_dev,
617 				  "unable to move into %s loopback\n",
618 				  LOOPBACK_MODE(efx));
619 			goto out;
620 		}
621 
622 		rc = efx_wait_for_link(efx);
623 		if (rc) {
624 			netif_err(efx, drv, efx->net_dev,
625 				  "loopback %s never came up\n",
626 				  LOOPBACK_MODE(efx));
627 			goto out;
628 		}
629 
630 		/* Test all enabled types of TX queue */
631 		efx_for_each_channel_tx_queue(tx_queue, channel) {
632 			state->offload_csum = (tx_queue->queue &
633 					       EFX_TXQ_TYPE_OFFLOAD);
634 			rc = efx_test_loopback(tx_queue,
635 					       &tests->loopback[mode]);
636 			if (rc)
637 				goto out;
638 		}
639 	}
640 
641  out:
642 	/* Remove the flush. The caller will remove the loopback setting */
643 	state->flush = true;
644 	efx->loopback_selftest = NULL;
645 	wmb();
646 	kfree(state);
647 
648 	return rc;
649 }
650 
651 /**************************************************************************
652  *
653  * Entry point
654  *
655  *************************************************************************/
656 
/* Run the requested self-tests.  The online (non-disruptive) tests are
 * always run; the offline tests run only when ETH_TEST_FL_OFFLINE is
 * set in @flags, and temporarily detach the device and reset the NIC.
 * Returns the first failure code encountered, or 0 if all tests passed.
 */
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	/* Saved so the port can be restored after the loopback tests */
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	struct efx_channel *channel;
	int rc_test = 0, rc_reset = 0, rc;

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	efx_for_each_channel(channel, efx) {
		rc = efx_test_eventq_irq(channel, tests);
		if (rc && !rc_test)
			rc_test = rc;
	}

	/* If any online test failed, skip the disruptive offline tests */
	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	netif_device_detach(efx->net_dev);

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}

	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* free up all consumers of SRAM (including all the queues) */
	efx_reset_down(efx, reset_method);

	rc = efx_test_chip(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	/* reset the chip to recover from the register test */
	rc_reset = efx->type->reset(efx, reset_method);

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;

	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
	if (rc && !rc_reset)
		rc_reset = rc;

	/* A failed recovery is unrecoverable here: schedule a full
	 * disable and report the reset failure */
	if (rc_reset) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to recover from chip test\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc_reset;
	}

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}
759 
760