1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3  * Driver for Solarflare network controllers and boards
4  * Copyright 2008-2013 Solarflare Communications Inc.
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/moduleparam.h>
9 #include <linux/atomic.h>
10 #include "net_driver.h"
11 #include "nic.h"
12 #include "io.h"
13 #include "mcdi_pcol.h"
14 
15 /**************************************************************************
16  *
17  * Management-Controller-to-Driver Interface
18  *
19  **************************************************************************
20  */
21 
22 #define MCDI_RPC_TIMEOUT       (10 * HZ)
23 
24 /* A reboot/assertion causes the MCDI status word to be set after the
25  * command word is set or a REBOOT event is sent. If we notice a reboot
26  * via these mechanisms then wait 250ms for the status word to be set.
27  */
28 #define MCDI_STATUS_DELAY_US		100
29 #define MCDI_STATUS_DELAY_COUNT		2500
30 #define MCDI_STATUS_SLEEP_MS						\
31 	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
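
/* A worked instance of the arithmetic above (illustrative):
 *   MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000
 *   = 100 * 2500 / 1000 = 250 (ms)
 * i.e. the 250ms status-word wait described in the comment above.
 */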
32 
33 #define SEQ_MASK							\
34 	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
35 
36 struct efx_mcdi_async_param {
37 	struct list_head list;
38 	unsigned int cmd;
39 	size_t inlen;
40 	size_t outlen;
41 	bool quiet;
42 	efx_mcdi_async_completer *complete;
43 	unsigned long cookie;
44 	/* followed by request/response buffer */
45 };
46 
47 static void efx_mcdi_timeout_async(struct timer_list *t);
48 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
49 			       bool *was_attached_out);
50 static bool efx_mcdi_poll_once(struct efx_nic *efx);
51 static void efx_mcdi_abandon(struct efx_nic *efx);
52 
53 #ifdef CONFIG_SFC_MCDI_LOGGING
54 static bool mcdi_logging_default;
55 module_param(mcdi_logging_default, bool, 0644);
56 MODULE_PARM_DESC(mcdi_logging_default,
57 		 "Enable MCDI logging on newly-probed functions");
58 #endif
59 
60 int efx_mcdi_init(struct efx_nic *efx)
61 {
62 	struct efx_mcdi_iface *mcdi;
63 	bool already_attached;
64 	int rc = -ENOMEM;
65 
66 	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
67 	if (!efx->mcdi)
68 		goto fail;
69 
70 	mcdi = efx_mcdi(efx);
71 	mcdi->efx = efx;
72 #ifdef CONFIG_SFC_MCDI_LOGGING
73 	/* consuming code assumes buffer is page-sized */
74 	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
75 	if (!mcdi->logging_buffer)
76 		goto fail1;
77 	mcdi->logging_enabled = mcdi_logging_default;
78 #endif
79 	init_waitqueue_head(&mcdi->wq);
80 	init_waitqueue_head(&mcdi->proxy_rx_wq);
81 	spin_lock_init(&mcdi->iface_lock);
82 	mcdi->state = MCDI_STATE_QUIESCENT;
83 	mcdi->mode = MCDI_MODE_POLL;
84 	spin_lock_init(&mcdi->async_lock);
85 	INIT_LIST_HEAD(&mcdi->async_list);
86 	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
87 
88 	(void) efx_mcdi_poll_reboot(efx);
89 	mcdi->new_epoch = true;
90 
91 	/* Recover from a failed assertion before probing */
92 	rc = efx_mcdi_handle_assertion(efx);
93 	if (rc)
94 		goto fail2;
95 
96 	/* Let the MC (and BMC, if this is a LOM) know that the driver
97 	 * is loaded. We should do this before we reset the NIC.
98 	 */
99 	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
100 	if (rc) {
101 		pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
102 		goto fail2;
103 	}
104 	if (already_attached)
105 		/* Not a fatal error */
106 		pci_err(efx->pci_dev, "Host already registered with MCPU\n");
107 
108 	if (efx->mcdi->fn_flags &
109 	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
110 		efx->primary = efx;
111 
112 	return 0;
113 fail2:
114 #ifdef CONFIG_SFC_MCDI_LOGGING
115 	free_page((unsigned long)mcdi->logging_buffer);
116 fail1:
117 #endif
118 	kfree(efx->mcdi);
119 	efx->mcdi = NULL;
120 fail:
121 	return rc;
122 }
123 
124 void efx_mcdi_detach(struct efx_nic *efx)
125 {
126 	if (!efx->mcdi)
127 		return;
128 
129 	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
130 
131 	/* Relinquish the device (back to the BMC, if this is a LOM) */
132 	efx_mcdi_drv_attach(efx, false, NULL);
133 }
134 
135 void efx_mcdi_fini(struct efx_nic *efx)
136 {
137 	if (!efx->mcdi)
138 		return;
139 
140 #ifdef CONFIG_SFC_MCDI_LOGGING
141 	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
142 #endif
143 
144 	kfree(efx->mcdi);
145 }
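
/* Illustrative lifecycle for the three functions above; the caller shown
 * here is an assumption for illustration, not code from this driver:
 *
 *	rc = efx_mcdi_init(efx);	// attach to the MC at probe time
 *	if (!rc) {
 *		// ... issue MCDI requests ...
 *		efx_mcdi_detach(efx);	// relinquish the device
 *	}
 *	efx_mcdi_fini(efx);		// free the interface state
 */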
146 
147 static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
148 				  const efx_dword_t *inbuf, size_t inlen)
149 {
150 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
151 #ifdef CONFIG_SFC_MCDI_LOGGING
152 	char *buf = mcdi->logging_buffer; /* page-sized */
153 #endif
154 	efx_dword_t hdr[2];
155 	size_t hdr_len;
156 	u32 xflags, seqno;
157 
158 	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
159 
160 	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
161 	spin_lock_bh(&mcdi->iface_lock);
162 	++mcdi->seqno;
163 	seqno = mcdi->seqno & SEQ_MASK;
164 	spin_unlock_bh(&mcdi->iface_lock);
165 
166 	xflags = 0;
167 	if (mcdi->mode == MCDI_MODE_EVENTS)
168 		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
169 
170 	if (efx->type->mcdi_max_ver == 1) {
171 		/* MCDI v1 */
172 		EFX_POPULATE_DWORD_7(hdr[0],
173 				     MCDI_HEADER_RESPONSE, 0,
174 				     MCDI_HEADER_RESYNC, 1,
175 				     MCDI_HEADER_CODE, cmd,
176 				     MCDI_HEADER_DATALEN, inlen,
177 				     MCDI_HEADER_SEQ, seqno,
178 				     MCDI_HEADER_XFLAGS, xflags,
179 				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
180 		hdr_len = 4;
181 	} else {
182 		/* MCDI v2 */
183 		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
184 		EFX_POPULATE_DWORD_7(hdr[0],
185 				     MCDI_HEADER_RESPONSE, 0,
186 				     MCDI_HEADER_RESYNC, 1,
187 				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
188 				     MCDI_HEADER_DATALEN, 0,
189 				     MCDI_HEADER_SEQ, seqno,
190 				     MCDI_HEADER_XFLAGS, xflags,
191 				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
192 		EFX_POPULATE_DWORD_2(hdr[1],
193 				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
194 				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
195 		hdr_len = 8;
196 	}
197 
198 #ifdef CONFIG_SFC_MCDI_LOGGING
199 	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
200 		int bytes = 0;
201 		int i;
202 		/* Lengths should always be a whole number of dwords, so scream
203 		 * if they're not.
204 		 */
205 		WARN_ON_ONCE(hdr_len % 4);
206 		WARN_ON_ONCE(inlen % 4);
207 
208 		/* We own the logging buffer, as only one MCDI can be in
209 		 * progress on a NIC at any one time.  So no need for locking.
210 		 */
211 		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
212 			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
213 					   " %08x",
214 					   le32_to_cpu(hdr[i].u32[0]));
215 
216 		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
217 			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
218 					   " %08x",
219 					   le32_to_cpu(inbuf[i].u32[0]));
220 
221 		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
222 	}
223 #endif
224 
225 	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
226 
227 	mcdi->new_epoch = false;
228 }
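
/* Sketch of the two header formats built above (field names are from
 * mcdi_pcol.h):
 *
 *   v1: one dword; CODE carries the command, DATALEN the SDU length,
 *       plus SEQ/XFLAGS/NOT_EPOCH.  hdr_len = 4.
 *   v2: two dwords; dword 0 uses the MC_CMD_V2_EXTN escape with
 *       DATALEN = 0, and dword 1 carries the real command in
 *       EXTENDED_CMD and the SDU length in ACTUAL_LEN.  hdr_len = 8.
 */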
229 
230 static int efx_mcdi_errno(unsigned int mcdi_err)
231 {
232 	switch (mcdi_err) {
233 	case 0:
234 		return 0;
235 #define TRANSLATE_ERROR(name)					\
236 	case MC_CMD_ERR_ ## name:				\
237 		return -name;
238 	TRANSLATE_ERROR(EPERM);
239 	TRANSLATE_ERROR(ENOENT);
240 	TRANSLATE_ERROR(EINTR);
241 	TRANSLATE_ERROR(EAGAIN);
242 	TRANSLATE_ERROR(EACCES);
243 	TRANSLATE_ERROR(EBUSY);
244 	TRANSLATE_ERROR(EINVAL);
245 	TRANSLATE_ERROR(EDEADLK);
246 	TRANSLATE_ERROR(ENOSYS);
247 	TRANSLATE_ERROR(ETIME);
248 	TRANSLATE_ERROR(EALREADY);
249 	TRANSLATE_ERROR(ENOSPC);
250 #undef TRANSLATE_ERROR
251 	case MC_CMD_ERR_ENOTSUP:
252 		return -EOPNOTSUPP;
253 	case MC_CMD_ERR_ALLOC_FAIL:
254 		return -ENOBUFS;
255 	case MC_CMD_ERR_MAC_EXIST:
256 		return -EADDRINUSE;
257 	default:
258 		return -EPROTO;
259 	}
260 }
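
/* For example, the TRANSLATE_ERROR() instances above expand to plain
 * switch cases such as:
 *
 *	case MC_CMD_ERR_ENOENT:
 *		return -ENOENT;
 */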
261 
262 static void efx_mcdi_read_response_header(struct efx_nic *efx)
263 {
264 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
265 	unsigned int respseq, respcmd, error;
266 #ifdef CONFIG_SFC_MCDI_LOGGING
267 	char *buf = mcdi->logging_buffer; /* page-sized */
268 #endif
269 	efx_dword_t hdr;
270 
271 	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
272 	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
273 	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
274 	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
275 
276 	if (respcmd != MC_CMD_V2_EXTN) {
277 		mcdi->resp_hdr_len = 4;
278 		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
279 	} else {
280 		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
281 		mcdi->resp_hdr_len = 8;
282 		mcdi->resp_data_len =
283 			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
284 	}
285 
286 #ifdef CONFIG_SFC_MCDI_LOGGING
287 	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
288 		size_t hdr_len, data_len;
289 		int bytes = 0;
290 		int i;
291 
292 		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
293 		hdr_len = mcdi->resp_hdr_len / 4;
294 		/* MCDI_DECLARE_BUF ensures that the underlying buffer is padded
295 		 * to dword size, and the MCDI buffer is always dword size
296 		 */
297 		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
298 
299 		/* We own the logging buffer, as only one MCDI can be in
300 		 * progress on a NIC at any one time.  So no need for locking.
301 		 */
302 		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
303 			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
304 			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
305 					   " %08x", le32_to_cpu(hdr.u32[0]));
306 		}
307 
308 		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
309 			efx->type->mcdi_read_response(efx, &hdr,
310 					mcdi->resp_hdr_len + (i * 4), 4);
311 			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
312 					   " %08x", le32_to_cpu(hdr.u32[0]));
313 		}
314 
315 		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
316 	}
317 #endif
318 
319 	mcdi->resprc_raw = 0;
320 	if (error && mcdi->resp_data_len == 0) {
321 		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
322 		mcdi->resprc = -EIO;
323 	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
324 		netif_err(efx, hw, efx->net_dev,
325 			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
326 			  respseq, mcdi->seqno);
327 		mcdi->resprc = -EIO;
328 	} else if (error) {
329 		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
330 		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
331 		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
332 	} else {
333 		mcdi->resprc = 0;
334 	}
335 }
336 
337 static bool efx_mcdi_poll_once(struct efx_nic *efx)
338 {
339 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
340 
341 	rmb();
342 	if (!efx->type->mcdi_poll_response(efx))
343 		return false;
344 
345 	spin_lock_bh(&mcdi->iface_lock);
346 	efx_mcdi_read_response_header(efx);
347 	spin_unlock_bh(&mcdi->iface_lock);
348 
349 	return true;
350 }
351 
352 static int efx_mcdi_poll(struct efx_nic *efx)
353 {
354 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
355 	unsigned long time, finish;
356 	unsigned int spins;
357 	int rc;
358 
359 	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
360 	rc = efx_mcdi_poll_reboot(efx);
361 	if (rc) {
362 		spin_lock_bh(&mcdi->iface_lock);
363 		mcdi->resprc = rc;
364 		mcdi->resp_hdr_len = 0;
365 		mcdi->resp_data_len = 0;
366 		spin_unlock_bh(&mcdi->iface_lock);
367 		return 0;
368 	}
369 
370 	/* Poll for completion. Poll quickly (once per microsecond) for the
371 	 * 1st jiffy, because MCDI responses are generally fast. After that,
372 	 * back off and poll approximately once per jiffy.
373 	 */
374 	spins = USER_TICK_USEC;
375 	finish = jiffies + MCDI_RPC_TIMEOUT;
376 
377 	while (1) {
378 		if (spins != 0) {
379 			--spins;
380 			udelay(1);
381 		} else {
382 			schedule_timeout_uninterruptible(1);
383 		}
384 
385 		time = jiffies;
386 
387 		if (efx_mcdi_poll_once(efx))
388 			break;
389 
390 		if (time_after(time, finish))
391 			return -ETIMEDOUT;
392 	}
393 
394 	/* Return rc=0 like wait_event_timeout() */
395 	return 0;
396 }
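
/* Illustrative timing for the loop above: up to USER_TICK_USEC calls to
 * udelay(1) (roughly one jiffy of once-per-microsecond polling), then one
 * poll per jiffy via schedule_timeout_uninterruptible(1), giving up after
 * MCDI_RPC_TIMEOUT (10 * HZ, i.e. 10 seconds).
 */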
397 
398 /* Test and clear MC-rebooted flag for this port/function; reset
399  * software state as necessary.
400  */
401 int efx_mcdi_poll_reboot(struct efx_nic *efx)
402 {
403 	if (!efx->mcdi)
404 		return 0;
405 
406 	return efx->type->mcdi_poll_reboot(efx);
407 }
408 
409 static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
410 {
411 	return cmpxchg(&mcdi->state,
412 		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
413 		MCDI_STATE_QUIESCENT;
414 }
415 
416 static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
417 {
418 	/* Wait until the interface becomes QUIESCENT and we win the race
419 	 * to mark it RUNNING_SYNC.
420 	 */
421 	wait_event(mcdi->wq,
422 		   cmpxchg(&mcdi->state,
423 			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
424 		   MCDI_STATE_QUIESCENT);
425 }
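
/* The cmpxchg()-based helpers above and the completion/release helpers
 * below implement a small lockless state machine (sketch):
 *
 *   QUIESCENT --acquire--> RUNNING_SYNC or RUNNING_ASYNC
 *             --complete--> COMPLETED
 *             --release---> QUIESCENT (or straight into the next queued
 *                           async request in event mode)
 */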
426 
427 static int efx_mcdi_await_completion(struct efx_nic *efx)
428 {
429 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
430 
431 	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
432 			       MCDI_RPC_TIMEOUT) == 0)
433 		return -ETIMEDOUT;
434 
435 	/* Check if efx_mcdi_set_mode() switched us back to polled completions,
436 	 * in which case poll for completions directly. If efx_mcdi_ev_cpl()
437 	 * completed the request first, then we'll just end up completing the
438 	 * request again, which is safe.
439 	 *
440 	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
441 	 * wait_event_timeout() implicitly provides.
442 	 */
443 	if (mcdi->mode == MCDI_MODE_POLL)
444 		return efx_mcdi_poll(efx);
445 
446 	return 0;
447 }
448 
449 /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
450  * requester.  Return whether this was done.  Does not take any locks.
451  */
452 static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
453 {
454 	if (cmpxchg(&mcdi->state,
455 		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
456 	    MCDI_STATE_RUNNING_SYNC) {
457 		wake_up(&mcdi->wq);
458 		return true;
459 	}
460 
461 	return false;
462 }
463 
464 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
465 {
466 	if (mcdi->mode == MCDI_MODE_EVENTS) {
467 		struct efx_mcdi_async_param *async;
468 		struct efx_nic *efx = mcdi->efx;
469 
470 		/* Process the asynchronous request queue */
471 		spin_lock_bh(&mcdi->async_lock);
472 		async = list_first_entry_or_null(
473 			&mcdi->async_list, struct efx_mcdi_async_param, list);
474 		if (async) {
475 			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
476 			efx_mcdi_send_request(efx, async->cmd,
477 					      (const efx_dword_t *)(async + 1),
478 					      async->inlen);
479 			mod_timer(&mcdi->async_timer,
480 				  jiffies + MCDI_RPC_TIMEOUT);
481 		}
482 		spin_unlock_bh(&mcdi->async_lock);
483 
484 		if (async)
485 			return;
486 	}
487 
488 	mcdi->state = MCDI_STATE_QUIESCENT;
489 	wake_up(&mcdi->wq);
490 }
491 
492 /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
493  * asynchronous completion function, and release the interface.
494  * Return whether this was done.  Must be called in bh-disabled
495  * context.  Will take iface_lock and async_lock.
496  */
497 static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
498 {
499 	struct efx_nic *efx = mcdi->efx;
500 	struct efx_mcdi_async_param *async;
501 	size_t hdr_len, data_len, err_len;
502 	efx_dword_t *outbuf;
503 	MCDI_DECLARE_BUF_ERR(errbuf);
504 	int rc;
505 
506 	if (cmpxchg(&mcdi->state,
507 		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
508 	    MCDI_STATE_RUNNING_ASYNC)
509 		return false;
510 
511 	spin_lock(&mcdi->iface_lock);
512 	if (timeout) {
513 		/* Ensure that if the completion event arrives later,
514 		 * the seqno check in efx_mcdi_ev_cpl() will fail
515 		 */
516 		++mcdi->seqno;
517 		++mcdi->credits;
518 		rc = -ETIMEDOUT;
519 		hdr_len = 0;
520 		data_len = 0;
521 	} else {
522 		rc = mcdi->resprc;
523 		hdr_len = mcdi->resp_hdr_len;
524 		data_len = mcdi->resp_data_len;
525 	}
526 	spin_unlock(&mcdi->iface_lock);
527 
528 	/* Stop the timer.  In case the timer function is running, we
529 	 * must wait for it to return so that there is no possibility
530 	 * of it aborting the next request.
531 	 */
532 	if (!timeout)
533 		del_timer_sync(&mcdi->async_timer);
534 
535 	spin_lock(&mcdi->async_lock);
536 	async = list_first_entry(&mcdi->async_list,
537 				 struct efx_mcdi_async_param, list);
538 	list_del(&async->list);
539 	spin_unlock(&mcdi->async_lock);
540 
541 	outbuf = (efx_dword_t *)(async + 1);
542 	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
543 				      min(async->outlen, data_len));
544 	if (!timeout && rc && !async->quiet) {
545 		err_len = min(sizeof(errbuf), data_len);
546 		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
547 					      sizeof(errbuf));
548 		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
549 				       err_len, rc);
550 	}
551 
552 	if (async->complete)
553 		async->complete(efx, async->cookie, rc, outbuf,
554 				min(async->outlen, data_len));
555 	kfree(async);
556 
557 	efx_mcdi_release(mcdi);
558 
559 	return true;
560 }
561 
562 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
563 			    unsigned int datalen, unsigned int mcdi_err)
564 {
565 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
566 	bool wake = false;
567 
568 	spin_lock(&mcdi->iface_lock);
569 
570 	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
571 		if (mcdi->credits)
572 			/* The request has been cancelled */
573 			--mcdi->credits;
574 		else
575 			netif_err(efx, hw, efx->net_dev,
576 				  "MC response mismatch tx seq 0x%x rx "
577 				  "seq 0x%x\n", seqno, mcdi->seqno);
578 	} else {
579 		if (efx->type->mcdi_max_ver >= 2) {
580 			/* MCDI v2 responses don't fit in an event */
581 			efx_mcdi_read_response_header(efx);
582 		} else {
583 			mcdi->resprc = efx_mcdi_errno(mcdi_err);
584 			mcdi->resp_hdr_len = 4;
585 			mcdi->resp_data_len = datalen;
586 		}
587 
588 		wake = true;
589 	}
590 
591 	spin_unlock(&mcdi->iface_lock);
592 
593 	if (wake) {
594 		if (!efx_mcdi_complete_async(mcdi, false))
595 			(void) efx_mcdi_complete_sync(mcdi);
596 
597 		/* If the interface isn't RUNNING_ASYNC or
598 		 * RUNNING_SYNC then we've received a duplicate
599 		 * completion after we've already transitioned back to
600 		 * QUIESCENT. [A subsequent invocation would increment
601 		 * seqno, so would have failed the seqno check].
602 		 */
603 	}
604 }
605 
606 static void efx_mcdi_timeout_async(struct timer_list *t)
607 {
608 	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
609 
610 	efx_mcdi_complete_async(mcdi, true);
611 }
612 
613 static int
614 efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
615 {
616 	if (efx->type->mcdi_max_ver < 0 ||
617 	     (efx->type->mcdi_max_ver < 2 &&
618 	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
619 		return -EINVAL;
620 
621 	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
622 	    (efx->type->mcdi_max_ver < 2 &&
623 	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
624 		return -EMSGSIZE;
625 
626 	return 0;
627 }
628 
629 static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
630 				      size_t hdr_len, size_t data_len,
631 				      u32 *proxy_handle)
632 {
633 	MCDI_DECLARE_BUF_ERR(testbuf);
634 	const size_t buflen = sizeof(testbuf);
635 
636 	if (!proxy_handle || data_len < buflen)
637 		return false;
638 
639 	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
640 	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
641 		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
642 		return true;
643 	}
644 
645 	return false;
646 }
647 
648 static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
649 				size_t inlen,
650 				efx_dword_t *outbuf, size_t outlen,
651 				size_t *outlen_actual, bool quiet,
652 				u32 *proxy_handle, int *raw_rc)
653 {
654 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
655 	MCDI_DECLARE_BUF_ERR(errbuf);
656 	int rc;
657 
658 	if (mcdi->mode == MCDI_MODE_POLL)
659 		rc = efx_mcdi_poll(efx);
660 	else
661 		rc = efx_mcdi_await_completion(efx);
662 
663 	if (rc != 0) {
664 		netif_err(efx, hw, efx->net_dev,
665 			  "MC command 0x%x inlen %d mode %d timed out\n",
666 			  cmd, (int)inlen, mcdi->mode);
667 
668 		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
669 			netif_err(efx, hw, efx->net_dev,
670 				  "MCDI request was completed without an event\n");
671 			rc = 0;
672 		}
673 
674 		efx_mcdi_abandon(efx);
675 
676 		/* Close the race with efx_mcdi_ev_cpl() executing just too late
677 		 * and completing a request we've just cancelled, by ensuring
678 		 * that the seqno check therein fails.
679 		 */
680 		spin_lock_bh(&mcdi->iface_lock);
681 		++mcdi->seqno;
682 		++mcdi->credits;
683 		spin_unlock_bh(&mcdi->iface_lock);
684 	}
685 
686 	if (proxy_handle)
687 		*proxy_handle = 0;
688 
689 	if (rc != 0) {
690 		if (outlen_actual)
691 			*outlen_actual = 0;
692 	} else {
693 		size_t hdr_len, data_len, err_len;
694 
695 		/* At the very least we need a memory barrier here to ensure
696 		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
697 		 * a spurious efx_mcdi_ev_cpl() running concurrently by
698 		 * acquiring the iface_lock. */
699 		spin_lock_bh(&mcdi->iface_lock);
700 		rc = mcdi->resprc;
701 		if (raw_rc)
702 			*raw_rc = mcdi->resprc_raw;
703 		hdr_len = mcdi->resp_hdr_len;
704 		data_len = mcdi->resp_data_len;
705 		err_len = min(sizeof(errbuf), data_len);
706 		spin_unlock_bh(&mcdi->iface_lock);
707 
708 		BUG_ON(rc > 0);
709 
710 		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
711 					      min(outlen, data_len));
712 		if (outlen_actual)
713 			*outlen_actual = data_len;
714 
715 		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
716 
717 		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
718 			/* Don't reset if MC_CMD_REBOOT returns EIO */
719 		} else if (rc == -EIO || rc == -EINTR) {
720 			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
721 			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
722 				  cmd, -rc);
723 			if (efx->type->mcdi_reboot_detected)
724 				efx->type->mcdi_reboot_detected(efx);
725 			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
726 		} else if (proxy_handle && (rc == -EPROTO) &&
727 			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
728 						     proxy_handle)) {
729 			mcdi->proxy_rx_status = 0;
730 			mcdi->proxy_rx_handle = 0;
731 			mcdi->state = MCDI_STATE_PROXY_WAIT;
732 		} else if (rc && !quiet) {
733 			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
734 					       rc);
735 		}
736 
737 		if (rc == -EIO || rc == -EINTR) {
738 			msleep(MCDI_STATUS_SLEEP_MS);
739 			efx_mcdi_poll_reboot(efx);
740 			mcdi->new_epoch = true;
741 		}
742 	}
743 
744 	if (!proxy_handle || !*proxy_handle)
745 		efx_mcdi_release(mcdi);
746 	return rc;
747 }
748 
749 static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
750 {
751 	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
752 		/* Interrupt the proxy wait. */
753 		mcdi->proxy_rx_status = -EINTR;
754 		wake_up(&mcdi->proxy_rx_wq);
755 	}
756 }
757 
758 static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
759 				       u32 handle, int status)
760 {
761 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
762 
763 	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
764 
765 	mcdi->proxy_rx_status = efx_mcdi_errno(status);
766 	/* Ensure the status is written before we update the handle, since the
767 	 * latter is used to check if we've finished.
768 	 */
769 	wmb();
770 	mcdi->proxy_rx_handle = handle;
771 	wake_up(&mcdi->proxy_rx_wq);
772 }
773 
774 static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
775 {
776 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
777 	int rc;
778 
779 	/* Wait for a proxy event, or timeout. */
780 	rc = wait_event_timeout(mcdi->proxy_rx_wq,
781 				mcdi->proxy_rx_handle != 0 ||
782 				mcdi->proxy_rx_status == -EINTR,
783 				MCDI_RPC_TIMEOUT);
784 
785 	if (rc <= 0) {
786 		netif_dbg(efx, hw, efx->net_dev,
787 			  "MCDI proxy timeout %d\n", handle);
788 		return -ETIMEDOUT;
789 	} else if (mcdi->proxy_rx_handle != handle) {
790 		netif_warn(efx, hw, efx->net_dev,
791 			   "MCDI proxy unexpected handle %d (expected %d)\n",
792 			   mcdi->proxy_rx_handle, handle);
793 		return -EINVAL;
794 	}
795 
796 	return mcdi->proxy_rx_status;
797 }
798 
799 static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
800 			 const efx_dword_t *inbuf, size_t inlen,
801 			 efx_dword_t *outbuf, size_t outlen,
802 			 size_t *outlen_actual, bool quiet, int *raw_rc)
803 {
804 	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
805 	int rc;
806 
807 	if (inbuf && inlen && (inbuf == outbuf)) {
808 		/* The input buffer can't be aliased with the output. */
809 		WARN_ON(1);
810 		return -EINVAL;
811 	}
812 
813 	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
814 	if (rc)
815 		return rc;
816 
817 	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
818 				  outlen_actual, quiet, &proxy_handle, raw_rc);
819 
820 	if (proxy_handle) {
821 		/* Handle proxy authorisation. This allows approval of MCDI
822 		 * operations to be delegated to the admin function, allowing
823 		 * fine control over (eg) multicast subscriptions.
824 		 */
825 		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
826 
827 		netif_dbg(efx, hw, efx->net_dev,
828 			  "MCDI waiting for proxy auth %d\n",
829 			  proxy_handle);
830 		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
831 
832 		if (rc == 0) {
833 			netif_dbg(efx, hw, efx->net_dev,
834 				  "MCDI proxy retry %d\n", proxy_handle);
835 
836 			/* We now retry the original request. */
837 			mcdi->state = MCDI_STATE_RUNNING_SYNC;
838 			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
839 
840 			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
841 						  outbuf, outlen, outlen_actual,
842 						  quiet, NULL, raw_rc);
843 		} else {
844 			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
845 				       "MC command 0x%x failed after proxy auth rc=%d\n",
846 				       cmd, rc);
847 
848 			if (rc == -EINTR || rc == -EIO)
849 				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
850 			efx_mcdi_release(mcdi);
851 		}
852 	}
853 
854 	return rc;
855 }
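
/* Sketch of the proxy-authorisation flow implemented above:
 *
 *  1. _efx_mcdi_rpc_finish() sees MC_CMD_ERR_PROXY_PENDING, records the
 *     handle and moves the interface to MCDI_STATE_PROXY_WAIT.
 *  2. efx_mcdi_proxy_wait() sleeps on proxy_rx_wq until the matching
 *     PROXY_RESPONSE event arrives (efx_mcdi_ev_proxy_response()) or
 *     MCDI_RPC_TIMEOUT expires.
 *  3. If the proxied status is zero the original request is simply
 *     resent; otherwise that status is returned to the caller.
 */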
856 
857 static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
858 				   const efx_dword_t *inbuf, size_t inlen,
859 				   efx_dword_t *outbuf, size_t outlen,
860 				   size_t *outlen_actual, bool quiet)
861 {
862 	int raw_rc = 0;
863 	int rc;
864 
865 	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
866 			   outbuf, outlen, outlen_actual, true, &raw_rc);
867 
868 	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
869 	    efx->type->is_vf) {
870 		/* If the EVB port isn't available within a VF this may
871 		 * mean the PF is still bringing the switch up. We should
872 		 * retry our request shortly.
873 		 */
874 		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
875 		unsigned int delay_us = 10000;
876 
877 		netif_dbg(efx, hw, efx->net_dev,
878 			  "%s: NO_EVB_PORT; will retry request\n",
879 			  __func__);
880 
881 		do {
882 			usleep_range(delay_us, delay_us + 10000);
883 			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
884 					   outbuf, outlen, outlen_actual,
885 					   true, &raw_rc);
886 			if (delay_us < 100000)
887 				delay_us <<= 1;
888 		} while ((rc == -EPROTO) &&
889 			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
890 			 time_before(jiffies, abort_time));
891 	}
892 
893 	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
894 		efx_mcdi_display_error(efx, cmd, inlen,
895 				       outbuf, outlen, rc);
896 
897 	return rc;
898 }
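
/* Worked example of the backoff above (values are illustrative): sleeps
 * start at ~10ms and double on each retry -- 10ms, 20ms, 40ms, 80ms --
 * then settle at ~160ms once delay_us reaches the 100000us doubling
 * cutoff, until NO_EVB_PORT clears or MCDI_RPC_TIMEOUT is exceeded.
 */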
899 
900 /**
901  * efx_mcdi_rpc - Issue an MCDI command and wait for completion
902  * @efx: NIC through which to issue the command
903  * @cmd: Command type number
904  * @inbuf: Command parameters
905  * @inlen: Length of command parameters, in bytes.  Must be a multiple
906  *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
907  * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
908  * @outlen: Length of response buffer, in bytes.  If the actual
909  *	response is longer than @outlen & ~3, it will be truncated
910  *	to that length.
911  * @outlen_actual: Pointer through which to return the actual response
912  *	length.  May be %NULL if this is not needed.
913  *
914  * This function may sleep and therefore must be called in an appropriate
915  * context.
916  *
917  * Return: A negative error code, or zero if successful.  The error
918  *	code may come from the MCDI response or may indicate a failure
919  *	to communicate with the MC.  In the former case, the response
920  *	will still be copied to @outbuf and *@outlen_actual will be
921  *	set accordingly.  In the latter case, *@outlen_actual will be
922  *	set to zero.
923  */
924 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
925 		 const efx_dword_t *inbuf, size_t inlen,
926 		 efx_dword_t *outbuf, size_t outlen,
927 		 size_t *outlen_actual)
928 {
929 	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
930 				       outlen_actual, false);
931 }
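
/* Minimal usage sketch for efx_mcdi_rpc(), modelled on
 * efx_mcdi_print_fwver() below; a real caller should also check
 * outlength against the expected response length:
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
 *	size_t outlength;
 *	int rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
 *			      outbuf, sizeof(outbuf), &outlength);
 */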
932 
933 /* Normally, on receiving an error code in the MCDI response,
934  * efx_mcdi_rpc will log an error message containing (among other
935  * things) the raw error code, by means of efx_mcdi_display_error.
936  * This _quiet version suppresses that; if the caller wishes to log
937  * the error conditionally on the return code, it should call this
938  * function and is then responsible for calling efx_mcdi_display_error
939  * as needed.
940  */
941 int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
942 		       const efx_dword_t *inbuf, size_t inlen,
943 		       efx_dword_t *outbuf, size_t outlen,
944 		       size_t *outlen_actual)
945 {
946 	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
947 				       outlen_actual, true);
948 }
949 
950 int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
951 		       const efx_dword_t *inbuf, size_t inlen)
952 {
953 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
954 	int rc;
955 
956 	rc = efx_mcdi_check_supported(efx, cmd, inlen);
957 	if (rc)
958 		return rc;
959 
960 	if (efx->mc_bist_for_other_fn)
961 		return -ENETDOWN;
962 
963 	if (mcdi->mode == MCDI_MODE_FAIL)
964 		return -ENETDOWN;
965 
966 	efx_mcdi_acquire_sync(mcdi);
967 	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
968 	return 0;
969 }
970 
971 static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
972 			       const efx_dword_t *inbuf, size_t inlen,
973 			       size_t outlen,
974 			       efx_mcdi_async_completer *complete,
975 			       unsigned long cookie, bool quiet)
976 {
977 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
978 	struct efx_mcdi_async_param *async;
979 	int rc;
980 
981 	rc = efx_mcdi_check_supported(efx, cmd, inlen);
982 	if (rc)
983 		return rc;
984 
985 	if (efx->mc_bist_for_other_fn)
986 		return -ENETDOWN;
987 
988 	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
989 			GFP_ATOMIC);
990 	if (!async)
991 		return -ENOMEM;
992 
993 	async->cmd = cmd;
994 	async->inlen = inlen;
995 	async->outlen = outlen;
996 	async->quiet = quiet;
997 	async->complete = complete;
998 	async->cookie = cookie;
999 	memcpy(async + 1, inbuf, inlen);
1000 
1001 	spin_lock_bh(&mcdi->async_lock);
1002 
1003 	if (mcdi->mode == MCDI_MODE_EVENTS) {
1004 		list_add_tail(&async->list, &mcdi->async_list);
1005 
1006 		/* If this is at the front of the queue, try to start it
1007 		 * immediately
1008 		 */
1009 		if (mcdi->async_list.next == &async->list &&
1010 		    efx_mcdi_acquire_async(mcdi)) {
1011 			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
1012 			mod_timer(&mcdi->async_timer,
1013 				  jiffies + MCDI_RPC_TIMEOUT);
1014 		}
1015 	} else {
1016 		kfree(async);
1017 		rc = -ENETDOWN;
1018 	}
1019 
1020 	spin_unlock_bh(&mcdi->async_lock);
1021 
1022 	return rc;
1023 }
1024 
1025 /**
1026  * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
1027  * @efx: NIC through which to issue the command
1028  * @cmd: Command type number
1029  * @inbuf: Command parameters
1030  * @inlen: Length of command parameters, in bytes
1031  * @outlen: Length to allocate for response buffer, in bytes
1032  * @complete: Function to be called on completion or cancellation.
1033  * @cookie: Arbitrary value to be passed to @complete.
1034  *
1035  * This function does not sleep and therefore may be called in atomic
1036  * context.  It will fail if event queues are disabled or if MCDI
1037  * event completions have been disabled due to an error.
1038  *
1039  * If it succeeds, the @complete function will be called exactly once
1040  * in atomic context, when one of the following occurs:
1041  * (a) the completion event is received (in NAPI context)
1042  * (b) event queues are disabled (in the process that disables them)
1043  * (c) the request times-out (in timer context)
1044  */
1045 int
1046 efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
1047 		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
1048 		   efx_mcdi_async_completer *complete, unsigned long cookie)
1049 {
1050 	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
1051 				   cookie, false);
1052 }
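
/* Illustrative asynchronous usage; my_completer is a hypothetical
 * function matching the efx_mcdi_async_completer typedef:
 *
 *	static void my_completer(struct efx_nic *efx, unsigned long cookie,
 *				 int rc, efx_dword_t *outbuf,
 *				 size_t outlen_actual)
 *	{ ... runs in atomic context ... }
 *
 *	rc = efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen,
 *				my_completer, cookie);
 */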
1053 
1054 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
1055 			efx_dword_t *outbuf, size_t outlen,
1056 			size_t *outlen_actual)
1057 {
1058 	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
1059 				    outlen_actual, false, NULL, NULL);
1060 }
1061 
1062 void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
1063 			    size_t inlen, efx_dword_t *outbuf,
1064 			    size_t outlen, int rc)
1065 {
1066 	int code = 0, err_arg = 0;
1067 
1068 	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
1069 		code = MCDI_DWORD(outbuf, ERR_CODE);
1070 	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
1071 		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
1072 	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
1073 		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
1074 		       cmd, inlen, rc, code, err_arg);
1075 }
1076 
1077 /* Switch to polled MCDI completions.  This can be called in various
1078  * error conditions with various locks held, so it must be lockless.
1079  * Caller is responsible for flushing asynchronous requests later.
1080  */
1081 void efx_mcdi_mode_poll(struct efx_nic *efx)
1082 {
1083 	struct efx_mcdi_iface *mcdi;
1084 
1085 	if (!efx->mcdi)
1086 		return;
1087 
1088 	mcdi = efx_mcdi(efx);
1089 	/* If already in polling mode, nothing to do.
1090 	 * If in fail-fast state, don't switch to polled completion.
1091 	 * FLR recovery will do that later.
1092 	 */
1093 	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
1094 		return;
1095 
1096 	/* We can switch from event completion to polled completion, because
1097 	 * mcdi requests are always completed in shared memory. We do this by
1098 	 * switching the mode to POLL'd then completing the request.
1099 	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
1100 	 *
1101 	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
1102 	 * which efx_mcdi_complete_sync() provides for us.
1103 	 */
1104 	mcdi->mode = MCDI_MODE_POLL;
1105 
1106 	efx_mcdi_complete_sync(mcdi);
1107 }
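
/* The switch above pairs with the mode re-check in
 * efx_mcdi_await_completion(): a waiter woken by efx_mcdi_complete_sync()
 * re-reads mcdi->mode and falls back to efx_mcdi_poll(), so it makes
 * progress even if no completion event ever arrives.
 */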
1108 
1109 /* Flush any running or queued asynchronous requests, after event processing
1110  * is stopped
1111  */
1112 void efx_mcdi_flush_async(struct efx_nic *efx)
1113 {
1114 	struct efx_mcdi_async_param *async, *next;
1115 	struct efx_mcdi_iface *mcdi;
1116 
1117 	if (!efx->mcdi)
1118 		return;
1119 
1120 	mcdi = efx_mcdi(efx);
1121 
1122 	/* We must be in poll or fail mode so no more requests can be queued */
1123 	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
1124 
1125 	del_timer_sync(&mcdi->async_timer);
1126 
1127 	/* If a request is still running, make sure we give the MC
1128 	 * time to complete it so that the response won't overwrite our
1129 	 * next request.
1130 	 */
1131 	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
1132 		efx_mcdi_poll(efx);
1133 		mcdi->state = MCDI_STATE_QUIESCENT;
1134 	}
1135 
1136 	/* Nothing else will access the async list now, so it is safe
1137 	 * to walk it without holding async_lock.  If we hold it while
1138 	 * calling a completer then lockdep may warn that we have
1139 	 * acquired locks in the wrong order.
1140 	 */
1141 	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
1142 		if (async->complete)
1143 			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
1144 		list_del(&async->list);
1145 		kfree(async);
1146 	}
1147 }
1148 
1149 void efx_mcdi_mode_event(struct efx_nic *efx)
1150 {
1151 	struct efx_mcdi_iface *mcdi;
1152 
1153 	if (!efx->mcdi)
1154 		return;
1155 
1156 	mcdi = efx_mcdi(efx);
1157 	/* If already in event completion mode, nothing to do.
1158 	 * If in fail-fast state, don't switch to event completion.  FLR
1159 	 * recovery will do that later.
1160 	 */
1161 	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
1162 		return;
1163 
1164 	/* We can't switch from polled to event completion in the middle of a
1165 	 * request, because the completion method is specified in the request.
1166 	 * So acquire the interface to serialise the requestors. We don't need
1167 	 * to acquire the iface_lock to change the mode here, but we do need a
1168 	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
1169 	 * efx_mcdi_acquire() provides.
1170 	 */
1171 	efx_mcdi_acquire_sync(mcdi);
1172 	mcdi->mode = MCDI_MODE_EVENTS;
1173 	efx_mcdi_release(mcdi);
1174 }
1175 
1176 static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
1177 {
1178 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1179 
1180 	/* If there is an outstanding MCDI request, it has been terminated
1181 	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
1182 	 * in polled mode, then do nothing because the MC reboot handler will
1183 	 * set the header correctly. However, if the mcdi interface is waiting
1184 	 * for a CMDDONE event it won't receive it [and since all MCDI events
1185 	 * are sent to the same queue, we can't be racing with
1186 	 * efx_mcdi_ev_cpl()]
1187 	 *
1188 	 * If there is an outstanding asynchronous request, we can't
1189 	 * complete it now (efx_mcdi_complete() would deadlock).  The
1190 	 * reset process will take care of this.
1191 	 *
1192 	 * There's a race here with efx_mcdi_send_request(), because
1193 	 * we might receive a REBOOT event *before* the request has
1194 	 * been copied out. In polled mode (during startup) this is
1195 	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
1196 	 * event mode, this condition is just an edge-case of
1197 	 * receiving a REBOOT event after posting the MCDI
1198 	 * request. Did the MC reboot before or after the copyout? The
1199 	 * best we can do in either case is just return failure.
1200 	 *
1201 	 * If there is an outstanding proxy response expected it is not going
1202 	 * to arrive. We should thus abort it.
1203 	 */
1204 	spin_lock(&mcdi->iface_lock);
1205 	efx_mcdi_proxy_abort(mcdi);
1206 
1207 	if (efx_mcdi_complete_sync(mcdi)) {
1208 		if (mcdi->mode == MCDI_MODE_EVENTS) {
1209 			mcdi->resprc = rc;
1210 			mcdi->resp_hdr_len = 0;
1211 			mcdi->resp_data_len = 0;
1212 			++mcdi->credits;
1213 		}
1214 	} else {
1215 		int count;
1216 
1217 		/* Consume the status word since efx_mcdi_rpc_finish() won't */
1218 		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
1219 			rc = efx_mcdi_poll_reboot(efx);
1220 			if (rc)
1221 				break;
1222 			udelay(MCDI_STATUS_DELAY_US);
1223 		}
1224 
1225 		/* On EF10, a CODE_MC_REBOOT event can be received without the
1226 		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
1227 		 * If zero was returned from the final call to
1228 		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
1229 		 * MC has definitely rebooted so prepare for the reset.
1230 		 */
1231 		if (!rc && efx->type->mcdi_reboot_detected)
1232 			efx->type->mcdi_reboot_detected(efx);
1233 
1234 		mcdi->new_epoch = true;
1235 
1236 		/* Nobody was waiting for an MCDI request, so trigger a reset */
1237 		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
1238 	}
1239 
1240 	spin_unlock(&mcdi->iface_lock);
1241 }
1242 
1243 /* The MC is going down into BIST mode.  Set the BIST flag to block
1244  * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
1245  * (which doesn't actually execute a reset, it waits for the controlling
1246  * function to reset it).
1247  */
1248 static void efx_mcdi_ev_bist(struct efx_nic *efx)
1249 {
1250 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1251 
1252 	spin_lock(&mcdi->iface_lock);
1253 	efx->mc_bist_for_other_fn = true;
1254 	efx_mcdi_proxy_abort(mcdi);
1255 
1256 	if (efx_mcdi_complete_sync(mcdi)) {
1257 		if (mcdi->mode == MCDI_MODE_EVENTS) {
1258 			mcdi->resprc = -EIO;
1259 			mcdi->resp_hdr_len = 0;
1260 			mcdi->resp_data_len = 0;
1261 			++mcdi->credits;
1262 		}
1263 	}
1264 	mcdi->new_epoch = true;
1265 	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
1266 	spin_unlock(&mcdi->iface_lock);
1267 }
1268 
1269 /* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
1270  * to recover.
1271  */
1272 static void efx_mcdi_abandon(struct efx_nic *efx)
1273 {
1274 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1275 
1276 	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
1277 		return; /* it had already been done */
1278 	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
1279 	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
1280 }
1281 
1282 static void efx_handle_drain_event(struct efx_nic *efx)
1283 {
1284 	if (atomic_dec_and_test(&efx->active_queues))
1285 		wake_up(&efx->flush_wq);
1286 
1287 	WARN_ON(atomic_read(&efx->active_queues) < 0);
1288 }
1289 
1290 /* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
1291 void efx_mcdi_process_event(struct efx_channel *channel,
1292 			    efx_qword_t *event)
1293 {
1294 	struct efx_nic *efx = channel->efx;
1295 	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
1296 	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
1297 
1298 	switch (code) {
1299 	case MCDI_EVENT_CODE_BADSSERT:
1300 		netif_err(efx, hw, efx->net_dev,
1301 			  "MC watchdog or assertion failure at 0x%x\n", data);
1302 		efx_mcdi_ev_death(efx, -EINTR);
1303 		break;
1304 
1305 	case MCDI_EVENT_CODE_PMNOTICE:
1306 		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
1307 		break;
1308 
1309 	case MCDI_EVENT_CODE_CMDDONE:
1310 		efx_mcdi_ev_cpl(efx,
1311 				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
1312 				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
1313 				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
1314 		break;
1315 
1316 	case MCDI_EVENT_CODE_LINKCHANGE:
1317 		efx_mcdi_process_link_change(efx, event);
1318 		break;
1319 	case MCDI_EVENT_CODE_SENSOREVT:
1320 		efx_sensor_event(efx, event);
1321 		break;
1322 	case MCDI_EVENT_CODE_SCHEDERR:
1323 		netif_dbg(efx, hw, efx->net_dev,
1324 			  "MC Scheduler alert (0x%x)\n", data);
1325 		break;
1326 	case MCDI_EVENT_CODE_REBOOT:
1327 	case MCDI_EVENT_CODE_MC_REBOOT:
1328 		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
1329 		efx_mcdi_ev_death(efx, -EIO);
1330 		break;
1331 	case MCDI_EVENT_CODE_MC_BIST:
1332 		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
1333 		efx_mcdi_ev_bist(efx);
1334 		break;
1335 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
1336 		/* MAC stats are gathered lazily.  We can ignore this. */
1337 		break;
1338 	case MCDI_EVENT_CODE_PTP_FAULT:
1339 	case MCDI_EVENT_CODE_PTP_PPS:
1340 		efx_ptp_event(efx, event);
1341 		break;
1342 	case MCDI_EVENT_CODE_PTP_TIME:
1343 		efx_time_sync_event(channel, event);
1344 		break;
1345 	case MCDI_EVENT_CODE_TX_FLUSH:
1346 	case MCDI_EVENT_CODE_RX_FLUSH:
1347 		/* Two flush events will be sent: one to the same event
1348 		 * queue as completions, and one to event queue 0.
1349 		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
1350 		 * flag will be set, and we should ignore the event
1351 		 * because we want to wait for all completions.
1352 		 */
1353 		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
1354 			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
1355 		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
1356 			efx_handle_drain_event(efx);
1357 		break;
1358 	case MCDI_EVENT_CODE_TX_ERR:
1359 	case MCDI_EVENT_CODE_RX_ERR:
1360 		netif_err(efx, hw, efx->net_dev,
1361 			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
1362 			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
1363 			  EFX_QWORD_VAL(*event));
1364 		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1365 		break;
1366 	case MCDI_EVENT_CODE_PROXY_RESPONSE:
1367 		efx_mcdi_ev_proxy_response(efx,
1368 				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
1369 				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
1370 		break;
1371 	default:
1372 		netif_err(efx, hw, efx->net_dev,
1373 			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
1374 			  EFX_QWORD_VAL(*event));
1375 	}
1376 }
1377 
1378 /**************************************************************************
1379  *
1380  * Specific request functions
1381  *
1382  **************************************************************************
1383  */
1384 
1385 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
1386 {
1387 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
1388 	size_t outlength;
1389 	const __le16 *ver_words;
1390 	size_t offset;
1391 	int rc;
1392 
1393 	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
1394 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
1395 			  outbuf, sizeof(outbuf), &outlength);
1396 	if (rc)
1397 		goto fail;
1398 	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
1399 		rc = -EIO;
1400 		goto fail;
1401 	}
1402 
1403 	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
1404 	offset = scnprintf(buf, len, "%u.%u.%u.%u",
1405 			   le16_to_cpu(ver_words[0]),
1406 			   le16_to_cpu(ver_words[1]),
1407 			   le16_to_cpu(ver_words[2]),
1408 			   le16_to_cpu(ver_words[3]));
1409 
1410 	if (efx->type->print_additional_fwver)
1411 		offset += efx->type->print_additional_fwver(efx, buf + offset,
1412 							    len - offset);
1413 
1414 	/* It's theoretically possible for the string to exceed 31
1415 	 * characters, though in practice the first three version
1416 	 * components are short enough that this doesn't happen.
1417 	 */
1418 	if (WARN_ON(offset >= len))
1419 		buf[0] = 0;
1420 
1421 	return;
1422 
1423 fail:
1424 	pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
1425 	buf[0] = 0;
1426 }
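
/* Example of the resulting string (version numbers are illustrative):
 * "4.2.1.1001", possibly extended by print_additional_fwver() on NICs
 * that implement it.
 */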
1427 
1428 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
1429 			       bool *was_attached)
1430 {
1431 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
1432 	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
1433 	size_t outlen;
1434 	int rc;
1435 
1436 	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
1437 		       driver_operating ? 1 : 0);
1438 	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
1439 	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
1440 
1441 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
1442 				outbuf, sizeof(outbuf), &outlen);
1443 	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
1444 	 * specified will fail with EPERM, and we have to tell the MC we don't
1445 	 * care what firmware we get.
1446 	 */
1447 	if (rc == -EPERM) {
1448 		pci_dbg(efx->pci_dev,
1449 			"%s with fw-variant setting failed EPERM, trying without it\n",
1450 			__func__);
1451 		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
1452 			       MC_CMD_FW_DONT_CARE);
1453 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
1454 					sizeof(inbuf), outbuf, sizeof(outbuf),
1455 					&outlen);
1456 	}
1457 	if (rc) {
1458 		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
1459 				       outbuf, outlen, rc);
1460 		goto fail;
1461 	}
1462 	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
1463 		rc = -EIO;
1464 		goto fail;
1465 	}
1466 
1467 	if (driver_operating) {
1468 		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
1469 			efx->mcdi->fn_flags =
1470 				MCDI_DWORD(outbuf,
1471 					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
1472 		} else {
1473 			/* Synthesise flags for Siena */
1474 			efx->mcdi->fn_flags =
1475 				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1476 				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
1477 				(efx_port_num(efx) == 0) <<
1478 				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
1479 		}
1480 	}
1481 
1482 	/* We currently assume we have control of the external link
1483 	 * and are completely trusted by firmware.  Abort probing
1484 	 * if that's not true for this function.
1485 	 */
1486 
1487 	if (was_attached != NULL)
1488 		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
1489 	return 0;
1490 
1491 fail:
1492 	pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
1493 	return rc;
1494 }
1495 
1496 int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1497 			   u16 *fw_subtype_list, u32 *capabilities)
1498 {
1499 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
1500 	size_t outlen, i;
1501 	int port_num = efx_port_num(efx);
1502 	int rc;
1503 
1504 	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
1505 	/* we need __aligned(2) for ether_addr_copy */
1506 	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
1507 	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
1508 
1509 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
1510 			  outbuf, sizeof(outbuf), &outlen);
1511 	if (rc)
1512 		goto fail;
1513 
1514 	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
1515 		rc = -EIO;
1516 		goto fail;
1517 	}
1518 
1519 	if (mac_address)
1520 		ether_addr_copy(mac_address,
1521 				port_num ?
1522 				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1523 				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
1524 	if (fw_subtype_list) {
1525 		for (i = 0;
1526 		     i < MCDI_VAR_ARRAY_LEN(outlen,
1527 					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
1528 		     i++)
1529 			fw_subtype_list[i] = MCDI_ARRAY_WORD(
1530 				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
1531 		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
1532 			fw_subtype_list[i] = 0;
1533 	}
1534 	if (capabilities) {
1535 		if (port_num)
1536 			*capabilities = MCDI_DWORD(outbuf,
1537 					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
1538 		else
1539 			*capabilities = MCDI_DWORD(outbuf,
1540 					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
1541 	}
1542 
1543 	return 0;
1544 
1545 fail:
1546 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
1547 		  __func__, rc, (int)outlen);
1548 
1549 	return rc;
1550 }
1551 
1552 int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
1553 {
1554 	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
1555 	u32 dest = 0;
1556 	int rc;
1557 
1558 	if (uart)
1559 		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
1560 	if (evq)
1561 		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
1562 
1563 	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
1564 	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
1565 
1566 	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
1567 
1568 	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
1569 			  NULL, 0, NULL);
1570 	return rc;
1571 }
1572 
1573 int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
1574 {
1575 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
1576 	size_t outlen;
1577 	int rc;
1578 
1579 	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
1580 
1581 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
1582 			  outbuf, sizeof(outbuf), &outlen);
1583 	if (rc)
1584 		goto fail;
1585 	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
1586 		rc = -EIO;
1587 		goto fail;
1588 	}
1589 
1590 	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
1591 	return 0;
1592 
1593 fail:
1594 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1595 		  __func__, rc);
1596 	return rc;
1597 }
1598 
1599 /* This function finds types using the new NVRAM_PARTITIONS mcdi. */
1600 static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
1601 				    u32 *nvram_types)
1602 {
1603 	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1604 				      GFP_KERNEL);
1605 	size_t outlen;
1606 	int rc;
1607 
1608 	if (!outbuf)
1609 		return -ENOMEM;
1610 
1611 	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
1612 
1613 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
1614 			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
1615 	if (rc)
1616 		goto fail;
1617 
1618 	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
1619 
1620 	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
1621 	       *number * sizeof(u32));
1622 
1623 fail:
1624 	kfree(outbuf);
1625 	return rc;
1626 }
1627 
1628 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
1629 			size_t *size_out, size_t *erase_size_out,
1630 			bool *protected_out)
1631 {
1632 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
1633 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
1634 	size_t outlen;
1635 	int rc;
1636 
1637 	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
1638 
1639 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
1640 			  outbuf, sizeof(outbuf), &outlen);
1641 	if (rc)
1642 		goto fail;
1643 	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
1644 		rc = -EIO;
1645 		goto fail;
1646 	}
1647 
1648 	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
1649 	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
1650 	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
1651 				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
1652 	return 0;
1653 
1654 fail:
1655 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1656 	return rc;
1657 }
1658 
1659 static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
1660 {
1661 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
1662 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
1663 	int rc;
1664 
1665 	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
1666 
1667 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
1668 			  outbuf, sizeof(outbuf), NULL);
1669 	if (rc)
1670 		return rc;
1671 
1672 	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
1673 	case MC_CMD_NVRAM_TEST_PASS:
1674 	case MC_CMD_NVRAM_TEST_NOTSUPP:
1675 		return 0;
1676 	default:
1677 		return -EIO;
1678 	}
1679 }
1680 
1681 /* This function tests nvram partitions using the new mcdi partition lookup scheme */
1682 int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
1683 {
1684 	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1685 				   GFP_KERNEL);
1686 	unsigned int number;
1687 	int rc, i;
1688 
1689 	if (!nvram_types)
1690 		return -ENOMEM;
1691 
1692 	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
1693 	if (rc)
1694 		goto fail;
1695 
1696 	/* Require at least one check */
1697 	rc = -EAGAIN;
1698 
1699 	for (i = 0; i < number; i++) {
1700 		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
1701 		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
1702 			continue;
1703 
1704 		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
1705 		if (rc)
1706 			goto fail;
1707 	}
1708 
1709 fail:
1710 	kfree(nvram_types);
1711 	return rc;
1712 }
1713 
1714 int efx_mcdi_nvram_test_all(struct efx_nic *efx)
1715 {
1716 	u32 nvram_types;
1717 	unsigned int type;
1718 	int rc;
1719 
1720 	rc = efx_mcdi_nvram_types(efx, &nvram_types);
1721 	if (rc)
1722 		goto fail1;
1723 
1724 	type = 0;
1725 	while (nvram_types != 0) {
1726 		if (nvram_types & 1) {
1727 			rc = efx_mcdi_nvram_test(efx, type);
1728 			if (rc)
1729 				goto fail2;
1730 		}
1731 		type++;
1732 		nvram_types >>= 1;
1733 	}
1734 
1735 	return 0;
1736 
1737 fail2:
1738 	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
1739 		  __func__, type);
1740 fail1:
1741 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1742 	return rc;
1743 }
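
/* Worked example of the bitmask walk above (value is illustrative): with
 * nvram_types == 0x5, types 0 and 2 are tested and every other type is
 * skipped.
 */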
1744 
/* Returns 1 if an assertion was read, 0 if no assertion had fired,
 * negative on error.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
		if (rc == -EPERM)
			return 0;
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 1;
}

static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.
	 * The MCDI will thus return either 0 or -EIO.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
				NULL, 0, NULL);
	if (rc == -EIO)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
				       NULL, 0, rc);
	return rc;
}

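/* Read out (and clear) any stored MC assertion state; if an assertion
 * had fired, reboot the MC out of its assertion handler.  Returns 0 on
 * success, including the case where no assertion had fired.
 */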
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc <= 0)
		return rc;

	return efx_mcdi_exit_assertion(efx);
}

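/* Drive the board's identification LED.  The BUILD_BUG_ONs pin the
 * driver's enum efx_led_mode values to the MCDI wire values, so that
 * mode can be passed straight through to the MC.
 */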
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

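/* Reset this function's resources via MC_CMD_ENTITY_RESET with the
 * FUNCTION_RESOURCE_RESET flag, leaving the MC itself running.
 */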
static int efx_mcdi_reset_func(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: a reboot that takes effect
	 * destroys the response, so -EIO means success, while a clean
	 * completion means the MC failed to reboot.
	 */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	return rc;
}

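/* All reset reasons map to RESET_TYPE_RECOVER_OR_ALL: on MCDI NICs any
 * reset escalates to at least a recoverable or full reset.
 */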
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

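/* Perform the requested reset: a PCI function-level reset if MCDI has
 * timed out (the MC cannot be trusted to respond), an MC reboot for
 * RESET_TYPE_WORLD, or an entity reset of this function otherwise.
 * RESET_TYPE_DATAPATH requires no MC action at all.
 */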
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* If MCDI is down, we can't handle_assertion */
	if (method == RESET_TYPE_MCDI_TIMEOUT) {
		rc = pci_reset_function(efx->pci_dev);
		if (rc)
			return rc;
		/* Re-enable polled MCDI completion */
		if (efx->mcdi) {
			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

			mcdi->mode = MCDI_MODE_POLL;
		}
		return 0;
	}

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_DATAPATH)
		return 0;
	else if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_func(efx);
}

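/* Install a Wake-on-LAN filter of the given type for a MAC address.
 * On success *id_out holds the MC's filter ID, suitable for passing to
 * efx_mcdi_wol_filter_remove(); on failure it is set to -1.
 */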
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	return rc;
}

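/* Enable or disable a firmware workaround.  If flags is non-NULL it is
 * filled with the extended response's flags word when the MC returns
 * one, or with 0 when only the plain (empty) response is available.
 */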
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
			    unsigned int *flags)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	if (!flags)
		return 0;

	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
	else
		*flags = 0;

	return 0;
}

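/* Query which firmware workarounds are implemented and which are
 * currently enabled.  Either out-pointer may be NULL if the caller does
 * not need that mask.
 */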
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
			     unsigned int *enabled_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (impl_out)
		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);

	if (enabled_out)
		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);

	return 0;

fail:
	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
	 * terrifying.  The call site will have to deal with it though.
	 */
	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
		       "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Failure to read a privilege mask is never fatal, because we can always
 * carry on as though we didn't have the privilege we were interested in.
 * So use efx_mcdi_rpc_quiet().
 */
int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
{
	MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
	MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
	size_t outlen;
	u16 pf, vf;
	int rc;

	if (!efx || !mask)
		return -EINVAL;

	/* Get our function number */
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
				fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
				&outlen);
	if (rc != 0)
		return rc;
	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
		return -EIO;

	pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
	vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);

	MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
				pm_inbuf, sizeof(pm_inbuf),
				pm_outbuf, sizeof(pm_outbuf), &outlen);

	if (rc != 0)
		return rc;
	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
		return -EIO;

	*mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);

	return 0;
}

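/* Read the metadata (subtype, version and description) for an NVRAM
 * partition.  Any of subtype, version and desc may be NULL; fields the
 * MC does not report as valid are zeroed.  Returns -E2BIG if desc is
 * too small for the reported description.
 */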
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
			    u32 *subtype, u16 version[4], char *desc,
			    size_t descsize)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	efx_dword_t *outbuf;
	size_t outlen;
	u32 flags;
	int rc;

	outbuf = kzalloc(MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, GFP_KERNEL);
	if (!outbuf)
		return -ENOMEM;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_METADATA, inbuf,
				sizeof(inbuf), outbuf,
				MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2,
				&outlen);
	if (rc)
		goto out_free;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
		rc = -EIO;
		goto out_free;
	}

	flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS);

	if (desc && descsize > 0) {
		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) {
			if (descsize <=
			    MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)) {
				rc = -E2BIG;
				goto out_free;
			}

			strscpy(desc,
				MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
				MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
		} else {
			desc[0] = '\0';
		}
	}

	if (subtype) {
		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
			*subtype = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_SUBTYPE);
		else
			*subtype = 0;
	}

	if (version) {
		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) {
			version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W);
			version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X);
			version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y);
			version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z);
		} else {
			version[0] = 0;
			version[1] = 0;
			version[2] = 0;
			version[3] = 0;
		}
	}

out_free:
	kfree(outbuf);
	return rc;
}

#ifdef CONFIG_SFC_MTD

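/* Largest number of NVRAM bytes read or written per MCDI request.  This
 * keeps the on-stack MCDI request/response buffers small and each RPC
 * short.
 */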
#define EFX_MCDI_NVRAM_LEN_MAX 128

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
			      1);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);

	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

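/* Finish an NVRAM update and, on firmware that reports it, translate the
 * verification result into a standard error code: malformed content
 * becomes -EINVAL, a failed integrity or signature check -EIO, and
 * missing or untrusted signatures -EPERM.
 */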
static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
	size_t outlen;
	int rc, rc2;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
	/* Always set this flag. Old firmware ignores it */
	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
			      1);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
			netif_err(efx, drv, efx->net_dev,
				  "NVRAM update failed verification with code 0x%x\n",
				  rc2);
		switch (rc2) {
		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
			break;
		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
			rc = -EIO;
			break;
		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
			rc = -EINVAL;
			break;
		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
			rc = -EPERM;
			break;
		default:
			netif_err(efx, drv, efx->net_dev,
				  "Unknown response to NVRAM_UPDATE_FINISH\n");
			rc = -EIO;
		}
	}

	return rc;
}

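/* MTD read op: fetch the range in EFX_MCDI_NVRAM_LEN_MAX chunks, and
 * report via *retlen how much was read before any error.
 */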
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

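/* MTD erase op.  The first erase or write on a partition opens an update
 * transaction with NVRAM_UPDATE_START; it stays open until the MTD core
 * calls the sync op below, which commits it.
 */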
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout.
	 */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

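/* MTD sync op: if an erase or write left an update open, commit it with
 * NVRAM_UPDATE_FINISH so the MC can verify and apply the new contents.
 */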
int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

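/* Build the MTD partition name as
 * "<NIC name> <partition type name>:<firmware subtype>".
 */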
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */