xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision 6871d4882591c9a8fcab24d084c93f0a2972e1af)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_inet.h"
33 
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 
37 #include "common.h"
38 #include "t4_regs.h"
39 #include "t4_regs_values.h"
40 #include "firmware/t4fw_interface.h"
41 
#undef msleep
/*
 * Sleep for x milliseconds.  While the kernel is cold we cannot sleep, so
 * busy-wait with DELAY(); otherwise yield with pause() for the equivalent
 * number of ticks.
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
49 
50 /**
51  *	t4_wait_op_done_val - wait until an operation is completed
52  *	@adapter: the adapter performing the operation
53  *	@reg: the register to check for completion
54  *	@mask: a single-bit field within @reg that indicates completion
55  *	@polarity: the value of the field when the operation is completed
56  *	@attempts: number of check iterations
57  *	@delay: delay in usecs between iterations
58  *	@valp: where to store the value of the register at completion time
59  *
60  *	Wait until an operation is completed by checking a bit in a register
61  *	up to @attempts times.  If @valp is not NULL the value of the register
62  *	at the time it indicated completion is stored there.  Returns 0 if the
63  *	operation completes and	-EAGAIN	otherwise.
64  */
65 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
66 			       int polarity, int attempts, int delay, u32 *valp)
67 {
68 	while (1) {
69 		u32 val = t4_read_reg(adapter, reg);
70 
71 		if (!!(val & mask) == polarity) {
72 			if (valp)
73 				*valp = val;
74 			return 0;
75 		}
76 		if (--attempts == 0)
77 			return -EAGAIN;
78 		if (delay)
79 			udelay(delay);
80 	}
81 }
82 
83 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
84 				  int polarity, int attempts, int delay)
85 {
86 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
87 				   delay, NULL);
88 }
89 
90 /**
91  *	t4_set_reg_field - set a register field to a value
92  *	@adapter: the adapter to program
93  *	@addr: the register address
94  *	@mask: specifies the portion of the register to modify
95  *	@val: the new value for the register field
96  *
97  *	Sets a register field specified by the supplied mask to the
98  *	given value.
99  */
100 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
101 		      u32 val)
102 {
103 	u32 v = t4_read_reg(adapter, addr) & ~mask;
104 
105 	t4_write_reg(adapter, addr, v | val);
106 	(void) t4_read_reg(adapter, addr);      /* flush */
107 }
108 
109 /**
110  *	t4_read_indirect - read indirectly addressed registers
111  *	@adap: the adapter
112  *	@addr_reg: register holding the indirect address
113  *	@data_reg: register holding the value of the indirect register
114  *	@vals: where the read register values are stored
115  *	@nregs: how many indirect registers to read
116  *	@start_idx: index of first indirect register to read
117  *
118  *	Reads registers that are accessed indirectly through an address/data
119  *	register pair.
120  */
121 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
122 			     unsigned int data_reg, u32 *vals,
123 			     unsigned int nregs, unsigned int start_idx)
124 {
125 	while (nregs--) {
126 		t4_write_reg(adap, addr_reg, start_idx);
127 		*vals++ = t4_read_reg(adap, data_reg);
128 		start_idx++;
129 	}
130 }
131 
132 /**
133  *	t4_write_indirect - write indirectly addressed registers
134  *	@adap: the adapter
135  *	@addr_reg: register holding the indirect addresses
136  *	@data_reg: register holding the value for the indirect registers
137  *	@vals: values to write
138  *	@nregs: how many indirect registers to write
139  *	@start_idx: address of first indirect register to write
140  *
141  *	Writes a sequential block of registers that are accessed indirectly
142  *	through an address/data register pair.
143  */
144 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
145 		       unsigned int data_reg, const u32 *vals,
146 		       unsigned int nregs, unsigned int start_idx)
147 {
148 	while (nregs--) {
149 		t4_write_reg(adap, addr_reg, start_idx++);
150 		t4_write_reg(adap, data_reg, *vals++);
151 	}
152 }
153 
154 /*
155  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
156  * mechanism.  This guarantees that we get the real value even if we're
157  * operating within a Virtual Machine and the Hypervisor is trapping our
158  * Configuration Space accesses.
159  *
160  * N.B. This routine should only be used as a last resort: the firmware uses
161  *      the backdoor registers on a regular basis and we can end up
 *      conflicting with its uses!
163  */
164 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
165 {
166 	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
167 	u32 val;
168 
169 	if (chip_id(adap) <= CHELSIO_T5)
170 		req |= F_ENABLE;
171 	else
172 		req |= F_T6_ENABLE;
173 
174 	if (is_t4(adap))
175 		req |= F_LOCALCFG;
176 
177 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
178 	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
179 
180 	/*
181 	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
182 	 * Configuration Space read.  (None of the other fields matter when
183 	 * F_ENABLE is 0 so a simple register write is easier than a
184 	 * read-modify-write via t4_set_reg_field().)
185 	 */
186 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 
188 	return val;
189 }
190 
191 /*
192  * t4_report_fw_error - report firmware error
193  * @adap: the adapter
194  *
195  * The adapter firmware can indicate error conditions to the host.
196  * If the firmware has indicated an error, print out the reason for
197  * the firmware error.
198  */
199 static void t4_report_fw_error(struct adapter *adap)
200 {
201 	static const char *const reason[] = {
202 		"Crash",			/* PCIE_FW_EVAL_CRASH */
203 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
204 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
205 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
206 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
207 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
208 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
209 		"Reserved",			/* reserved */
210 	};
211 	u32 pcie_fw;
212 
213 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
214 	if (pcie_fw & F_PCIE_FW_ERR) {
215 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
216 			reason[G_PCIE_FW_EVAL(pcie_fw)]);
217 		adap->flags &= ~FW_OK;
218 	}
219 }
220 
221 /*
222  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
223  */
224 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
225 			 u32 mbox_addr)
226 {
227 	for ( ; nflit; nflit--, mbox_addr += 8)
228 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
229 }
230 
231 /*
232  * Handle a FW assertion reported in a mailbox.
233  */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * All fields in the FW reply are big-endian.  %.16s bounds the
	 * filename print -- NOTE(review): the field name suggests only bytes
	 * 0-7 live here; confirm the adjacent struct layout supplies the
	 * rest, and that the string may not be NUL-terminated.
	 */
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
243 
/*
 * Snapshot of per-port MPS counters used to detect a stuck TX path while
 * waiting on a mailbox reply (see read_tx_state_one()/check_tx_state()).
 */
struct port_tx_state {
	uint64_t rx_pause;	/* pause frames received by the port */
	uint64_t tx_frames;	/* frames transmitted by the port */
};
248 
249 static void
250 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
251 {
252 	uint32_t rx_pause_reg, tx_frames_reg;
253 
254 	if (is_t4(sc)) {
255 		tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
256 		rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
257 	} else {
258 		tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
259 		rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
260 	}
261 
262 	tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
263 	tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
264 }
265 
266 static void
267 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
268 {
269 	int i;
270 
271 	for_each_port(sc, i)
272 		read_tx_state_one(sc, i, &tx_state[i]);
273 }
274 
/*
 * Check each port for a wedged TX path and bounce TX to recover it.  A port
 * is considered wedged when TX is enabled, new pause frames have arrived
 * since the snapshot in @tx_state, yet no new frames went out.  @tx_state
 * is refreshed with the current counters as a side effect.
 */
static void
check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	uint32_t port_ctl_reg;
	uint64_t tx_frames, rx_pause;
	int i;

	for_each_port(sc, i) {
		/* Remember the previous snapshot, then refresh it. */
		rx_pause = tx_state[i].rx_pause;
		tx_frames = tx_state[i].tx_frames;
		read_tx_state_one(sc, i, &tx_state[i]);	/* update */

		if (is_t4(sc))
			port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
		else
			port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
		if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
		    rx_pause != tx_state[i].rx_pause &&
		    tx_frames == tx_state[i].tx_frames) {
			/* Toggle PORTTXEN off and back on to restart TX. */
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
			mdelay(1);
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
		}
	}
}
300 
/* Value read back from the mailbox ctrl register when the PF has no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
302 /**
303  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
304  *	@adap: the adapter
305  *	@mbox: index of the mailbox to use
306  *	@cmd: the command to write
307  *	@size: command length in bytes
308  *	@rpl: where to optionally store the reply
309  *	@sleep_ok: if true we may sleep while awaiting command completion
310  *	@timeout: time to wait for command to finish before timing out
311  *		(negative implies @sleep_ok=false)
312  *
313  *	Sends the given command to FW through the selected mailbox and waits
314  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
315  *	store the FW's reply to the command.  The command and its optional
316  *	reply are of the same length.  Some FW commands like RESET and
317  *	INITIALIZE can take a considerable amount of time to execute.
318  *	@sleep_ok determines whether we may sleep while awaiting the response.
319  *	If sleeping is allowed we use progressive backoff otherwise we spin.
320  *	Note that passing in a negative @timeout is an alternate mechanism
321  *	for specifying @sleep_ok=false.  This is useful when a higher level
322  *	interface allows for specification of @timeout but not @sleep_ok ...
323  *
324  *	The return value is 0 on success or a negative errno on failure.  A
325  *	failure can happen either because we are not able to execute the
326  *	command or FW executes it but signals an error.  In the latter case
327  *	the return value is the error code indicated by FW (negated).
328  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret, next_tx_check;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;
	struct port_tx_state tx_state[MAX_NPORTS];

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands must be a multiple of 16 bytes and fit the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use a different mailbox register layout than PFs. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 * NOTE(review): only 4 back-to-back reads with no delay in between.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: %016llx %016llx "
		       "%016llx %016llx %016llx %016llx %016llx %016llx\n",
		       mbox, (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand the mailbox to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	read_tx_state(adap, &tx_state[0]);	/* also flushes the write_reg */
	next_tx_check = 1000;
	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		/* PFs watch A_PCIE_FW for an asynchronous firmware error. */
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}

		/* Roughly once a second, look for wedged TX ports. */
		if (i >= next_tx_check) {
			check_tx_state(adap, &tx_state[0]);
			next_tx_check = i + 1000;
		}

		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership without a valid message: release and
			 * keep polling. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			/* A FW_DEBUG_CMD reply is an assertion report, not
			 * the answer to our command. */
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* Return the FW's retval, negated per convention. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	/* If DUMP_MBOX is set the mbox has already been dumped */
	if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
		p = cmd;
		CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    (unsigned long long)be64_to_cpu(p[0]),
		    (unsigned long long)be64_to_cpu(p[1]),
		    (unsigned long long)be64_to_cpu(p[2]),
		    (unsigned long long)be64_to_cpu(p[3]),
		    (unsigned long long)be64_to_cpu(p[4]),
		    (unsigned long long)be64_to_cpu(p[5]),
		    (unsigned long long)be64_to_cpu(p[6]),
		    (unsigned long long)be64_to_cpu(p[7]));
	}

	/* A mailbox timeout is treated as fatal for the adapter. */
	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
525 
526 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
527 		    void *rpl, bool sleep_ok)
528 {
529 		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
530 					       sleep_ok, FW_CMD_MAX_TIMEOUT);
531 
532 }
533 
/*
 * Dump the EDC ECC error address and BIST status data registers for EDC
 * @idx to the console.  Diagnostic only; always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	/* The A_EDC_H_* registers used below are not read on T4. */
	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Dump nine consecutive 64-bit words of BIST status data. */
	CH_WARN(adap,
	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
570 
571 /**
572  *	t4_mc_read - read from MC through backdoor accesses
573  *	@adap: the adapter
574  *	@idx: which MC to access
575  *	@addr: address of first byte requested
576  *	@data: 64 bytes of data containing the requested address
577  *	@ecc: where to store the corresponding 64-bit ECC word
578  *
579  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
580  *	that covers the requested address @addr.  If @parity is not %NULL it
581  *	is assigned the 64-bit ECC word for the read data.
582  */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single, un-indexed MC register block; T5+ select the
	 * controller via @idx. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Fail rather than queue behind a BIST already in flight. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* 64-byte aligned start address, 64-byte read length. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	/* Poll up to 10 times, 1us apart, for F_START_BIST to self-clear. */
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* Status words 15..0 hold the data; word 16 holds the ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
625 
626 /**
627  *	t4_edc_read - read from EDC through backdoor accesses
628  *	@adap: the adapter
629  *	@idx: which EDC to access
630  *	@addr: address of first byte requested
631  *	@data: 64 bytes of data containing the requested address
632  *	@ecc: where to store the corresponding 64-bit ECC word
633  *
634  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
635  *	that covers the requested address @addr.  If @parity is not %NULL it
636  *	is assigned the 64-bit ECC word for the read data.
637  */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	/* T4 and T5+ expose the EDC BIST registers at different locations. */
	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macro are missing in t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						    idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Fail rather than queue behind a BIST already in flight. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* 64-byte aligned start address, 64-byte read length. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	/* Poll up to 10 times, 1us apart, for F_START_BIST to self-clear. */
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* Status words 15..0 hold the data; word 16 holds the ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
690 
691 /**
692  *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
693  *	@adap: the adapter
694  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
695  *	@addr: address within indicated memory type
696  *	@len: amount of memory to read
697  *	@buf: host memory buffer
698  *
699  *	Reads an [almost] arbitrary memory region in the firmware: the
700  *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
702  *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
704  *	perform appropriate byte order conversions.
705  */
706 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
707 		__be32 *buf)
708 {
709 	u32 pos, start, end, offset;
710 	int ret;
711 
712 	/*
713 	 * Argument sanity checks ...
714 	 */
715 	if ((addr & 0x3) || (len & 0x3))
716 		return -EINVAL;
717 
718 	/*
719 	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
720 	 * need to round down the start and round up the end.  We'll start
721 	 * copying out of the first line at (addr - start) a word at a time.
722 	 */
723 	start = rounddown2(addr, 64);
724 	end = roundup2(addr + len, 64);
725 	offset = (addr - start)/sizeof(__be32);
726 
727 	for (pos = start; pos < end; pos += 64, offset = 0) {
728 		__be32 data[16];
729 
730 		/*
731 		 * Read the chip's memory block and bail if there's an error.
732 		 */
733 		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
734 			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
735 		else
736 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
737 		if (ret)
738 			return ret;
739 
740 		/*
741 		 * Copy the data into the caller's memory buffer.
742 		 */
743 		while (offset < 16 && len > 0) {
744 			*buf++ = data[offset++];
745 			len -= sizeof(__be32);
746 		}
747 	}
748 
749 	return 0;
750 }
751 
752 /*
753  * Return the specified PCI-E Configuration Space register from our Physical
754  * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
755  * since we prefer to let the firmware own all of these registers, but if that
756  * fails we go for it directly ourselves.
757  */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		/* Single access (NACCESS(1)) addressed through our own PF. */
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}
801 
802 /**
803  *	t4_get_regs_len - return the size of the chips register set
804  *	@adapter: the adapter
805  *
806  *	Returns the size of the chip's BAR0 register space.
807  */
808 unsigned int t4_get_regs_len(struct adapter *adapter)
809 {
810 	unsigned int chip_version = chip_id(adapter);
811 
812 	switch (chip_version) {
813 	case CHELSIO_T4:
814 		if (adapter->flags & IS_VF)
815 			return FW_T4VF_REGMAP_SIZE;
816 		return T4_REGMAP_SIZE;
817 
818 	case CHELSIO_T5:
819 	case CHELSIO_T6:
820 		if (adapter->flags & IS_VF)
821 			return FW_T4VF_REGMAP_SIZE;
822 		return T5_REGMAP_SIZE;
823 	}
824 
825 	CH_ERR(adapter,
826 		"Unsupported chip version %d\n", chip_version);
827 	return 0;
828 }
829 
830 /**
831  *	t4_get_regs - read chip registers into provided buffer
832  *	@adap: the adapter
833  *	@buf: register buffer
834  *	@buf_size: size (in bytes) of register buffer
835  *
836  *	If the provided register buffer isn't large enough for the chip's
837  *	full register range, the register dump will be truncated to the
838  *	register buffer's size.
839  */
840 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
841 {
842 	static const unsigned int t4_reg_ranges[] = {
843 		0x1008, 0x1108,
844 		0x1180, 0x1184,
845 		0x1190, 0x1194,
846 		0x11a0, 0x11a4,
847 		0x11b0, 0x11b4,
848 		0x11fc, 0x123c,
849 		0x1300, 0x173c,
850 		0x1800, 0x18fc,
851 		0x3000, 0x30d8,
852 		0x30e0, 0x30e4,
853 		0x30ec, 0x5910,
854 		0x5920, 0x5924,
855 		0x5960, 0x5960,
856 		0x5968, 0x5968,
857 		0x5970, 0x5970,
858 		0x5978, 0x5978,
859 		0x5980, 0x5980,
860 		0x5988, 0x5988,
861 		0x5990, 0x5990,
862 		0x5998, 0x5998,
863 		0x59a0, 0x59d4,
864 		0x5a00, 0x5ae0,
865 		0x5ae8, 0x5ae8,
866 		0x5af0, 0x5af0,
867 		0x5af8, 0x5af8,
868 		0x6000, 0x6098,
869 		0x6100, 0x6150,
870 		0x6200, 0x6208,
871 		0x6240, 0x6248,
872 		0x6280, 0x62b0,
873 		0x62c0, 0x6338,
874 		0x6370, 0x638c,
875 		0x6400, 0x643c,
876 		0x6500, 0x6524,
877 		0x6a00, 0x6a04,
878 		0x6a14, 0x6a38,
879 		0x6a60, 0x6a70,
880 		0x6a78, 0x6a78,
881 		0x6b00, 0x6b0c,
882 		0x6b1c, 0x6b84,
883 		0x6bf0, 0x6bf8,
884 		0x6c00, 0x6c0c,
885 		0x6c1c, 0x6c84,
886 		0x6cf0, 0x6cf8,
887 		0x6d00, 0x6d0c,
888 		0x6d1c, 0x6d84,
889 		0x6df0, 0x6df8,
890 		0x6e00, 0x6e0c,
891 		0x6e1c, 0x6e84,
892 		0x6ef0, 0x6ef8,
893 		0x6f00, 0x6f0c,
894 		0x6f1c, 0x6f84,
895 		0x6ff0, 0x6ff8,
896 		0x7000, 0x700c,
897 		0x701c, 0x7084,
898 		0x70f0, 0x70f8,
899 		0x7100, 0x710c,
900 		0x711c, 0x7184,
901 		0x71f0, 0x71f8,
902 		0x7200, 0x720c,
903 		0x721c, 0x7284,
904 		0x72f0, 0x72f8,
905 		0x7300, 0x730c,
906 		0x731c, 0x7384,
907 		0x73f0, 0x73f8,
908 		0x7400, 0x7450,
909 		0x7500, 0x7530,
910 		0x7600, 0x760c,
911 		0x7614, 0x761c,
912 		0x7680, 0x76cc,
913 		0x7700, 0x7798,
914 		0x77c0, 0x77fc,
915 		0x7900, 0x79fc,
916 		0x7b00, 0x7b58,
917 		0x7b60, 0x7b84,
918 		0x7b8c, 0x7c38,
919 		0x7d00, 0x7d38,
920 		0x7d40, 0x7d80,
921 		0x7d8c, 0x7ddc,
922 		0x7de4, 0x7e04,
923 		0x7e10, 0x7e1c,
924 		0x7e24, 0x7e38,
925 		0x7e40, 0x7e44,
926 		0x7e4c, 0x7e78,
927 		0x7e80, 0x7ea4,
928 		0x7eac, 0x7edc,
929 		0x7ee8, 0x7efc,
930 		0x8dc0, 0x8e04,
931 		0x8e10, 0x8e1c,
932 		0x8e30, 0x8e78,
933 		0x8ea0, 0x8eb8,
934 		0x8ec0, 0x8f6c,
935 		0x8fc0, 0x9008,
936 		0x9010, 0x9058,
937 		0x9060, 0x9060,
938 		0x9068, 0x9074,
939 		0x90fc, 0x90fc,
940 		0x9400, 0x9408,
941 		0x9410, 0x9458,
942 		0x9600, 0x9600,
943 		0x9608, 0x9638,
944 		0x9640, 0x96bc,
945 		0x9800, 0x9808,
946 		0x9820, 0x983c,
947 		0x9850, 0x9864,
948 		0x9c00, 0x9c6c,
949 		0x9c80, 0x9cec,
950 		0x9d00, 0x9d6c,
951 		0x9d80, 0x9dec,
952 		0x9e00, 0x9e6c,
953 		0x9e80, 0x9eec,
954 		0x9f00, 0x9f6c,
955 		0x9f80, 0x9fec,
956 		0xd004, 0xd004,
957 		0xd010, 0xd03c,
958 		0xdfc0, 0xdfe0,
959 		0xe000, 0xea7c,
960 		0xf000, 0x11110,
961 		0x11118, 0x11190,
962 		0x19040, 0x1906c,
963 		0x19078, 0x19080,
964 		0x1908c, 0x190e4,
965 		0x190f0, 0x190f8,
966 		0x19100, 0x19110,
967 		0x19120, 0x19124,
968 		0x19150, 0x19194,
969 		0x1919c, 0x191b0,
970 		0x191d0, 0x191e8,
971 		0x19238, 0x1924c,
972 		0x193f8, 0x1943c,
973 		0x1944c, 0x19474,
974 		0x19490, 0x194e0,
975 		0x194f0, 0x194f8,
976 		0x19800, 0x19c08,
977 		0x19c10, 0x19c90,
978 		0x19ca0, 0x19ce4,
979 		0x19cf0, 0x19d40,
980 		0x19d50, 0x19d94,
981 		0x19da0, 0x19de8,
982 		0x19df0, 0x19e40,
983 		0x19e50, 0x19e90,
984 		0x19ea0, 0x19f4c,
985 		0x1a000, 0x1a004,
986 		0x1a010, 0x1a06c,
987 		0x1a0b0, 0x1a0e4,
988 		0x1a0ec, 0x1a0f4,
989 		0x1a100, 0x1a108,
990 		0x1a114, 0x1a120,
991 		0x1a128, 0x1a130,
992 		0x1a138, 0x1a138,
993 		0x1a190, 0x1a1c4,
994 		0x1a1fc, 0x1a1fc,
995 		0x1e040, 0x1e04c,
996 		0x1e284, 0x1e28c,
997 		0x1e2c0, 0x1e2c0,
998 		0x1e2e0, 0x1e2e0,
999 		0x1e300, 0x1e384,
1000 		0x1e3c0, 0x1e3c8,
1001 		0x1e440, 0x1e44c,
1002 		0x1e684, 0x1e68c,
1003 		0x1e6c0, 0x1e6c0,
1004 		0x1e6e0, 0x1e6e0,
1005 		0x1e700, 0x1e784,
1006 		0x1e7c0, 0x1e7c8,
1007 		0x1e840, 0x1e84c,
1008 		0x1ea84, 0x1ea8c,
1009 		0x1eac0, 0x1eac0,
1010 		0x1eae0, 0x1eae0,
1011 		0x1eb00, 0x1eb84,
1012 		0x1ebc0, 0x1ebc8,
1013 		0x1ec40, 0x1ec4c,
1014 		0x1ee84, 0x1ee8c,
1015 		0x1eec0, 0x1eec0,
1016 		0x1eee0, 0x1eee0,
1017 		0x1ef00, 0x1ef84,
1018 		0x1efc0, 0x1efc8,
1019 		0x1f040, 0x1f04c,
1020 		0x1f284, 0x1f28c,
1021 		0x1f2c0, 0x1f2c0,
1022 		0x1f2e0, 0x1f2e0,
1023 		0x1f300, 0x1f384,
1024 		0x1f3c0, 0x1f3c8,
1025 		0x1f440, 0x1f44c,
1026 		0x1f684, 0x1f68c,
1027 		0x1f6c0, 0x1f6c0,
1028 		0x1f6e0, 0x1f6e0,
1029 		0x1f700, 0x1f784,
1030 		0x1f7c0, 0x1f7c8,
1031 		0x1f840, 0x1f84c,
1032 		0x1fa84, 0x1fa8c,
1033 		0x1fac0, 0x1fac0,
1034 		0x1fae0, 0x1fae0,
1035 		0x1fb00, 0x1fb84,
1036 		0x1fbc0, 0x1fbc8,
1037 		0x1fc40, 0x1fc4c,
1038 		0x1fe84, 0x1fe8c,
1039 		0x1fec0, 0x1fec0,
1040 		0x1fee0, 0x1fee0,
1041 		0x1ff00, 0x1ff84,
1042 		0x1ffc0, 0x1ffc8,
1043 		0x20000, 0x2002c,
1044 		0x20100, 0x2013c,
1045 		0x20190, 0x201a0,
1046 		0x201a8, 0x201b8,
1047 		0x201c4, 0x201c8,
1048 		0x20200, 0x20318,
1049 		0x20400, 0x204b4,
1050 		0x204c0, 0x20528,
1051 		0x20540, 0x20614,
1052 		0x21000, 0x21040,
1053 		0x2104c, 0x21060,
1054 		0x210c0, 0x210ec,
1055 		0x21200, 0x21268,
1056 		0x21270, 0x21284,
1057 		0x212fc, 0x21388,
1058 		0x21400, 0x21404,
1059 		0x21500, 0x21500,
1060 		0x21510, 0x21518,
1061 		0x2152c, 0x21530,
1062 		0x2153c, 0x2153c,
1063 		0x21550, 0x21554,
1064 		0x21600, 0x21600,
1065 		0x21608, 0x2161c,
1066 		0x21624, 0x21628,
1067 		0x21630, 0x21634,
1068 		0x2163c, 0x2163c,
1069 		0x21700, 0x2171c,
1070 		0x21780, 0x2178c,
1071 		0x21800, 0x21818,
1072 		0x21820, 0x21828,
1073 		0x21830, 0x21848,
1074 		0x21850, 0x21854,
1075 		0x21860, 0x21868,
1076 		0x21870, 0x21870,
1077 		0x21878, 0x21898,
1078 		0x218a0, 0x218a8,
1079 		0x218b0, 0x218c8,
1080 		0x218d0, 0x218d4,
1081 		0x218e0, 0x218e8,
1082 		0x218f0, 0x218f0,
1083 		0x218f8, 0x21a18,
1084 		0x21a20, 0x21a28,
1085 		0x21a30, 0x21a48,
1086 		0x21a50, 0x21a54,
1087 		0x21a60, 0x21a68,
1088 		0x21a70, 0x21a70,
1089 		0x21a78, 0x21a98,
1090 		0x21aa0, 0x21aa8,
1091 		0x21ab0, 0x21ac8,
1092 		0x21ad0, 0x21ad4,
1093 		0x21ae0, 0x21ae8,
1094 		0x21af0, 0x21af0,
1095 		0x21af8, 0x21c18,
1096 		0x21c20, 0x21c20,
1097 		0x21c28, 0x21c30,
1098 		0x21c38, 0x21c38,
1099 		0x21c80, 0x21c98,
1100 		0x21ca0, 0x21ca8,
1101 		0x21cb0, 0x21cc8,
1102 		0x21cd0, 0x21cd4,
1103 		0x21ce0, 0x21ce8,
1104 		0x21cf0, 0x21cf0,
1105 		0x21cf8, 0x21d7c,
1106 		0x21e00, 0x21e04,
1107 		0x22000, 0x2202c,
1108 		0x22100, 0x2213c,
1109 		0x22190, 0x221a0,
1110 		0x221a8, 0x221b8,
1111 		0x221c4, 0x221c8,
1112 		0x22200, 0x22318,
1113 		0x22400, 0x224b4,
1114 		0x224c0, 0x22528,
1115 		0x22540, 0x22614,
1116 		0x23000, 0x23040,
1117 		0x2304c, 0x23060,
1118 		0x230c0, 0x230ec,
1119 		0x23200, 0x23268,
1120 		0x23270, 0x23284,
1121 		0x232fc, 0x23388,
1122 		0x23400, 0x23404,
1123 		0x23500, 0x23500,
1124 		0x23510, 0x23518,
1125 		0x2352c, 0x23530,
1126 		0x2353c, 0x2353c,
1127 		0x23550, 0x23554,
1128 		0x23600, 0x23600,
1129 		0x23608, 0x2361c,
1130 		0x23624, 0x23628,
1131 		0x23630, 0x23634,
1132 		0x2363c, 0x2363c,
1133 		0x23700, 0x2371c,
1134 		0x23780, 0x2378c,
1135 		0x23800, 0x23818,
1136 		0x23820, 0x23828,
1137 		0x23830, 0x23848,
1138 		0x23850, 0x23854,
1139 		0x23860, 0x23868,
1140 		0x23870, 0x23870,
1141 		0x23878, 0x23898,
1142 		0x238a0, 0x238a8,
1143 		0x238b0, 0x238c8,
1144 		0x238d0, 0x238d4,
1145 		0x238e0, 0x238e8,
1146 		0x238f0, 0x238f0,
1147 		0x238f8, 0x23a18,
1148 		0x23a20, 0x23a28,
1149 		0x23a30, 0x23a48,
1150 		0x23a50, 0x23a54,
1151 		0x23a60, 0x23a68,
1152 		0x23a70, 0x23a70,
1153 		0x23a78, 0x23a98,
1154 		0x23aa0, 0x23aa8,
1155 		0x23ab0, 0x23ac8,
1156 		0x23ad0, 0x23ad4,
1157 		0x23ae0, 0x23ae8,
1158 		0x23af0, 0x23af0,
1159 		0x23af8, 0x23c18,
1160 		0x23c20, 0x23c20,
1161 		0x23c28, 0x23c30,
1162 		0x23c38, 0x23c38,
1163 		0x23c80, 0x23c98,
1164 		0x23ca0, 0x23ca8,
1165 		0x23cb0, 0x23cc8,
1166 		0x23cd0, 0x23cd4,
1167 		0x23ce0, 0x23ce8,
1168 		0x23cf0, 0x23cf0,
1169 		0x23cf8, 0x23d7c,
1170 		0x23e00, 0x23e04,
1171 		0x24000, 0x2402c,
1172 		0x24100, 0x2413c,
1173 		0x24190, 0x241a0,
1174 		0x241a8, 0x241b8,
1175 		0x241c4, 0x241c8,
1176 		0x24200, 0x24318,
1177 		0x24400, 0x244b4,
1178 		0x244c0, 0x24528,
1179 		0x24540, 0x24614,
1180 		0x25000, 0x25040,
1181 		0x2504c, 0x25060,
1182 		0x250c0, 0x250ec,
1183 		0x25200, 0x25268,
1184 		0x25270, 0x25284,
1185 		0x252fc, 0x25388,
1186 		0x25400, 0x25404,
1187 		0x25500, 0x25500,
1188 		0x25510, 0x25518,
1189 		0x2552c, 0x25530,
1190 		0x2553c, 0x2553c,
1191 		0x25550, 0x25554,
1192 		0x25600, 0x25600,
1193 		0x25608, 0x2561c,
1194 		0x25624, 0x25628,
1195 		0x25630, 0x25634,
1196 		0x2563c, 0x2563c,
1197 		0x25700, 0x2571c,
1198 		0x25780, 0x2578c,
1199 		0x25800, 0x25818,
1200 		0x25820, 0x25828,
1201 		0x25830, 0x25848,
1202 		0x25850, 0x25854,
1203 		0x25860, 0x25868,
1204 		0x25870, 0x25870,
1205 		0x25878, 0x25898,
1206 		0x258a0, 0x258a8,
1207 		0x258b0, 0x258c8,
1208 		0x258d0, 0x258d4,
1209 		0x258e0, 0x258e8,
1210 		0x258f0, 0x258f0,
1211 		0x258f8, 0x25a18,
1212 		0x25a20, 0x25a28,
1213 		0x25a30, 0x25a48,
1214 		0x25a50, 0x25a54,
1215 		0x25a60, 0x25a68,
1216 		0x25a70, 0x25a70,
1217 		0x25a78, 0x25a98,
1218 		0x25aa0, 0x25aa8,
1219 		0x25ab0, 0x25ac8,
1220 		0x25ad0, 0x25ad4,
1221 		0x25ae0, 0x25ae8,
1222 		0x25af0, 0x25af0,
1223 		0x25af8, 0x25c18,
1224 		0x25c20, 0x25c20,
1225 		0x25c28, 0x25c30,
1226 		0x25c38, 0x25c38,
1227 		0x25c80, 0x25c98,
1228 		0x25ca0, 0x25ca8,
1229 		0x25cb0, 0x25cc8,
1230 		0x25cd0, 0x25cd4,
1231 		0x25ce0, 0x25ce8,
1232 		0x25cf0, 0x25cf0,
1233 		0x25cf8, 0x25d7c,
1234 		0x25e00, 0x25e04,
1235 		0x26000, 0x2602c,
1236 		0x26100, 0x2613c,
1237 		0x26190, 0x261a0,
1238 		0x261a8, 0x261b8,
1239 		0x261c4, 0x261c8,
1240 		0x26200, 0x26318,
1241 		0x26400, 0x264b4,
1242 		0x264c0, 0x26528,
1243 		0x26540, 0x26614,
1244 		0x27000, 0x27040,
1245 		0x2704c, 0x27060,
1246 		0x270c0, 0x270ec,
1247 		0x27200, 0x27268,
1248 		0x27270, 0x27284,
1249 		0x272fc, 0x27388,
1250 		0x27400, 0x27404,
1251 		0x27500, 0x27500,
1252 		0x27510, 0x27518,
1253 		0x2752c, 0x27530,
1254 		0x2753c, 0x2753c,
1255 		0x27550, 0x27554,
1256 		0x27600, 0x27600,
1257 		0x27608, 0x2761c,
1258 		0x27624, 0x27628,
1259 		0x27630, 0x27634,
1260 		0x2763c, 0x2763c,
1261 		0x27700, 0x2771c,
1262 		0x27780, 0x2778c,
1263 		0x27800, 0x27818,
1264 		0x27820, 0x27828,
1265 		0x27830, 0x27848,
1266 		0x27850, 0x27854,
1267 		0x27860, 0x27868,
1268 		0x27870, 0x27870,
1269 		0x27878, 0x27898,
1270 		0x278a0, 0x278a8,
1271 		0x278b0, 0x278c8,
1272 		0x278d0, 0x278d4,
1273 		0x278e0, 0x278e8,
1274 		0x278f0, 0x278f0,
1275 		0x278f8, 0x27a18,
1276 		0x27a20, 0x27a28,
1277 		0x27a30, 0x27a48,
1278 		0x27a50, 0x27a54,
1279 		0x27a60, 0x27a68,
1280 		0x27a70, 0x27a70,
1281 		0x27a78, 0x27a98,
1282 		0x27aa0, 0x27aa8,
1283 		0x27ab0, 0x27ac8,
1284 		0x27ad0, 0x27ad4,
1285 		0x27ae0, 0x27ae8,
1286 		0x27af0, 0x27af0,
1287 		0x27af8, 0x27c18,
1288 		0x27c20, 0x27c20,
1289 		0x27c28, 0x27c30,
1290 		0x27c38, 0x27c38,
1291 		0x27c80, 0x27c98,
1292 		0x27ca0, 0x27ca8,
1293 		0x27cb0, 0x27cc8,
1294 		0x27cd0, 0x27cd4,
1295 		0x27ce0, 0x27ce8,
1296 		0x27cf0, 0x27cf0,
1297 		0x27cf8, 0x27d7c,
1298 		0x27e00, 0x27e04,
1299 	};
1300 
	/*
	 * Register ranges visible to a T4 virtual function, as {first, last}
	 * address pairs.  The VF_*_REG() macros translate a VF-relative
	 * register into its address within the VF's register window, and the
	 * final pair spans the firmware mailbox data registers
	 * (NUM_CIM_PF_MAILBOX_DATA_INSTANCES consecutive 32-bit words).
	 * Note the PL range is the single A_PL_VF_WHOAMI register here; the
	 * T5 table below extends it through A_PL_VF_REVISION.
	 * NOTE(review): presumably walked two entries at a time by the
	 * enclosing register-dump routine -- confirm against that loop.
	 */
	static const unsigned int t4vf_reg_ranges[] = {
		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
		VF_MPS_REG(A_MPS_VF_CTL),
		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
		FW_T4VF_MBDATA_BASE_ADDR,
		FW_T4VF_MBDATA_BASE_ADDR +
		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
	};
1312 
	/*
	 * T5 register address ranges captured by the register dump, stored as
	 * consecutive {first, last} pairs.  Single-register entries (e.g.
	 * "0x9060, 0x9060") show both ends of a pair are included.  Entries
	 * are listed in ascending address order and do not overlap.
	 * NOTE(review): presumably iterated two entries at a time by the
	 * enclosing dump routine -- confirm against that loop before editing.
	 */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x10c0,
		0x10cc, 0x10f8,
		0x1100, 0x1100,
		0x110c, 0x1148,
		0x1180, 0x1184,
		0x1190, 0x1194,
		0x11a0, 0x11a4,
		0x11b0, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30b0,
		0x30b8, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x56ec,
		0x56f4, 0x5720,
		0x5728, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x589c,
		0x58a4, 0x58ac,
		0x58b8, 0x58bc,
		0x5940, 0x59c8,
		0x59d0, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a70,
		0x5a80, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6020,
		0x6028, 0x6040,
		0x6058, 0x609c,
		0x60a8, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7b58,
		0x7b60, 0x7b84,
		0x7b8c, 0x7c54,
		0x7d00, 0x7d38,
		0x7d40, 0x7d80,
		0x7d8c, 0x7ddc,
		0x7de4, 0x7e04,
		0x7e10, 0x7e1c,
		0x7e24, 0x7e38,
		0x7e40, 0x7e44,
		0x7e4c, 0x7e78,
		0x7e80, 0x7edc,
		0x7ee8, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e04,
		0x8e10, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x9058,
		0x9060, 0x9060,
		0x9068, 0x90f8,
		0x9400, 0x9408,
		0x9410, 0x9470,
		0x9600, 0x9600,
		0x9608, 0x9638,
		0x9640, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd004,
		0xd010, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x1106c,
		0x11074, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x190e8,
		0x190f0, 0x190f8,
		0x19100, 0x19110,
		0x19120, 0x19124,
		0x19150, 0x19194,
		0x1919c, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19428,
		0x19430, 0x19444,
		0x1944c, 0x1946c,
		0x19474, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c08,
		0x19c10, 0x19c60,
		0x19c94, 0x19ce4,
		0x19cf0, 0x19d40,
		0x19d50, 0x19d94,
		0x19da0, 0x19de8,
		0x19df0, 0x19e10,
		0x19e50, 0x19e90,
		0x19ea0, 0x19f24,
		0x19f34, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fb4,
		0x19fc4, 0x19fe4,
		0x1a000, 0x1a004,
		0x1a010, 0x1a06c,
		0x1a0b0, 0x1a0e4,
		0x1a0ec, 0x1a0f8,
		0x1a100, 0x1a108,
		0x1a114, 0x1a120,
		0x1a128, 0x1a130,
		0x1a138, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e044,
		0x1e04c, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e444,
		0x1e44c, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e844,
		0x1e84c, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec44,
		0x1ec4c, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f044,
		0x1f04c, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f444,
		0x1f44c, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f844,
		0x1f84c, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc44,
		0x1fc4c, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301a0,
		0x301a8, 0x301b8,
		0x301c4, 0x301c8,
		0x301d0, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x304b4,
		0x304c0, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30828,
		0x30834, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a14,
		0x30a1c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30a74,
		0x30a7c, 0x30afc,
		0x30b08, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d3c,
		0x30d48, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b10,
		0x32b20, 0x32b30,
		0x32b40, 0x32b50,
		0x32b60, 0x32b70,
		0x33000, 0x33028,
		0x33030, 0x33048,
		0x33060, 0x33068,
		0x33070, 0x3309c,
		0x330f0, 0x33128,
		0x33130, 0x33148,
		0x33160, 0x33168,
		0x33170, 0x3319c,
		0x331f0, 0x33238,
		0x33240, 0x33240,
		0x33248, 0x33250,
		0x3325c, 0x33264,
		0x33270, 0x332b8,
		0x332c0, 0x332e4,
		0x332f8, 0x33338,
		0x33340, 0x33340,
		0x33348, 0x33350,
		0x3335c, 0x33364,
		0x33370, 0x333b8,
		0x333c0, 0x333e4,
		0x333f8, 0x33428,
		0x33430, 0x33448,
		0x33460, 0x33468,
		0x33470, 0x3349c,
		0x334f0, 0x33528,
		0x33530, 0x33548,
		0x33560, 0x33568,
		0x33570, 0x3359c,
		0x335f0, 0x33638,
		0x33640, 0x33640,
		0x33648, 0x33650,
		0x3365c, 0x33664,
		0x33670, 0x336b8,
		0x336c0, 0x336e4,
		0x336f8, 0x33738,
		0x33740, 0x33740,
		0x33748, 0x33750,
		0x3375c, 0x33764,
		0x33770, 0x337b8,
		0x337c0, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33928,
		0x33930, 0x33948,
		0x33960, 0x33968,
		0x33970, 0x3399c,
		0x339f0, 0x33a38,
		0x33a40, 0x33a40,
		0x33a48, 0x33a50,
		0x33a5c, 0x33a64,
		0x33a70, 0x33ab8,
		0x33ac0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341a0,
		0x341a8, 0x341b8,
		0x341c4, 0x341c8,
		0x341d0, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x344b4,
		0x344c0, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34828,
		0x34834, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a14,
		0x34a1c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34a74,
		0x34a7c, 0x34afc,
		0x34b08, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d3c,
		0x34d48, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b10,
		0x36b20, 0x36b30,
		0x36b40, 0x36b50,
		0x36b60, 0x36b70,
		0x37000, 0x37028,
		0x37030, 0x37048,
		0x37060, 0x37068,
		0x37070, 0x3709c,
		0x370f0, 0x37128,
		0x37130, 0x37148,
		0x37160, 0x37168,
		0x37170, 0x3719c,
		0x371f0, 0x37238,
		0x37240, 0x37240,
		0x37248, 0x37250,
		0x3725c, 0x37264,
		0x37270, 0x372b8,
		0x372c0, 0x372e4,
		0x372f8, 0x37338,
		0x37340, 0x37340,
		0x37348, 0x37350,
		0x3735c, 0x37364,
		0x37370, 0x373b8,
		0x373c0, 0x373e4,
		0x373f8, 0x37428,
		0x37430, 0x37448,
		0x37460, 0x37468,
		0x37470, 0x3749c,
		0x374f0, 0x37528,
		0x37530, 0x37548,
		0x37560, 0x37568,
		0x37570, 0x3759c,
		0x375f0, 0x37638,
		0x37640, 0x37640,
		0x37648, 0x37650,
		0x3765c, 0x37664,
		0x37670, 0x376b8,
		0x376c0, 0x376e4,
		0x376f8, 0x37738,
		0x37740, 0x37740,
		0x37748, 0x37750,
		0x3775c, 0x37764,
		0x37770, 0x377b8,
		0x377c0, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37928,
		0x37930, 0x37948,
		0x37960, 0x37968,
		0x37970, 0x3799c,
		0x379f0, 0x37a38,
		0x37a40, 0x37a40,
		0x37a48, 0x37a50,
		0x37a5c, 0x37a64,
		0x37a70, 0x37ab8,
		0x37ac0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381a0,
		0x381a8, 0x381b8,
		0x381c4, 0x381c8,
		0x381d0, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x384b4,
		0x384c0, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38828,
		0x38834, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a14,
		0x38a1c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38a74,
		0x38a7c, 0x38afc,
		0x38b08, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d3c,
		0x38d48, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab10,
		0x3ab20, 0x3ab30,
		0x3ab40, 0x3ab50,
		0x3ab60, 0x3ab70,
		0x3b000, 0x3b028,
		0x3b030, 0x3b048,
		0x3b060, 0x3b068,
		0x3b070, 0x3b09c,
		0x3b0f0, 0x3b128,
		0x3b130, 0x3b148,
		0x3b160, 0x3b168,
		0x3b170, 0x3b19c,
		0x3b1f0, 0x3b238,
		0x3b240, 0x3b240,
		0x3b248, 0x3b250,
		0x3b25c, 0x3b264,
		0x3b270, 0x3b2b8,
		0x3b2c0, 0x3b2e4,
		0x3b2f8, 0x3b338,
		0x3b340, 0x3b340,
		0x3b348, 0x3b350,
		0x3b35c, 0x3b364,
		0x3b370, 0x3b3b8,
		0x3b3c0, 0x3b3e4,
		0x3b3f8, 0x3b428,
		0x3b430, 0x3b448,
		0x3b460, 0x3b468,
		0x3b470, 0x3b49c,
		0x3b4f0, 0x3b528,
		0x3b530, 0x3b548,
		0x3b560, 0x3b568,
		0x3b570, 0x3b59c,
		0x3b5f0, 0x3b638,
		0x3b640, 0x3b640,
		0x3b648, 0x3b650,
		0x3b65c, 0x3b664,
		0x3b670, 0x3b6b8,
		0x3b6c0, 0x3b6e4,
		0x3b6f8, 0x3b738,
		0x3b740, 0x3b740,
		0x3b748, 0x3b750,
		0x3b75c, 0x3b764,
		0x3b770, 0x3b7b8,
		0x3b7c0, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b928,
		0x3b930, 0x3b948,
		0x3b960, 0x3b968,
		0x3b970, 0x3b99c,
		0x3b9f0, 0x3ba38,
		0x3ba40, 0x3ba40,
		0x3ba48, 0x3ba50,
		0x3ba5c, 0x3ba64,
		0x3ba70, 0x3bab8,
		0x3bac0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1a0,
		0x3c1a8, 0x3c1b8,
		0x3c1c4, 0x3c1c8,
		0x3c1d0, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c4b4,
		0x3c4c0, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c828,
		0x3c834, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca14,
		0x3ca1c, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3ca74,
		0x3ca7c, 0x3cafc,
		0x3cb08, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd3c,
		0x3cd48, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb10,
		0x3eb20, 0x3eb30,
		0x3eb40, 0x3eb50,
		0x3eb60, 0x3eb70,
		0x3f000, 0x3f028,
		0x3f030, 0x3f048,
		0x3f060, 0x3f068,
		0x3f070, 0x3f09c,
		0x3f0f0, 0x3f128,
		0x3f130, 0x3f148,
		0x3f160, 0x3f168,
		0x3f170, 0x3f19c,
		0x3f1f0, 0x3f238,
		0x3f240, 0x3f240,
		0x3f248, 0x3f250,
		0x3f25c, 0x3f264,
		0x3f270, 0x3f2b8,
		0x3f2c0, 0x3f2e4,
		0x3f2f8, 0x3f338,
		0x3f340, 0x3f340,
		0x3f348, 0x3f350,
		0x3f35c, 0x3f364,
		0x3f370, 0x3f3b8,
		0x3f3c0, 0x3f3e4,
		0x3f3f8, 0x3f428,
		0x3f430, 0x3f448,
		0x3f460, 0x3f468,
		0x3f470, 0x3f49c,
		0x3f4f0, 0x3f528,
		0x3f530, 0x3f548,
		0x3f560, 0x3f568,
		0x3f570, 0x3f59c,
		0x3f5f0, 0x3f638,
		0x3f640, 0x3f640,
		0x3f648, 0x3f650,
		0x3f65c, 0x3f664,
		0x3f670, 0x3f6b8,
		0x3f6c0, 0x3f6e4,
		0x3f6f8, 0x3f738,
		0x3f740, 0x3f740,
		0x3f748, 0x3f750,
		0x3f75c, 0x3f764,
		0x3f770, 0x3f7b8,
		0x3f7c0, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f928,
		0x3f930, 0x3f948,
		0x3f960, 0x3f968,
		0x3f970, 0x3f99c,
		0x3f9f0, 0x3fa38,
		0x3fa40, 0x3fa40,
		0x3fa48, 0x3fa50,
		0x3fa5c, 0x3fa64,
		0x3fa70, 0x3fab8,
		0x3fac0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40050,
		0x40060, 0x40068,
		0x4007c, 0x4008c,
		0x40094, 0x400b0,
		0x400c0, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40254,
		0x40260, 0x40264,
		0x40270, 0x40288,
		0x40290, 0x40298,
		0x402ac, 0x402c8,
		0x402d0, 0x402e0,
		0x402f0, 0x402f0,
		0x40300, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4140c,
		0x41414, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44054,
		0x4405c, 0x44078,
		0x440c0, 0x44174,
		0x44180, 0x441ac,
		0x441b4, 0x441b8,
		0x441c0, 0x44254,
		0x4425c, 0x44278,
		0x442c0, 0x44374,
		0x44380, 0x443ac,
		0x443b4, 0x443b8,
		0x443c0, 0x44454,
		0x4445c, 0x44478,
		0x444c0, 0x44574,
		0x44580, 0x445ac,
		0x445b4, 0x445b8,
		0x445c0, 0x44654,
		0x4465c, 0x44678,
		0x446c0, 0x44774,
		0x44780, 0x447ac,
		0x447b4, 0x447b8,
		0x447c0, 0x44854,
		0x4485c, 0x44878,
		0x448c0, 0x44974,
		0x44980, 0x449ac,
		0x449b4, 0x449b8,
		0x449c0, 0x449fc,
		0x45000, 0x45004,
		0x45010, 0x45030,
		0x45040, 0x45060,
		0x45068, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45204,
		0x45210, 0x45230,
		0x45240, 0x45260,
		0x45268, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4703c,
		0x47044, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47408,
		0x47414, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48050,
		0x48060, 0x48068,
		0x4807c, 0x4808c,
		0x48094, 0x480b0,
		0x480c0, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48254,
		0x48260, 0x48264,
		0x48270, 0x48288,
		0x48290, 0x48298,
		0x482ac, 0x482c8,
		0x482d0, 0x482e0,
		0x482f0, 0x482f0,
		0x48300, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4940c,
		0x49414, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c054,
		0x4c05c, 0x4c078,
		0x4c0c0, 0x4c174,
		0x4c180, 0x4c1ac,
		0x4c1b4, 0x4c1b8,
		0x4c1c0, 0x4c254,
		0x4c25c, 0x4c278,
		0x4c2c0, 0x4c374,
		0x4c380, 0x4c3ac,
		0x4c3b4, 0x4c3b8,
		0x4c3c0, 0x4c454,
		0x4c45c, 0x4c478,
		0x4c4c0, 0x4c574,
		0x4c580, 0x4c5ac,
		0x4c5b4, 0x4c5b8,
		0x4c5c0, 0x4c654,
		0x4c65c, 0x4c678,
		0x4c6c0, 0x4c774,
		0x4c780, 0x4c7ac,
		0x4c7b4, 0x4c7b8,
		0x4c7c0, 0x4c854,
		0x4c85c, 0x4c878,
		0x4c8c0, 0x4c974,
		0x4c980, 0x4c9ac,
		0x4c9b4, 0x4c9b8,
		0x4c9c0, 0x4c9fc,
		0x4d000, 0x4d004,
		0x4d010, 0x4d030,
		0x4d040, 0x4d060,
		0x4d068, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d204,
		0x4d210, 0x4d230,
		0x4d240, 0x4d260,
		0x4d268, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f03c,
		0x4f044, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f408,
		0x4f414, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x50084,
		0x50090, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x50884,
		0x50890, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};
2079 
	/*
	 * Register ranges visible to a T5 virtual function, as {first, last}
	 * address pairs.  Same layout as t4vf_reg_ranges, except the PL range
	 * here spans A_PL_VF_WHOAMI through A_PL_VF_REVISION (the T4 table
	 * covers only WHOAMI).  The final pair spans the firmware mailbox
	 * data registers (NUM_CIM_PF_MAILBOX_DATA_INSTANCES 32-bit words).
	 */
	static const unsigned int t5vf_reg_ranges[] = {
		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
		VF_MPS_REG(A_MPS_VF_CTL),
		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
		FW_T4VF_MBDATA_BASE_ADDR,
		FW_T4VF_MBDATA_BASE_ADDR +
		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
	};
2091 
2092 	static const unsigned int t6_reg_ranges[] = {
2093 		0x1008, 0x101c,
2094 		0x1024, 0x10a8,
2095 		0x10b4, 0x10f8,
2096 		0x1100, 0x1114,
2097 		0x111c, 0x112c,
2098 		0x1138, 0x113c,
2099 		0x1144, 0x114c,
2100 		0x1180, 0x1184,
2101 		0x1190, 0x1194,
2102 		0x11a0, 0x11a4,
2103 		0x11b0, 0x11b4,
2104 		0x11fc, 0x1274,
2105 		0x1280, 0x133c,
2106 		0x1800, 0x18fc,
2107 		0x3000, 0x302c,
2108 		0x3060, 0x30b0,
2109 		0x30b8, 0x30d8,
2110 		0x30e0, 0x30fc,
2111 		0x3140, 0x357c,
2112 		0x35a8, 0x35cc,
2113 		0x35ec, 0x35ec,
2114 		0x3600, 0x5624,
2115 		0x56cc, 0x56ec,
2116 		0x56f4, 0x5720,
2117 		0x5728, 0x575c,
2118 		0x580c, 0x5814,
2119 		0x5890, 0x589c,
2120 		0x58a4, 0x58ac,
2121 		0x58b8, 0x58bc,
2122 		0x5940, 0x595c,
2123 		0x5980, 0x598c,
2124 		0x59b0, 0x59c8,
2125 		0x59d0, 0x59dc,
2126 		0x59fc, 0x5a18,
2127 		0x5a60, 0x5a6c,
2128 		0x5a80, 0x5a8c,
2129 		0x5a94, 0x5a9c,
2130 		0x5b94, 0x5bfc,
2131 		0x5c10, 0x5e48,
2132 		0x5e50, 0x5e94,
2133 		0x5ea0, 0x5eb0,
2134 		0x5ec0, 0x5ec0,
2135 		0x5ec8, 0x5ed0,
2136 		0x5ee0, 0x5ee0,
2137 		0x5ef0, 0x5ef0,
2138 		0x5f00, 0x5f00,
2139 		0x6000, 0x6020,
2140 		0x6028, 0x6040,
2141 		0x6058, 0x609c,
2142 		0x60a8, 0x619c,
2143 		0x7700, 0x7798,
2144 		0x77c0, 0x7880,
2145 		0x78cc, 0x78fc,
2146 		0x7b00, 0x7b58,
2147 		0x7b60, 0x7b84,
2148 		0x7b8c, 0x7c54,
2149 		0x7d00, 0x7d38,
2150 		0x7d40, 0x7d84,
2151 		0x7d8c, 0x7ddc,
2152 		0x7de4, 0x7e04,
2153 		0x7e10, 0x7e1c,
2154 		0x7e24, 0x7e38,
2155 		0x7e40, 0x7e44,
2156 		0x7e4c, 0x7e78,
2157 		0x7e80, 0x7edc,
2158 		0x7ee8, 0x7efc,
2159 		0x8dc0, 0x8de4,
2160 		0x8df8, 0x8e04,
2161 		0x8e10, 0x8e84,
2162 		0x8ea0, 0x8f88,
2163 		0x8fb8, 0x9058,
2164 		0x9060, 0x9060,
2165 		0x9068, 0x90f8,
2166 		0x9100, 0x9124,
2167 		0x9400, 0x9470,
2168 		0x9600, 0x9600,
2169 		0x9608, 0x9638,
2170 		0x9640, 0x9704,
2171 		0x9710, 0x971c,
2172 		0x9800, 0x9808,
2173 		0x9820, 0x983c,
2174 		0x9850, 0x9864,
2175 		0x9c00, 0x9c6c,
2176 		0x9c80, 0x9cec,
2177 		0x9d00, 0x9d6c,
2178 		0x9d80, 0x9dec,
2179 		0x9e00, 0x9e6c,
2180 		0x9e80, 0x9eec,
2181 		0x9f00, 0x9f6c,
2182 		0x9f80, 0xa020,
2183 		0xd004, 0xd03c,
2184 		0xd100, 0xd118,
2185 		0xd200, 0xd214,
2186 		0xd220, 0xd234,
2187 		0xd240, 0xd254,
2188 		0xd260, 0xd274,
2189 		0xd280, 0xd294,
2190 		0xd2a0, 0xd2b4,
2191 		0xd2c0, 0xd2d4,
2192 		0xd2e0, 0xd2f4,
2193 		0xd300, 0xd31c,
2194 		0xdfc0, 0xdfe0,
2195 		0xe000, 0xf008,
2196 		0xf010, 0xf018,
2197 		0xf020, 0xf028,
2198 		0x11000, 0x11014,
2199 		0x11048, 0x1106c,
2200 		0x11074, 0x11088,
2201 		0x11098, 0x11120,
2202 		0x1112c, 0x1117c,
2203 		0x11190, 0x112e0,
2204 		0x11300, 0x1130c,
2205 		0x12000, 0x1206c,
2206 		0x19040, 0x1906c,
2207 		0x19078, 0x19080,
2208 		0x1908c, 0x190e8,
2209 		0x190f0, 0x190f8,
2210 		0x19100, 0x19110,
2211 		0x19120, 0x19124,
2212 		0x19150, 0x19194,
2213 		0x1919c, 0x191b0,
2214 		0x191d0, 0x191e8,
2215 		0x19238, 0x19290,
2216 		0x192a4, 0x192b0,
2217 		0x192bc, 0x192bc,
2218 		0x19348, 0x1934c,
2219 		0x193f8, 0x19418,
2220 		0x19420, 0x19428,
2221 		0x19430, 0x19444,
2222 		0x1944c, 0x1946c,
2223 		0x19474, 0x19474,
2224 		0x19490, 0x194cc,
2225 		0x194f0, 0x194f8,
2226 		0x19c00, 0x19c48,
2227 		0x19c50, 0x19c80,
2228 		0x19c94, 0x19c98,
2229 		0x19ca0, 0x19cbc,
2230 		0x19ce4, 0x19ce4,
2231 		0x19cf0, 0x19cf8,
2232 		0x19d00, 0x19d28,
2233 		0x19d50, 0x19d78,
2234 		0x19d94, 0x19d98,
2235 		0x19da0, 0x19dc8,
2236 		0x19df0, 0x19e10,
2237 		0x19e50, 0x19e6c,
2238 		0x19ea0, 0x19ebc,
2239 		0x19ec4, 0x19ef4,
2240 		0x19f04, 0x19f2c,
2241 		0x19f34, 0x19f34,
2242 		0x19f40, 0x19f50,
2243 		0x19f90, 0x19fac,
2244 		0x19fc4, 0x19fc8,
2245 		0x19fd0, 0x19fe4,
2246 		0x1a000, 0x1a004,
2247 		0x1a010, 0x1a06c,
2248 		0x1a0b0, 0x1a0e4,
2249 		0x1a0ec, 0x1a0f8,
2250 		0x1a100, 0x1a108,
2251 		0x1a114, 0x1a120,
2252 		0x1a128, 0x1a130,
2253 		0x1a138, 0x1a138,
2254 		0x1a190, 0x1a1c4,
2255 		0x1a1fc, 0x1a1fc,
2256 		0x1e008, 0x1e00c,
2257 		0x1e040, 0x1e044,
2258 		0x1e04c, 0x1e04c,
2259 		0x1e284, 0x1e290,
2260 		0x1e2c0, 0x1e2c0,
2261 		0x1e2e0, 0x1e2e0,
2262 		0x1e300, 0x1e384,
2263 		0x1e3c0, 0x1e3c8,
2264 		0x1e408, 0x1e40c,
2265 		0x1e440, 0x1e444,
2266 		0x1e44c, 0x1e44c,
2267 		0x1e684, 0x1e690,
2268 		0x1e6c0, 0x1e6c0,
2269 		0x1e6e0, 0x1e6e0,
2270 		0x1e700, 0x1e784,
2271 		0x1e7c0, 0x1e7c8,
2272 		0x1e808, 0x1e80c,
2273 		0x1e840, 0x1e844,
2274 		0x1e84c, 0x1e84c,
2275 		0x1ea84, 0x1ea90,
2276 		0x1eac0, 0x1eac0,
2277 		0x1eae0, 0x1eae0,
2278 		0x1eb00, 0x1eb84,
2279 		0x1ebc0, 0x1ebc8,
2280 		0x1ec08, 0x1ec0c,
2281 		0x1ec40, 0x1ec44,
2282 		0x1ec4c, 0x1ec4c,
2283 		0x1ee84, 0x1ee90,
2284 		0x1eec0, 0x1eec0,
2285 		0x1eee0, 0x1eee0,
2286 		0x1ef00, 0x1ef84,
2287 		0x1efc0, 0x1efc8,
2288 		0x1f008, 0x1f00c,
2289 		0x1f040, 0x1f044,
2290 		0x1f04c, 0x1f04c,
2291 		0x1f284, 0x1f290,
2292 		0x1f2c0, 0x1f2c0,
2293 		0x1f2e0, 0x1f2e0,
2294 		0x1f300, 0x1f384,
2295 		0x1f3c0, 0x1f3c8,
2296 		0x1f408, 0x1f40c,
2297 		0x1f440, 0x1f444,
2298 		0x1f44c, 0x1f44c,
2299 		0x1f684, 0x1f690,
2300 		0x1f6c0, 0x1f6c0,
2301 		0x1f6e0, 0x1f6e0,
2302 		0x1f700, 0x1f784,
2303 		0x1f7c0, 0x1f7c8,
2304 		0x1f808, 0x1f80c,
2305 		0x1f840, 0x1f844,
2306 		0x1f84c, 0x1f84c,
2307 		0x1fa84, 0x1fa90,
2308 		0x1fac0, 0x1fac0,
2309 		0x1fae0, 0x1fae0,
2310 		0x1fb00, 0x1fb84,
2311 		0x1fbc0, 0x1fbc8,
2312 		0x1fc08, 0x1fc0c,
2313 		0x1fc40, 0x1fc44,
2314 		0x1fc4c, 0x1fc4c,
2315 		0x1fe84, 0x1fe90,
2316 		0x1fec0, 0x1fec0,
2317 		0x1fee0, 0x1fee0,
2318 		0x1ff00, 0x1ff84,
2319 		0x1ffc0, 0x1ffc8,
2320 		0x30000, 0x30030,
2321 		0x30100, 0x30168,
2322 		0x30190, 0x301a0,
2323 		0x301a8, 0x301b8,
2324 		0x301c4, 0x301c8,
2325 		0x301d0, 0x301d0,
2326 		0x30200, 0x30320,
2327 		0x30400, 0x304b4,
2328 		0x304c0, 0x3052c,
2329 		0x30540, 0x3061c,
2330 		0x30800, 0x308a0,
2331 		0x308c0, 0x30908,
2332 		0x30910, 0x309b8,
2333 		0x30a00, 0x30a04,
2334 		0x30a0c, 0x30a14,
2335 		0x30a1c, 0x30a2c,
2336 		0x30a44, 0x30a50,
2337 		0x30a74, 0x30a74,
2338 		0x30a7c, 0x30afc,
2339 		0x30b08, 0x30c24,
2340 		0x30d00, 0x30d14,
2341 		0x30d1c, 0x30d3c,
2342 		0x30d44, 0x30d4c,
2343 		0x30d54, 0x30d74,
2344 		0x30d7c, 0x30d7c,
2345 		0x30de0, 0x30de0,
2346 		0x30e00, 0x30ed4,
2347 		0x30f00, 0x30fa4,
2348 		0x30fc0, 0x30fc4,
2349 		0x31000, 0x31004,
2350 		0x31080, 0x310fc,
2351 		0x31208, 0x31220,
2352 		0x3123c, 0x31254,
2353 		0x31300, 0x31300,
2354 		0x31308, 0x3131c,
2355 		0x31338, 0x3133c,
2356 		0x31380, 0x31380,
2357 		0x31388, 0x313a8,
2358 		0x313b4, 0x313b4,
2359 		0x31400, 0x31420,
2360 		0x31438, 0x3143c,
2361 		0x31480, 0x31480,
2362 		0x314a8, 0x314a8,
2363 		0x314b0, 0x314b4,
2364 		0x314c8, 0x314d4,
2365 		0x31a40, 0x31a4c,
2366 		0x31af0, 0x31b20,
2367 		0x31b38, 0x31b3c,
2368 		0x31b80, 0x31b80,
2369 		0x31ba8, 0x31ba8,
2370 		0x31bb0, 0x31bb4,
2371 		0x31bc8, 0x31bd4,
2372 		0x32140, 0x3218c,
2373 		0x321f0, 0x321f4,
2374 		0x32200, 0x32200,
2375 		0x32218, 0x32218,
2376 		0x32400, 0x32400,
2377 		0x32408, 0x3241c,
2378 		0x32618, 0x32620,
2379 		0x32664, 0x32664,
2380 		0x326a8, 0x326a8,
2381 		0x326ec, 0x326ec,
2382 		0x32a00, 0x32abc,
2383 		0x32b00, 0x32b18,
2384 		0x32b20, 0x32b38,
2385 		0x32b40, 0x32b58,
2386 		0x32b60, 0x32b78,
2387 		0x32c00, 0x32c00,
2388 		0x32c08, 0x32c3c,
2389 		0x33000, 0x3302c,
2390 		0x33034, 0x33050,
2391 		0x33058, 0x33058,
2392 		0x33060, 0x3308c,
2393 		0x3309c, 0x330ac,
2394 		0x330c0, 0x330c0,
2395 		0x330c8, 0x330d0,
2396 		0x330d8, 0x330e0,
2397 		0x330ec, 0x3312c,
2398 		0x33134, 0x33150,
2399 		0x33158, 0x33158,
2400 		0x33160, 0x3318c,
2401 		0x3319c, 0x331ac,
2402 		0x331c0, 0x331c0,
2403 		0x331c8, 0x331d0,
2404 		0x331d8, 0x331e0,
2405 		0x331ec, 0x33290,
2406 		0x33298, 0x332c4,
2407 		0x332e4, 0x33390,
2408 		0x33398, 0x333c4,
2409 		0x333e4, 0x3342c,
2410 		0x33434, 0x33450,
2411 		0x33458, 0x33458,
2412 		0x33460, 0x3348c,
2413 		0x3349c, 0x334ac,
2414 		0x334c0, 0x334c0,
2415 		0x334c8, 0x334d0,
2416 		0x334d8, 0x334e0,
2417 		0x334ec, 0x3352c,
2418 		0x33534, 0x33550,
2419 		0x33558, 0x33558,
2420 		0x33560, 0x3358c,
2421 		0x3359c, 0x335ac,
2422 		0x335c0, 0x335c0,
2423 		0x335c8, 0x335d0,
2424 		0x335d8, 0x335e0,
2425 		0x335ec, 0x33690,
2426 		0x33698, 0x336c4,
2427 		0x336e4, 0x33790,
2428 		0x33798, 0x337c4,
2429 		0x337e4, 0x337fc,
2430 		0x33814, 0x33814,
2431 		0x33854, 0x33868,
2432 		0x33880, 0x3388c,
2433 		0x338c0, 0x338d0,
2434 		0x338e8, 0x338ec,
2435 		0x33900, 0x3392c,
2436 		0x33934, 0x33950,
2437 		0x33958, 0x33958,
2438 		0x33960, 0x3398c,
2439 		0x3399c, 0x339ac,
2440 		0x339c0, 0x339c0,
2441 		0x339c8, 0x339d0,
2442 		0x339d8, 0x339e0,
2443 		0x339ec, 0x33a90,
2444 		0x33a98, 0x33ac4,
2445 		0x33ae4, 0x33b10,
2446 		0x33b24, 0x33b28,
2447 		0x33b38, 0x33b50,
2448 		0x33bf0, 0x33c10,
2449 		0x33c24, 0x33c28,
2450 		0x33c38, 0x33c50,
2451 		0x33cf0, 0x33cfc,
2452 		0x34000, 0x34030,
2453 		0x34100, 0x34168,
2454 		0x34190, 0x341a0,
2455 		0x341a8, 0x341b8,
2456 		0x341c4, 0x341c8,
2457 		0x341d0, 0x341d0,
2458 		0x34200, 0x34320,
2459 		0x34400, 0x344b4,
2460 		0x344c0, 0x3452c,
2461 		0x34540, 0x3461c,
2462 		0x34800, 0x348a0,
2463 		0x348c0, 0x34908,
2464 		0x34910, 0x349b8,
2465 		0x34a00, 0x34a04,
2466 		0x34a0c, 0x34a14,
2467 		0x34a1c, 0x34a2c,
2468 		0x34a44, 0x34a50,
2469 		0x34a74, 0x34a74,
2470 		0x34a7c, 0x34afc,
2471 		0x34b08, 0x34c24,
2472 		0x34d00, 0x34d14,
2473 		0x34d1c, 0x34d3c,
2474 		0x34d44, 0x34d4c,
2475 		0x34d54, 0x34d74,
2476 		0x34d7c, 0x34d7c,
2477 		0x34de0, 0x34de0,
2478 		0x34e00, 0x34ed4,
2479 		0x34f00, 0x34fa4,
2480 		0x34fc0, 0x34fc4,
2481 		0x35000, 0x35004,
2482 		0x35080, 0x350fc,
2483 		0x35208, 0x35220,
2484 		0x3523c, 0x35254,
2485 		0x35300, 0x35300,
2486 		0x35308, 0x3531c,
2487 		0x35338, 0x3533c,
2488 		0x35380, 0x35380,
2489 		0x35388, 0x353a8,
2490 		0x353b4, 0x353b4,
2491 		0x35400, 0x35420,
2492 		0x35438, 0x3543c,
2493 		0x35480, 0x35480,
2494 		0x354a8, 0x354a8,
2495 		0x354b0, 0x354b4,
2496 		0x354c8, 0x354d4,
2497 		0x35a40, 0x35a4c,
2498 		0x35af0, 0x35b20,
2499 		0x35b38, 0x35b3c,
2500 		0x35b80, 0x35b80,
2501 		0x35ba8, 0x35ba8,
2502 		0x35bb0, 0x35bb4,
2503 		0x35bc8, 0x35bd4,
2504 		0x36140, 0x3618c,
2505 		0x361f0, 0x361f4,
2506 		0x36200, 0x36200,
2507 		0x36218, 0x36218,
2508 		0x36400, 0x36400,
2509 		0x36408, 0x3641c,
2510 		0x36618, 0x36620,
2511 		0x36664, 0x36664,
2512 		0x366a8, 0x366a8,
2513 		0x366ec, 0x366ec,
2514 		0x36a00, 0x36abc,
2515 		0x36b00, 0x36b18,
2516 		0x36b20, 0x36b38,
2517 		0x36b40, 0x36b58,
2518 		0x36b60, 0x36b78,
2519 		0x36c00, 0x36c00,
2520 		0x36c08, 0x36c3c,
2521 		0x37000, 0x3702c,
2522 		0x37034, 0x37050,
2523 		0x37058, 0x37058,
2524 		0x37060, 0x3708c,
2525 		0x3709c, 0x370ac,
2526 		0x370c0, 0x370c0,
2527 		0x370c8, 0x370d0,
2528 		0x370d8, 0x370e0,
2529 		0x370ec, 0x3712c,
2530 		0x37134, 0x37150,
2531 		0x37158, 0x37158,
2532 		0x37160, 0x3718c,
2533 		0x3719c, 0x371ac,
2534 		0x371c0, 0x371c0,
2535 		0x371c8, 0x371d0,
2536 		0x371d8, 0x371e0,
2537 		0x371ec, 0x37290,
2538 		0x37298, 0x372c4,
2539 		0x372e4, 0x37390,
2540 		0x37398, 0x373c4,
2541 		0x373e4, 0x3742c,
2542 		0x37434, 0x37450,
2543 		0x37458, 0x37458,
2544 		0x37460, 0x3748c,
2545 		0x3749c, 0x374ac,
2546 		0x374c0, 0x374c0,
2547 		0x374c8, 0x374d0,
2548 		0x374d8, 0x374e0,
2549 		0x374ec, 0x3752c,
2550 		0x37534, 0x37550,
2551 		0x37558, 0x37558,
2552 		0x37560, 0x3758c,
2553 		0x3759c, 0x375ac,
2554 		0x375c0, 0x375c0,
2555 		0x375c8, 0x375d0,
2556 		0x375d8, 0x375e0,
2557 		0x375ec, 0x37690,
2558 		0x37698, 0x376c4,
2559 		0x376e4, 0x37790,
2560 		0x37798, 0x377c4,
2561 		0x377e4, 0x377fc,
2562 		0x37814, 0x37814,
2563 		0x37854, 0x37868,
2564 		0x37880, 0x3788c,
2565 		0x378c0, 0x378d0,
2566 		0x378e8, 0x378ec,
2567 		0x37900, 0x3792c,
2568 		0x37934, 0x37950,
2569 		0x37958, 0x37958,
2570 		0x37960, 0x3798c,
2571 		0x3799c, 0x379ac,
2572 		0x379c0, 0x379c0,
2573 		0x379c8, 0x379d0,
2574 		0x379d8, 0x379e0,
2575 		0x379ec, 0x37a90,
2576 		0x37a98, 0x37ac4,
2577 		0x37ae4, 0x37b10,
2578 		0x37b24, 0x37b28,
2579 		0x37b38, 0x37b50,
2580 		0x37bf0, 0x37c10,
2581 		0x37c24, 0x37c28,
2582 		0x37c38, 0x37c50,
2583 		0x37cf0, 0x37cfc,
2584 		0x40040, 0x40040,
2585 		0x40080, 0x40084,
2586 		0x40100, 0x40100,
2587 		0x40140, 0x401bc,
2588 		0x40200, 0x40214,
2589 		0x40228, 0x40228,
2590 		0x40240, 0x40258,
2591 		0x40280, 0x40280,
2592 		0x40304, 0x40304,
2593 		0x40330, 0x4033c,
2594 		0x41304, 0x413c8,
2595 		0x413d0, 0x413dc,
2596 		0x413f0, 0x413f0,
2597 		0x41400, 0x4140c,
2598 		0x41414, 0x4141c,
2599 		0x41480, 0x414d0,
2600 		0x44000, 0x4407c,
2601 		0x440c0, 0x441ac,
2602 		0x441b4, 0x4427c,
2603 		0x442c0, 0x443ac,
2604 		0x443b4, 0x4447c,
2605 		0x444c0, 0x445ac,
2606 		0x445b4, 0x4467c,
2607 		0x446c0, 0x447ac,
2608 		0x447b4, 0x4487c,
2609 		0x448c0, 0x449ac,
2610 		0x449b4, 0x44a7c,
2611 		0x44ac0, 0x44bac,
2612 		0x44bb4, 0x44c7c,
2613 		0x44cc0, 0x44dac,
2614 		0x44db4, 0x44e7c,
2615 		0x44ec0, 0x44fac,
2616 		0x44fb4, 0x4507c,
2617 		0x450c0, 0x451ac,
2618 		0x451b4, 0x451fc,
2619 		0x45800, 0x45804,
2620 		0x45810, 0x45830,
2621 		0x45840, 0x45860,
2622 		0x45868, 0x45868,
2623 		0x45880, 0x45884,
2624 		0x458a0, 0x458b0,
2625 		0x45a00, 0x45a04,
2626 		0x45a10, 0x45a30,
2627 		0x45a40, 0x45a60,
2628 		0x45a68, 0x45a68,
2629 		0x45a80, 0x45a84,
2630 		0x45aa0, 0x45ab0,
2631 		0x460c0, 0x460e4,
2632 		0x47000, 0x4703c,
2633 		0x47044, 0x4708c,
2634 		0x47200, 0x47250,
2635 		0x47400, 0x47408,
2636 		0x47414, 0x47420,
2637 		0x47600, 0x47618,
2638 		0x47800, 0x47814,
2639 		0x47820, 0x4782c,
2640 		0x50000, 0x50084,
2641 		0x50090, 0x500cc,
2642 		0x50300, 0x50384,
2643 		0x50400, 0x50400,
2644 		0x50800, 0x50884,
2645 		0x50890, 0x508cc,
2646 		0x50b00, 0x50b84,
2647 		0x50c00, 0x50c00,
2648 		0x51000, 0x51020,
2649 		0x51028, 0x510b0,
2650 		0x51300, 0x51324,
2651 	};
2652 
2653 	static const unsigned int t6vf_reg_ranges[] = {
2654 		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2655 		VF_MPS_REG(A_MPS_VF_CTL),
2656 		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2657 		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2658 		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2659 		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2660 		FW_T6VF_MBDATA_BASE_ADDR,
2661 		FW_T6VF_MBDATA_BASE_ADDR +
2662 		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2663 	};
2664 
2665 	u32 *buf_end = (u32 *)(buf + buf_size);
2666 	const unsigned int *reg_ranges;
2667 	int reg_ranges_size, range;
2668 	unsigned int chip_version = chip_id(adap);
2669 
2670 	/*
2671 	 * Select the right set of register ranges to dump depending on the
2672 	 * adapter chip type.
2673 	 */
2674 	switch (chip_version) {
2675 	case CHELSIO_T4:
2676 		if (adap->flags & IS_VF) {
2677 			reg_ranges = t4vf_reg_ranges;
2678 			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2679 		} else {
2680 			reg_ranges = t4_reg_ranges;
2681 			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2682 		}
2683 		break;
2684 
2685 	case CHELSIO_T5:
2686 		if (adap->flags & IS_VF) {
2687 			reg_ranges = t5vf_reg_ranges;
2688 			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2689 		} else {
2690 			reg_ranges = t5_reg_ranges;
2691 			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2692 		}
2693 		break;
2694 
2695 	case CHELSIO_T6:
2696 		if (adap->flags & IS_VF) {
2697 			reg_ranges = t6vf_reg_ranges;
2698 			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2699 		} else {
2700 			reg_ranges = t6_reg_ranges;
2701 			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2702 		}
2703 		break;
2704 
2705 	default:
2706 		CH_ERR(adap,
2707 			"Unsupported chip version %d\n", chip_version);
2708 		return;
2709 	}
2710 
2711 	/*
2712 	 * Clear the register buffer and insert the appropriate register
2713 	 * values selected by the above register ranges.
2714 	 */
2715 	memset(buf, 0, buf_size);
2716 	for (range = 0; range < reg_ranges_size; range += 2) {
2717 		unsigned int reg = reg_ranges[range];
2718 		unsigned int last_reg = reg_ranges[range + 1];
2719 		u32 *bufp = (u32 *)(buf + reg);
2720 
2721 		/*
2722 		 * Iterate across the register range filling in the register
2723 		 * buffer but don't write past the end of the register buffer.
2724 		 */
2725 		while (reg <= last_reg && bufp < buf_end) {
2726 			*bufp++ = t4_read_reg(adap, reg);
2727 			reg += sizeof(u32);
2728 		}
2729 	}
2730 }
2731 
2732 /*
2733  * Partial EEPROM Vital Product Data structure.  The VPD starts with one ID
2734  * header followed by one or more VPD-R sections, each with its own header.
2735  */
struct t4_vpd_hdr {
	u8  id_tag;		/* tag byte; CHELSIO_VPD_UNIQUE_ID (0x82) on Chelsio cards */
	u8  id_len[2];		/* ID string length bytes */
	u8  id_data[ID_LEN];	/* ID string (adapter description) */
};
2741 
struct t4_vpdr_hdr {
	u8  vpdr_tag;		/* VPD-R section tag */
	u8  vpdr_len[2];	/* section length, little-endian (decoded in get_vpd_keyword_val) */
};
2746 
2747 /*
2748  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2749  */
2750 #define EEPROM_DELAY		10		/* 10us per poll spin */
2751 #define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */
2752 
2753 #define EEPROM_STAT_ADDR	0x7bfc
2754 #define VPD_SIZE		0x800
2755 #define VPD_BASE		0x400
2756 #define VPD_BASE_OLD		0
2757 #define VPD_LEN			1024
2758 #define VPD_INFO_FLD_HDR_SIZE	3
2759 #define CHELSIO_VPD_UNIQUE_ID	0x82
2760 
2761 /*
2762  * Small utility function to wait till any outstanding VPD Access is complete.
2763  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2764  * VPD Access in flight.  This allows us to handle the problem of having a
2765  * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
2767  */
2768 static int t4_seeprom_wait(struct adapter *adapter)
2769 {
2770 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2771 	int max_poll;
2772 
2773 	/*
2774 	 * If no VPD Access is in flight, we can just return success right
2775 	 * away.
2776 	 */
2777 	if (!adapter->vpd_busy)
2778 		return 0;
2779 
2780 	/*
2781 	 * Poll the VPD Capability Address/Flag register waiting for it
2782 	 * to indicate that the operation is complete.
2783 	 */
2784 	max_poll = EEPROM_MAX_POLL;
2785 	do {
2786 		u16 val;
2787 
2788 		udelay(EEPROM_DELAY);
2789 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2790 
2791 		/*
2792 		 * If the operation is complete, mark the VPD as no longer
2793 		 * busy and return success.
2794 		 */
2795 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2796 			adapter->vpd_busy = 0;
2797 			return 0;
2798 		}
2799 	} while (--max_poll);
2800 
2801 	/*
2802 	 * Failure!  Note that we leave the VPD Busy status set in order to
2803 	 * avoid pushing a new VPD Access request into the VPD Capability till
2804 	 * the current operation eventually succeeds.  It's a bug to issue a
2805 	 * new request when an existing request is in flight and will result
2806 	 * in corrupt hardware state.
2807 	 */
2808 	return -ETIMEDOUT;
2809 }
2810 
2811 /**
2812  *	t4_seeprom_read - read a serial EEPROM location
2813  *	@adapter: adapter to read
2814  *	@addr: EEPROM virtual address
2815  *	@data: where to store the read data
2816  *
2817  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
2818  *	VPD capability.  Note that this function must be called with a virtual
2819  *	address.
2820  */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!  The completion poll waits for PCI_VPD_ADDR_F to
	 * become set (vpd_flag = PCI_VPD_ADDR_F means "read done").
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
2865 
2866 /**
2867  *	t4_seeprom_write - write a serial EEPROM location
2868  *	@adapter: adapter to write
2869  *	@addr: virtual EEPROM address
2870  *	@data: value to write
2871  *
2872  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
2873  *	VPD capability.  Note that this function must be called with a virtual
2874  *	address.
2875  */
2876 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2877 {
2878 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2879 	int ret;
2880 	u32 stats_reg;
2881 	int max_poll;
2882 
2883 	/*
2884 	 * VPD Accesses must alway be 4-byte aligned!
2885 	 */
2886 	if (addr >= EEPROMVSIZE || (addr & 3))
2887 		return -EINVAL;
2888 
2889 	/*
2890 	 * Wait for any previous operation which may still be in flight to
2891 	 * complete.
2892 	 */
2893 	ret = t4_seeprom_wait(adapter);
2894 	if (ret) {
2895 		CH_ERR(adapter, "VPD still busy from previous operation\n");
2896 		return ret;
2897 	}
2898 
2899 	/*
2900 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
2901 	 * for our request to complete.  If it doesn't complete, note the
2902 	 * error and return it to our caller.  Note that we do not reset the
2903 	 * VPD Busy status!
2904 	 */
2905 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2906 				 cpu_to_le32(data));
2907 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2908 				 (u16)addr | PCI_VPD_ADDR_F);
2909 	adapter->vpd_busy = 1;
2910 	adapter->vpd_flag = 0;
2911 	ret = t4_seeprom_wait(adapter);
2912 	if (ret) {
2913 		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2914 		return ret;
2915 	}
2916 
2917 	/*
2918 	 * Reset PCI_VPD_DATA register after a transaction and wait for our
2919 	 * request to complete. If it doesn't complete, return error.
2920 	 */
2921 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2922 	max_poll = EEPROM_MAX_POLL;
2923 	do {
2924 		udelay(EEPROM_DELAY);
2925 		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2926 	} while ((stats_reg & 0x1) && --max_poll);
2927 	if (!max_poll)
2928 		return -ETIMEDOUT;
2929 
2930 	/* Return success! */
2931 	return 0;
2932 }
2933 
2934 /**
2935  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
2936  *	@phys_addr: the physical EEPROM address
2937  *	@fn: the PCI function number
2938  *	@sz: size of function-specific area
2939  *
2940  *	Translate a physical EEPROM address to virtual.  The first 1K is
2941  *	accessed through virtual addresses starting at 31K, the rest is
2942  *	accessed through virtual addresses starting at 0.
2943  *
2944  *	The mapping is as follows:
2945  *	[0..1K) -> [31K..32K)
2946  *	[1K..1K+A) -> [ES-A..ES)
2947  *	[1K+A..ES) -> [0..ES-A-1K)
2948  *
2949  *	where A = @fn * @sz, and ES = EEPROM size.
2950  */
2951 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2952 {
2953 	fn *= sz;
2954 	if (phys_addr < 1024)
2955 		return phys_addr + (31 << 10);
2956 	if (phys_addr < 1024 + fn)
2957 		return EEPROMSIZE - fn + phys_addr - 1024;
2958 	if (phys_addr < EEPROMSIZE)
2959 		return phys_addr - 1024 - fn;
2960 	return -EINVAL;
2961 }
2962 
2963 /**
2964  *	t4_seeprom_wp - enable/disable EEPROM write protection
2965  *	@adapter: the adapter
2966  *	@enable: whether to enable or disable write protection
2967  *
2968  *	Enables or disables write protection on the serial EEPROM.
2969  */
2970 int t4_seeprom_wp(struct adapter *adapter, int enable)
2971 {
2972 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2973 }
2974 
2975 /**
2976  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
2977  *	@vpd: Pointer to buffered vpd data structure
2978  *	@kw: The keyword to search for
2979  *	@region: VPD region to search (starting from 0)
2980  *
2981  *	Returns the value of the information field keyword or
2982  *	-ENOENT otherwise.
2983  */
static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
{
	int i, tag;
	unsigned int offset, len;
	const struct t4_vpdr_hdr *vpdr;

	/* Skip the ID header; the first VPD-R section follows it. */
	offset = sizeof(struct t4_vpd_hdr);
	vpdr = (const void *)(vpd + offset);
	tag = vpdr->vpdr_tag;
	/* Section length is a 16-bit little-endian value in the header. */
	len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	/*
	 * Walk forward to the requested region.  Section tags are expected
	 * to increase by exactly one per section; bail out if they don't.
	 */
	while (region--) {
		offset += sizeof(struct t4_vpdr_hdr) + len;
		vpdr = (const void *)(vpd + offset);
		if (++tag != vpdr->vpdr_tag)
			return -ENOENT;
		len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	}
	/* Step past the region's own header to its first keyword entry. */
	offset += sizeof(struct t4_vpdr_hdr);

	/* Don't walk past the end of the buffered VPD. */
	if (offset + len > VPD_LEN) {
		return -ENOENT;
	}

	/*
	 * Scan the keyword entries: each is a 2-byte keyword name and a
	 * 1-byte data length (VPD_INFO_FLD_HDR_SIZE total), then the data.
	 * On a match, return the offset of the keyword's data.
	 */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(vpd + i , kw , 2) == 0){
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
	}

	return -ENOENT;
}
3018 
3019 
3020 /**
3021  *	get_vpd_params - read VPD parameters from VPD EEPROM
3022  *	@adapter: adapter to read
3023  *	@p: where to store the parameters
3024  *	@vpd: caller provided temporary space to read the VPD into
3025  *
3026  *	Reads card parameters stored in VPD EEPROM.
3027  */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
    uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;
	u8 csum;
	const u8 *vpd = (const u8 *)buf;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.  Probe the first word at VPD_BASE to decide which.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the whole VPD area into the caller's buffer, word by word. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

/* Look up keyword @name in VPD region 0 and store its data offset in @var. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/*
	 * "RV" holds the checksum byte: the byte sum of everything up to and
	 * including it must be zero for the VPD to be valid.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");	/* engineering change level */
	FIND_VPD_KW(sn, "SN");	/* serial number */
	FIND_VPD_KW(pn, "PN");	/* part number */
	FIND_VPD_KW(na, "NA");	/* network (MAC) address */
#undef FIND_VPD_KW

	/*
	 * Copy each field into the params structure, clamped to the field's
	 * destination size.  The field's data-length byte sits at offset 2
	 * of its 3-byte header, i.e. at (field - VPD_INFO_FLD_HDR_SIZE + 2).
	 */
	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/* "VF" lives in VPD region 1 and is optional; fall back to "unknown". */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}
3111 
3112 /* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (standard SPI NOR command set) */
	SF_PROG_PAGE    = 2,	/* program 256B page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3125 
3126 /**
3127  *	sf1_read - read data from the serial flash
3128  *	@adapter: the adapter
3129  *	@byte_cnt: number of bytes to read
3130  *	@cont: whether another operation will be chained
3131  *	@lock: whether to lock SF for PL access only
3132  *	@valp: where to store the read data
3133  *
3134  *	Reads up to 4 bytes of data from the serial flash.  The location of
3135  *	the read needs to be specified prior to calling this by issuing the
3136  *	appropriate commands to the serial flash.
3137  */
3138 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3139 		    int lock, u32 *valp)
3140 {
3141 	int ret;
3142 
3143 	if (!byte_cnt || byte_cnt > 4)
3144 		return -EINVAL;
3145 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3146 		return -EBUSY;
3147 	t4_write_reg(adapter, A_SF_OP,
3148 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3149 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3150 	if (!ret)
3151 		*valp = t4_read_reg(adapter, A_SF_DATA);
3152 	return ret;
3153 }
3154 
3155 /**
3156  *	sf1_write - write data to the serial flash
3157  *	@adapter: the adapter
3158  *	@byte_cnt: number of bytes to write
3159  *	@cont: whether another operation will be chained
3160  *	@lock: whether to lock SF for PL access only
3161  *	@val: value to write
3162  *
3163  *	Writes up to 4 bytes of data to the serial flash.  The location of
3164  *	the write needs to be specified prior to calling this by issuing the
3165  *	appropriate commands to the serial flash.
3166  */
3167 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3168 		     int lock, u32 val)
3169 {
3170 	if (!byte_cnt || byte_cnt > 4)
3171 		return -EINVAL;
3172 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3173 		return -EBUSY;
3174 	t4_write_reg(adapter, A_SF_DATA, val);
3175 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3176 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3177 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3178 }
3179 
3180 /**
3181  *	flash_wait_op - wait for a flash operation to complete
3182  *	@adapter: the adapter
3183  *	@attempts: max number of polls of the status register
3184  *	@delay: delay between polls in ms
3185  *
3186  *	Wait for a flash operation to complete by polling the status register.
3187  */
3188 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3189 {
3190 	int ret;
3191 	u32 status;
3192 
3193 	while (1) {
3194 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3195 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3196 			return ret;
3197 		if (!(status & 1))
3198 			return 0;
3199 		if (--attempts == 0)
3200 			return -EAGAIN;
3201 		if (delay)
3202 			msleep(delay);
3203 	}
3204 }
3205 
3206 /**
3207  *	t4_read_flash - read words from serial flash
3208  *	@adapter: the adapter
3209  *	@addr: the start address for the read
3210  *	@nwords: how many 32-bit words to read
3211  *	@data: where to store the read data
3212  *	@byte_oriented: whether to store data as bytes or as words
3213  *
3214  *	Read the specified number of 32-bit words from the serial flash.
3215  *	If @byte_oriented is set the read data is stored as a byte array
3216  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3217  *	natural endianness.
3218  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie wholly within the flash and be word-aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/*
	 * Build the command word: the byte-swapped address occupies the top
	 * three bytes with the FAST READ opcode in the low byte.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/*
	 * Send the opcode+address (4 bytes), then clock one throwaway byte;
	 * its value lands in *data but is overwritten by the loop below.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the chain (cont) going until the last word is read. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3244 
3245 /**
3246  *	t4_write_flash - write up to a page of data to the serial flash
3247  *	@adapter: the adapter
3248  *	@addr: the start address to write
3249  *	@n: length of data to write in bytes
3250  *	@data: the data to write
3251  *	@byte_oriented: whether to store data as bytes or as words
3252  *
3253  *	Writes up to a page of data (256 bytes) to the serial flash starting
3254  *	at the given address.  All the data must be written to the same page.
3255  *	If @byte_oriented is set the write data is stored as byte stream
3256  *	(i.e. matches what on disk), otherwise in big-endian.
3257  */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit within the flash and within a single page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Command word: byte-swapped address with the PROG PAGE opcode. */
	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then send the program command and address. */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out in chunks of up to 4 bytes. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		/* Pack the next c bytes; first byte ends up most significant. */
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		/* cont (c != left) stays set until the final chunk. */
		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	/* Wait for the flash part to finish programming the page. */
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* The loop advanced data past the payload, hence data - n here. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
3310 
3311 /**
3312  *	t4_get_fw_version - read the firmware version
3313  *	@adapter: the adapter
3314  *	@vers: where to place the version
3315  *
3316  *	Reads the FW version from flash.
3317  */
3318 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3319 {
3320 	return t4_read_flash(adapter, FLASH_FW_START +
3321 			     offsetof(struct fw_hdr, fw_ver), 1,
3322 			     vers, 0);
3323 }
3324 
3325 /**
3326  *	t4_get_fw_hdr - read the firmware header
3327  *	@adapter: the adapter
3328  *	@hdr: where to place the version
3329  *
3330  *	Reads the FW header from flash into caller provided buffer.
3331  */
3332 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
3333 {
3334 	return t4_read_flash(adapter, FLASH_FW_START,
3335 	    sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
3336 }
3337 
3338 /**
3339  *	t4_get_bs_version - read the firmware bootstrap version
3340  *	@adapter: the adapter
3341  *	@vers: where to place the version
3342  *
3343  *	Reads the FW Bootstrap version from flash.
3344  */
3345 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3346 {
3347 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3348 			     offsetof(struct fw_hdr, fw_ver), 1,
3349 			     vers, 0);
3350 }
3351 
3352 /**
3353  *	t4_get_tp_version - read the TP microcode version
3354  *	@adapter: the adapter
3355  *	@vers: where to place the version
3356  *
3357  *	Reads the TP microcode version from flash.
3358  */
3359 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3360 {
3361 	return t4_read_flash(adapter, FLASH_FW_START +
3362 			     offsetof(struct fw_hdr, tp_microcode_ver),
3363 			     1, vers, 0);
3364 }
3365 
3366 /**
3367  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3368  *	@adapter: the adapter
3369  *	@vers: where to place the version
3370  *
3371  *	Reads the Expansion ROM header from FLASH and returns the version
3372  *	number (if present) through the @vers return value pointer.  We return
3373  *	this in the Firmware Version Format since it's convenient.  Return
3374  *	0 on success, -ENOENT if no Expansion ROM is present.
3375  */
3376 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3377 {
3378 	struct exprom_header {
3379 		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3380 		unsigned char hdr_ver[4];	/* Expansion ROM version */
3381 	} *hdr;
3382 	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3383 					   sizeof(u32))];
3384 	int ret;
3385 
3386 	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3387 			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3388 			    0);
3389 	if (ret)
3390 		return ret;
3391 
3392 	hdr = (struct exprom_header *)exprom_header_buf;
3393 	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3394 		return -ENOENT;
3395 
3396 	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3397 		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3398 		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3399 		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3400 	return 0;
3401 }
3402 
3403 /**
3404  *	t4_get_scfg_version - return the Serial Configuration version
3405  *	@adapter: the adapter
3406  *	@vers: where to place the version
3407  *
3408  *	Reads the Serial Configuration Version via the Firmware interface
3409  *	(thus this can only be called once we're ready to issue Firmware
3410  *	commands).  The format of the Serial Configuration version is
3411  *	adapter specific.  Returns 0 on success, an error on failure.
3412  *
3413  *	Note that early versions of the Firmware didn't include the ability
3414  *	to retrieve the Serial Configuration version, so we zero-out the
3415  *	return-value parameter in that case to avoid leaving it with
3416  *	garbage in it.
3417  *
3418  *	Also note that the Firmware will return its cached copy of the Serial
3419  *	Initialization Revision ID, not the actual Revision ID as written in
3420  *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3421  *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3422  *	it's best to defer calling this routine till after a FW_RESET_CMD has
3423  *	been issued if the Host Driver will be performing a full adapter
3424  *	initialization.
3425  */
3426 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3427 {
3428 	u32 scfgrev_param;
3429 	int ret;
3430 
3431 	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3432 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3433 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3434 			      1, &scfgrev_param, vers);
3435 	if (ret)
3436 		*vers = 0;
3437 	return ret;
3438 }
3439 
3440 /**
3441  *	t4_get_vpd_version - return the VPD version
3442  *	@adapter: the adapter
3443  *	@vers: where to place the version
3444  *
3445  *	Reads the VPD via the Firmware interface (thus this can only be called
3446  *	once we're ready to issue Firmware commands).  The format of the
3447  *	VPD version is adapter specific.  Returns 0 on success, an error on
3448  *	failure.
3449  *
3450  *	Note that early versions of the Firmware didn't include the ability
3451  *	to retrieve the VPD version, so we zero-out the return-value parameter
3452  *	in that case to avoid leaving it with garbage in it.
3453  *
3454  *	Also note that the Firmware will return its cached copy of the VPD
3455  *	Revision ID, not the actual Revision ID as written in the Serial
3456  *	EEPROM.  This is only an issue if a new VPD has been written and the
3457  *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3458  *	to defer calling this routine till after a FW_RESET_CMD has been issued
3459  *	if the Host Driver will be performing a full adapter initialization.
3460  */
3461 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3462 {
3463 	u32 vpdrev_param;
3464 	int ret;
3465 
3466 	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3467 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3468 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3469 			      1, &vpdrev_param, vers);
3470 	if (ret)
3471 		*vers = 0;
3472 	return ret;
3473 }
3474 
3475 /**
3476  *	t4_get_version_info - extract various chip/firmware version information
3477  *	@adapter: the adapter
3478  *
3479  *	Reads various chip/firmware version numbers and stores them into the
3480  *	adapter Adapter Parameters structure.  If any of the efforts fails
3481  *	the first failure will be returned, but all of the version numbers
3482  *	will be read.
3483  */
3484 int t4_get_version_info(struct adapter *adapter)
3485 {
3486 	int ret = 0;
3487 
3488 	#define FIRST_RET(__getvinfo) \
3489 	do { \
3490 		int __ret = __getvinfo; \
3491 		if (__ret && !ret) \
3492 			ret = __ret; \
3493 	} while (0)
3494 
3495 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3496 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3497 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3498 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3499 	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3500 	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3501 
3502 	#undef FIRST_RET
3503 
3504 	return ret;
3505 }
3506 
3507 /**
3508  *	t4_flash_erase_sectors - erase a range of flash sectors
3509  *	@adapter: the adapter
3510  *	@start: the first sector to erase
3511  *	@end: the last sector to erase
3512  *
3513  *	Erases the sectors in the given inclusive range.
3514  */
3515 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3516 {
3517 	int ret = 0;
3518 
3519 	if (end >= adapter->params.sf_nsec)
3520 		return -EINVAL;
3521 
3522 	while (start <= end) {
3523 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3524 		    (ret = sf1_write(adapter, 4, 0, 1,
3525 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3526 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3527 			CH_ERR(adapter,
3528 				"erase of flash sector %d failed, error %d\n",
3529 				start, ret);
3530 			break;
3531 		}
3532 		start++;
3533 	}
3534 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3535 	return ret;
3536 }
3537 
3538 /**
3539  *	t4_flash_cfg_addr - return the address of the flash configuration file
3540  *	@adapter: the adapter
3541  *
3542  *	Return the address within the flash where the Firmware Configuration
3543  *	File is stored, or an error if the device FLASH is too small to contain
3544  *	a Firmware Configuration File.
3545  */
3546 int t4_flash_cfg_addr(struct adapter *adapter)
3547 {
3548 	/*
3549 	 * If the device FLASH isn't large enough to hold a Firmware
3550 	 * Configuration File, return an error.
3551 	 */
3552 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3553 		return -ENOSPC;
3554 
3555 	return FLASH_CFG_START;
3556 }
3557 
3558 /*
3559  * Return TRUE if the specified firmware matches the adapter.  I.e. T4
3560  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3561  * and emit an error message for mismatched firmware to save our caller the
3562  * effort ...
3563  */
3564 static int t4_fw_matches_chip(struct adapter *adap,
3565 			      const struct fw_hdr *hdr)
3566 {
3567 	/*
3568 	 * The expression below will return FALSE for any unsupported adapter
3569 	 * which will keep us "honest" in the future ...
3570 	 */
3571 	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3572 	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3573 	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3574 		return 1;
3575 
3576 	CH_ERR(adap,
3577 		"FW image (%d) is not suitable for this adapter (%d)\n",
3578 		hdr->chip, chip_id(adap));
3579 	return 0;
3580 }
3581 
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash,
 *	validating the image (size, header, checksum, chip match) first.
 *	Returns 0 on success or a negative error.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap images live in their own flash region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* A valid image's 32-bit words sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages one flash page at a time. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, patch in the real version to mark the image valid. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	return ret;
}
3678 
3679 /**
3680  *	t4_fwcache - firmware cache operation
3681  *	@adap: the adapter
3682  *	@op  : the operation (flush or flush and invalidate)
3683  */
3684 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3685 {
3686 	struct fw_params_cmd c;
3687 
3688 	memset(&c, 0, sizeof(c));
3689 	c.op_to_vfn =
3690 	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3691 			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3692 				V_FW_PARAMS_CMD_PFN(adap->pf) |
3693 				V_FW_PARAMS_CMD_VFN(0));
3694 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3695 	c.param[0].mnem =
3696 	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3697 			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3698 	c.param[0].val = (__force __be32)op;
3699 
3700 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3701 }
3702 
/*
 * Read out the CIM PIF logic analyzer request and response buffers into
 * @pif_req and @pif_rsp, optionally returning the current write pointers
 * through @pif_req_wrptr / @pif_rsp_wrptr (either may be NULL).
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Clear LADBGEN (if set) while we read the LA; restored below. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the read pointers, then read one entry of
			 * each buffer. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip 2 entries per row, wrapping at the pointer masks. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);	/* restore config */
}
3736 
3737 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3738 {
3739 	u32 cfg;
3740 	int i, j, idx;
3741 
3742 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3743 	if (cfg & F_LADBGEN)
3744 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3745 
3746 	for (i = 0; i < CIM_MALA_SIZE; i++) {
3747 		for (j = 0; j < 5; j++) {
3748 			idx = 8 * i + j;
3749 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3750 				     V_PILADBGRDPTR(idx));
3751 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3752 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3753 		}
3754 	}
3755 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3756 }
3757 
3758 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3759 {
3760 	unsigned int i, j;
3761 
3762 	for (i = 0; i < 8; i++) {
3763 		u32 *p = la_buf + i;
3764 
3765 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3766 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3767 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3768 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3769 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3770 	}
3771 }
3772 
3773 /**
3774  *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3775  *	@caps16: a 16-bit Port Capabilities value
3776  *
3777  *	Returns the equivalent 32-bit Port Capabilities value.
3778  */
3779 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
3780 {
3781 	uint32_t caps32 = 0;
3782 
3783 	#define CAP16_TO_CAP32(__cap) \
3784 		do { \
3785 			if (caps16 & FW_PORT_CAP_##__cap) \
3786 				caps32 |= FW_PORT_CAP32_##__cap; \
3787 		} while (0)
3788 
3789 	CAP16_TO_CAP32(SPEED_100M);
3790 	CAP16_TO_CAP32(SPEED_1G);
3791 	CAP16_TO_CAP32(SPEED_25G);
3792 	CAP16_TO_CAP32(SPEED_10G);
3793 	CAP16_TO_CAP32(SPEED_40G);
3794 	CAP16_TO_CAP32(SPEED_100G);
3795 	CAP16_TO_CAP32(FC_RX);
3796 	CAP16_TO_CAP32(FC_TX);
3797 	CAP16_TO_CAP32(ANEG);
3798 	CAP16_TO_CAP32(FORCE_PAUSE);
3799 	CAP16_TO_CAP32(MDIAUTO);
3800 	CAP16_TO_CAP32(MDISTRAIGHT);
3801 	CAP16_TO_CAP32(FEC_RS);
3802 	CAP16_TO_CAP32(FEC_BASER_RS);
3803 	CAP16_TO_CAP32(802_3_PAUSE);
3804 	CAP16_TO_CAP32(802_3_ASM_DIR);
3805 
3806 	#undef CAP16_TO_CAP32
3807 
3808 	return caps32;
3809 }
3810 
3811 /**
3812  *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3813  *	@caps32: a 32-bit Port Capabilities value
3814  *
3815  *	Returns the equivalent 16-bit Port Capabilities value.  Note that
3816  *	not all 32-bit Port Capabilities can be represented in the 16-bit
3817  *	Port Capabilities and some fields/values may not make it.
3818  */
3819 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
3820 {
3821 	uint16_t caps16 = 0;
3822 
3823 	#define CAP32_TO_CAP16(__cap) \
3824 		do { \
3825 			if (caps32 & FW_PORT_CAP32_##__cap) \
3826 				caps16 |= FW_PORT_CAP_##__cap; \
3827 		} while (0)
3828 
3829 	CAP32_TO_CAP16(SPEED_100M);
3830 	CAP32_TO_CAP16(SPEED_1G);
3831 	CAP32_TO_CAP16(SPEED_10G);
3832 	CAP32_TO_CAP16(SPEED_25G);
3833 	CAP32_TO_CAP16(SPEED_40G);
3834 	CAP32_TO_CAP16(SPEED_100G);
3835 	CAP32_TO_CAP16(FC_RX);
3836 	CAP32_TO_CAP16(FC_TX);
3837 	CAP32_TO_CAP16(802_3_PAUSE);
3838 	CAP32_TO_CAP16(802_3_ASM_DIR);
3839 	CAP32_TO_CAP16(ANEG);
3840 	CAP32_TO_CAP16(FORCE_PAUSE);
3841 	CAP32_TO_CAP16(MDIAUTO);
3842 	CAP32_TO_CAP16(MDISTRAIGHT);
3843 	CAP32_TO_CAP16(FEC_RS);
3844 	CAP32_TO_CAP16(FEC_BASER_RS);
3845 
3846 	#undef CAP32_TO_CAP16
3847 
3848 	return caps16;
3849 }
3850 
3851 static bool
3852 is_bt(struct port_info *pi)
3853 {
3854 
3855 	return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
3856 	    pi->port_type == FW_PORT_TYPE_BT_XFI ||
3857 	    pi->port_type == FW_PORT_TYPE_BT_XAUI);
3858 }
3859 
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Translate the requested pause settings into capability bits;
	 * FORCE_PAUSE is set when pause is not to be autonegotiated. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/* FEC: explicit request, or fall back to the module/firmware hint. */
	fec = 0;
	if (lc->requested_fec == FEC_AUTO)
		fec = lc->fec_hint;
	else {
		if (lc->requested_fec & FEC_RS)
			fec |= FW_PORT_CAP32_FEC_RS;
		if (lc->requested_fec & FEC_BASER_RS)
			fec |= FW_PORT_CAP32_FEC_BASER_RS;
	}

	/* Autonegotiation: explicit on/off, else whatever the port supports. */
	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->supported & FW_PORT_CAP32_ANEG;

	/* With AN, advertise every supported speed; without it, use the
	 * requested speed or the top supported speed. */
	if (aneg) {
		speed = lc->supported & V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->supported);

	/* Force AN on for BT cards. */
	if (is_bt(adap->port[port]))
		aneg = lc->supported & FW_PORT_CAP32_ANEG;

	/* Never request capabilities the port doesn't support. */
	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->supported) != lc->supported) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
		    lc->supported);
#endif
		rcap &= lc->supported;
	}
	rcap |= mdi;

	/* Issue the L1 configure command, using the 32-bit capability
	 * format when the firmware supports it. */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
3944 
3945 /**
3946  *	t4_restart_aneg - restart autonegotiation
3947  *	@adap: the adapter
3948  *	@mbox: mbox to use for the FW command
3949  *	@port: the port id
3950  *
3951  *	Restarts autonegotiation for the selected port.
3952  */
3953 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3954 {
3955 	struct fw_port_cmd c;
3956 
3957 	memset(&c, 0, sizeof(c));
3958 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3959 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3960 				     V_FW_PORT_CMD_PORTID(port));
3961 	c.action_to_len16 =
3962 		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3963 			    FW_LEN16(c));
3964 	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3965 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3966 }
3967 
/* Platform-specific action invoked when a matching interrupt is seen. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt handler table (see
 * t4_handle_intr_status()); tables are terminated by an entry with a
 * zero mask.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
3977 
3978 /**
3979  *	t4_handle_intr_status - table driven interrupt handler
3980  *	@adapter: the adapter that generated the interrupt
3981  *	@reg: the interrupt status register to process
3982  *	@acts: table of interrupt actions
3983  *
3984  *	A table driven interrupt handler that applies a set of masks to an
3985  *	interrupt status word and performs the corresponding actions if the
3986  *	interrupts described by the mask have occurred.  The actions include
3987  *	optionally emitting a warning or alert message.  The table is terminated
3988  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
3989  *	conditions.
3990  */
3991 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3992 				 const struct intr_info *acts)
3993 {
3994 	int fatal = 0;
3995 	unsigned int mask = 0;
3996 	unsigned int status = t4_read_reg(adapter, reg);
3997 
3998 	for ( ; acts->mask; ++acts) {
3999 		if (!(status & acts->mask))
4000 			continue;
4001 		if (acts->fatal) {
4002 			fatal++;
4003 			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4004 				  status & acts->mask);
4005 		} else if (acts->msg)
4006 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4007 				 status & acts->mask);
4008 		if (acts->int_handler)
4009 			acts->int_handler(adapter);
4010 		mask |= acts->mask;
4011 	}
4012 	status &= mask;
4013 	if (status)	/* clear processed interrupts */
4014 		t4_write_reg(adapter, reg, status);
4015 	return fatal;
4016 }
4017 
/*
 * Interrupt handler for the PCIE module.  Processes the T4-only UTL status
 * registers plus the chip-appropriate PCIE_INT_CAUSE table and escalates
 * any fatal condition via t4_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4 only: UTL system bus agent conditions. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4 only: UTL PCI Express port conditions. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 PCIE_INT_CAUSE conditions. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 PCIE_INT_CAUSE conditions. */
	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
			t4_handle_intr_status(adapter,
					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					pcie_port_intr_info) +
			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					      pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4136 
4137 /*
4138  * TP interrupt handler.
4139  */
4140 static void tp_intr_handler(struct adapter *adapter)
4141 {
4142 	static const struct intr_info tp_intr_info[] = {
4143 		{ 0x3fffffff, "TP parity error", -1, 1 },
4144 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4145 		{ 0 }
4146 	};
4147 
4148 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4149 		t4_fatal_err(adapter);
4150 }
4151 
/*
 * SGE interrupt handler.  Handles the parity-error cause registers
 * (INT_CAUSE1/2), the table-driven INT_CAUSE3 conditions, and the SGE
 * error-stats register; escalates via t4_fatal_err() when anything
 * fatal was seen.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Conditions that exist only on T4/T5. */
	static const struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat the below interrupts as fatal so that we disable
	 * SGE and get better debug info.
	 */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Parity errors are reported through two 32-bit cause registers. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
				(unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	/* v also accumulates the fatal counts from the tables below. */
	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4234 
4235 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4236 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4237 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4238 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4239 
4240 /*
4241  * CIM interrupt handler.
4242  */
static void cim_intr_handler(struct adapter *adapter)
{
	/* Host-visible CIM interrupt causes (all fatal). */
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* uP-accelerator access violations (all fatal). */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	/* Report a firmware-detected error, if one is flagged. */
	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	/* Decode both cause registers; anything left is fatal. */
	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4314 
4315 /*
4316  * ULP RX interrupt handler.
4317  */
4318 static void ulprx_intr_handler(struct adapter *adapter)
4319 {
4320 	static const struct intr_info ulprx_intr_info[] = {
4321 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4322 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4323 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4324 		{ 0 }
4325 	};
4326 
4327 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4328 		t4_fatal_err(adapter);
4329 }
4330 
4331 /*
4332  * ULP TX interrupt handler.
4333  */
4334 static void ulptx_intr_handler(struct adapter *adapter)
4335 {
4336 	static const struct intr_info ulptx_intr_info[] = {
4337 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4338 		  0 },
4339 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4340 		  0 },
4341 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4342 		  0 },
4343 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4344 		  0 },
4345 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4346 		{ 0 }
4347 	};
4348 
4349 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4350 		t4_fatal_err(adapter);
4351 }
4352 
4353 /*
4354  * PM TX interrupt handler.
4355  */
4356 static void pmtx_intr_handler(struct adapter *adapter)
4357 {
4358 	static const struct intr_info pmtx_intr_info[] = {
4359 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4360 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4361 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4362 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4363 		{ 0xffffff0, "PMTX framing error", -1, 1 },
4364 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4365 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4366 		  1 },
4367 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4368 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4369 		{ 0 }
4370 	};
4371 
4372 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4373 		t4_fatal_err(adapter);
4374 }
4375 
4376 /*
4377  * PM RX interrupt handler.
4378  */
4379 static void pmrx_intr_handler(struct adapter *adapter)
4380 {
4381 	static const struct intr_info pmrx_intr_info[] = {
4382 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4383 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
4384 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4385 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4386 		  1 },
4387 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4388 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4389 		{ 0 }
4390 	};
4391 
4392 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4393 		t4_fatal_err(adapter);
4394 }
4395 
4396 /*
4397  * CPL switch interrupt handler.
4398  */
4399 static void cplsw_intr_handler(struct adapter *adapter)
4400 {
4401 	static const struct intr_info cplsw_intr_info[] = {
4402 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4403 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4404 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4405 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4406 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4407 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4408 		{ 0 }
4409 	};
4410 
4411 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4412 		t4_fatal_err(adapter);
4413 }
4414 
4415 /*
4416  * LE interrupt handler.
4417  */
4418 static void le_intr_handler(struct adapter *adap)
4419 {
4420 	unsigned int chip_ver = chip_id(adap);
4421 	static const struct intr_info le_intr_info[] = {
4422 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
4423 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
4424 		{ F_PARITYERR, "LE parity error", -1, 1 },
4425 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4426 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
4427 		{ 0 }
4428 	};
4429 
4430 	static const struct intr_info t6_le_intr_info[] = {
4431 		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4432 		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4433 		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
4434 		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4435 		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4436 		{ 0 }
4437 	};
4438 
4439 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4440 				  (chip_ver <= CHELSIO_T5) ?
4441 				  le_intr_info : t6_le_intr_info))
4442 		t4_fatal_err(adap);
4443 }
4444 
4445 /*
4446  * MPS interrupt handler.
4447  */
static void mps_intr_handler(struct adapter *adapter)
{
	/* One decode table per MPS cause register, below. */
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Decode all seven MPS cause registers; any fatal bit sets fat. */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause and flush the write before
	 * possibly declaring the error fatal. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}
4514 
4515 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4516 		      F_ECC_UE_INT_CAUSE)
4517 
4518 /*
4519  * EDC/MC interrupt handler.
4520  */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by memory-controller id; MEM_MC prints as "MC/MC0". */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the cause and ECC-status registers for this memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		/* MC1: instance 1 of the per-controller registers. */
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			  name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		/* Correctable ECC: capture the count, reset it, and log
		 * with rate limiting (these can be frequent). */
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	/* Clear handled causes; parity or uncorrectable ECC is fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
4566 
4567 /*
4568  * MA interrupt handler.
4569  */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 adds a second parity status register. */
		if (is_t5(adapter))
			CH_ALERT(adapter,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adapter,
					      A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  G_MEM_WRAP_CLIENT_NUM(v),
			  G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/* Clear the causes; any MA interrupt is treated as fatal. */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}
4594 
4595 /*
4596  * SMB interrupt handler.
4597  */
4598 static void smb_intr_handler(struct adapter *adap)
4599 {
4600 	static const struct intr_info smb_intr_info[] = {
4601 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4602 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4603 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4604 		{ 0 }
4605 	};
4606 
4607 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4608 		t4_fatal_err(adap);
4609 }
4610 
4611 /*
4612  * NC-SI interrupt handler.
4613  */
4614 static void ncsi_intr_handler(struct adapter *adap)
4615 {
4616 	static const struct intr_info ncsi_intr_info[] = {
4617 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4618 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4619 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4620 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4621 		{ 0 }
4622 	};
4623 
4624 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4625 		t4_fatal_err(adap);
4626 }
4627 
4628 /*
4629  * XGMAC interrupt handler.
4630  */
4631 static void xgmac_intr_handler(struct adapter *adap, int port)
4632 {
4633 	u32 v, int_cause_reg;
4634 
4635 	if (is_t4(adap))
4636 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4637 	else
4638 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4639 
4640 	v = t4_read_reg(adap, int_cause_reg);
4641 
4642 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4643 	if (!v)
4644 		return;
4645 
4646 	if (v & F_TXFIFO_PRTY_ERR)
4647 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4648 			  port);
4649 	if (v & F_RXFIFO_PRTY_ERR)
4650 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4651 			  port);
4652 	t4_write_reg(adap, int_cause_reg, v);
4653 	t4_fatal_err(adap);
4654 }
4655 
4656 /*
4657  * PL interrupt handler.
4658  */
4659 static void pl_intr_handler(struct adapter *adap)
4660 {
4661 	static const struct intr_info pl_intr_info[] = {
4662 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
4663 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4664 		{ 0 }
4665 	};
4666 
4667 	static const struct intr_info t5_pl_intr_info[] = {
4668 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
4669 		{ 0 }
4670 	};
4671 
4672 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4673 				  is_t4(adap) ?
4674 				  pl_intr_info : t5_pl_intr_info))
4675 		t4_fatal_err(adap);
4676 }
4677 
4678 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4679 
4680 /**
4681  *	t4_slow_intr_handler - control path interrupt handler
4682  *	@adapter: the adapter
4683  *
4684  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
4685  *	The designation 'slow' is because it involves register reads, while
4686  *	data interrupts typically don't involve any MMIOs.
4687  */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	/* Nothing we own is pending; tell the caller we did no work. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending module cause to its handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second memory controller. */
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}
4746 
4747 /**
4748  *	t4_intr_enable - enable interrupts
4749  *	@adapter: the adapter whose interrupts should be enabled
4750  *
4751  *	Enable PF-specific interrupts for the calling function and the top-level
4752  *	interrupt concentrator for global interrupts.  Interrupts are already
4753  *	enabled at each module,	here we just enable the roots of the interrupt
4754  *	hierarchies.
4755  *
4756  *	Note: this function should be called only when the driver manages
4757  *	non PF-specific interrupts from the various HW modules.  Only one PCI
4758  *	function at a time should be doing this.
4759  */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* T6 moved the source-PF field within PL_WHOAMI. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-generation-specific SGE enable bits, OR'd in below. */
	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	/* Enable PF-local interrupts and add this PF to the global map. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
4782 
4783 /**
4784  *	t4_intr_disable - disable interrupts
4785  *	@adapter: the adapter whose interrupts should be disabled
4786  *
4787  *	Disable interrupts.  We only disable the top-level interrupt
4788  *	concentrators.  The caller must be a PCI function managing global
4789  *	interrupts.
4790  */
4791 void t4_intr_disable(struct adapter *adapter)
4792 {
4793 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4794 	u32 pf = (chip_id(adapter) <= CHELSIO_T5
4795 		  ? G_SOURCEPF(whoami)
4796 		  : G_T6_SOURCEPF(whoami));
4797 
4798 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4799 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4800 }
4801 
4802 /**
4803  *	t4_intr_clear - clear all interrupts
4804  *	@adapter: the adapter whose interrupts should be cleared
4805  *
4806  *	Clears all interrupts.  The caller must be a PCI function managing
4807  *	global interrupts.
4808  */
void t4_intr_clear(struct adapter *adapter)
{
	/* Cause registers common to all supported chips. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* Write-1-to-clear every common cause register. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* Memory-controller cause register differs between T4 and T5+. */
	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
				A_MC_P_INT_CAUSE, 0xffffffff);

	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	/* Finally clear the top-level causes and flush the writes. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}
4847 
4848 /**
4849  *	hash_mac_addr - return the hash value of a MAC address
4850  *	@addr: the 48-bit Ethernet MAC address
4851  *
4852  *	Hashes a MAC address according to the hash function used by HW inexact
4853  *	(hash) address matching.
4854  */
static int hash_mac_addr(const u8 *addr)
{
	u32 hi, lo;

	/* Fold the 48-bit address into two 24-bit halves, XOR them
	 * together, then mix the result down to a 6-bit hash value. */
	hi = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	lo = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	hi ^= lo;
	hi ^= hi >> 12;
	hi ^= hi >> 6;
	return hi & 0x3f;
}
4864 
4865 /**
4866  *	t4_config_rss_range - configure a portion of the RSS mapping table
4867  *	@adapter: the adapter
4868  *	@mbox: mbox to use for the FW command
4869  *	@viid: virtual interface whose RSS subtable is to be written
4870  *	@start: start entry in the table to write
4871  *	@n: how many table entries to write
4872  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
4873  *	@nrspq: number of values in @rspq
4874  *
4875  *	Programs the selected part of the VI's RSS mapping table with the
4876  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
4877  *	until the full table range is populated.
4878  *
4879  *	The caller must ensure the values in @rspq are in the range allowed for
4880  *	@viid.
4881  */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;		/* cursor into the caller's IQ IDs */
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for the cursor */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the commad.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			/* Unused slots in a 3-tuple are encoded as 0. */
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RRS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	/* 0 on success, otherwise the first mailbox error encountered. */
	return 0;
}
4960 
4961 /**
4962  *	t4_config_glbl_rss - configure the global RSS mode
4963  *	@adapter: the adapter
4964  *	@mbox: mbox to use for the FW command
4965  *	@mode: global RSS mode
4966  *	@flags: mode-specific flags
4967  *
4968  *	Sets the global RSS mode.
4969  */
4970 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4971 		       unsigned int flags)
4972 {
4973 	struct fw_rss_glb_config_cmd c;
4974 
4975 	memset(&c, 0, sizeof(c));
4976 	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4977 				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4978 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4979 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4980 		c.u.manual.mode_pkd =
4981 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4982 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4983 		c.u.basicvirtual.mode_keymode =
4984 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4985 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4986 	} else
4987 		return -EINVAL;
4988 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4989 }
4990 
4991 /**
4992  *	t4_config_vi_rss - configure per VI RSS settings
4993  *	@adapter: the adapter
4994  *	@mbox: mbox to use for the FW command
4995  *	@viid: the VI id
4996  *	@flags: RSS flags
4997  *	@defq: id of the default RSS queue for the VI.
4998  *	@skeyidx: RSS secret key table index for non-global mode
4999  *	@skey: RSS vf_scramble key for VI.
5000  *
5001  *	Configures VI-specific RSS properties.
5002  */
5003 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5004 		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
5005 		     unsigned int skey)
5006 {
5007 	struct fw_rss_vi_config_cmd c;
5008 
5009 	memset(&c, 0, sizeof(c));
5010 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5011 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5012 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5013 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5014 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5015 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5016 	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5017 					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5018 	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5019 
5020 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5021 }
5022 
/*
 * Read one row of the TP RSS lookup table into *val.
 *
 * Writes the row index (with the upper request bits set) to
 * TP_RSS_LKP_TABLE, then waits for the hardware to set LKPTBLROWVLD
 * before returning the row contents via t4_wait_op_done_val()
 * (retry/delay arguments per that helper's contract).
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5030 
5031 /**
5032  *	t4_read_rss - read the contents of the RSS mapping table
5033  *	@adapter: the adapter
5034  *	@map: holds the contents of the RSS mapping table
5035  *
5036  *	Reads the contents of the RSS hash->queue mapping table.
5037  */
5038 int t4_read_rss(struct adapter *adapter, u16 *map)
5039 {
5040 	u32 val;
5041 	int i, ret;
5042 
5043 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5044 		ret = rd_rss_row(adapter, i, &val);
5045 		if (ret)
5046 			return ret;
5047 		*map++ = G_LKPTBLQUEUE0(val);
5048 		*map++ = G_LKPTBLQUEUE1(val);
5049 	}
5050 	return 0;
5051 }
5052 
5053 /**
5054  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5055  * @adap: the adapter
5056  * @cmd: TP fw ldst address space type
5057  * @vals: where the indirect register values are stored/written
5058  * @nregs: how many indirect registers to read/write
5059  * @start_idx: index of first indirect register to read/write
5060  * @rw: Read (1) or Write (0)
5061  * @sleep_ok: if true we may sleep while awaiting command completion
5062  *
5063  * Access TP indirect registers through LDST
5064  **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	/* One LDST mailbox command per register. */
	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		/* For writes supply the value; for reads leave it zero. */
		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		/* Reads return the value in the command reply. */
		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	/* 0 on success, or the first mailbox error encountered. */
	return 0;
}
5094 
5095 /**
5096  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5097  * @adap: the adapter
5098  * @reg_addr: Address Register
5099  * @reg_data: Data register
5100  * @buff: where the indirect register values are stored/written
5101  * @nregs: how many indirect registers to read/write
5102  * @start_index: index of first indirect register to read/write
5103  * @rw: READ(1) or WRITE(0)
5104  * @sleep_ok: if true we may sleep while awaiting command completion
5105  *
5106  * Read/Write TP indirect registers through LDST if possible.
5107  * Else, use backdoor access
5108  **/
5109 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5110 			      u32 *buff, u32 nregs, u32 start_index, int rw,
5111 			      bool sleep_ok)
5112 {
5113 	int rc = -EINVAL;
5114 	int cmd;
5115 
5116 	switch (reg_addr) {
5117 	case A_TP_PIO_ADDR:
5118 		cmd = FW_LDST_ADDRSPC_TP_PIO;
5119 		break;
5120 	case A_TP_TM_PIO_ADDR:
5121 		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5122 		break;
5123 	case A_TP_MIB_INDEX:
5124 		cmd = FW_LDST_ADDRSPC_TP_MIB;
5125 		break;
5126 	default:
5127 		goto indirect_access;
5128 	}
5129 
5130 	if (t4_use_ldst(adap))
5131 		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5132 				      sleep_ok);
5133 
5134 indirect_access:
5135 
5136 	if (rc) {
5137 		if (rw)
5138 			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5139 					 start_index);
5140 		else
5141 			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5142 					  start_index);
5143 	}
5144 }
5145 
5146 /**
5147  * t4_tp_pio_read - Read TP PIO registers
5148  * @adap: the adapter
5149  * @buff: where the indirect register values are written
5150  * @nregs: how many indirect registers to read
5151  * @start_index: index of first indirect register to read
5152  * @sleep_ok: if true we may sleep while awaiting command completion
5153  *
5154  * Read TP PIO Registers
5155  **/
5156 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5157 		    u32 start_index, bool sleep_ok)
5158 {
5159 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5160 			  start_index, 1, sleep_ok);
5161 }
5162 
5163 /**
5164  * t4_tp_pio_write - Write TP PIO registers
5165  * @adap: the adapter
5166  * @buff: where the indirect register values are stored
5167  * @nregs: how many indirect registers to write
5168  * @start_index: index of first indirect register to write
5169  * @sleep_ok: if true we may sleep while awaiting command completion
5170  *
5171  * Write TP PIO Registers
5172  **/
5173 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5174 		     u32 start_index, bool sleep_ok)
5175 {
5176 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5177 	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
5178 }
5179 
5180 /**
5181  * t4_tp_tm_pio_read - Read TP TM PIO registers
5182  * @adap: the adapter
5183  * @buff: where the indirect register values are written
5184  * @nregs: how many indirect registers to read
5185  * @start_index: index of first indirect register to read
5186  * @sleep_ok: if true we may sleep while awaiting command completion
5187  *
5188  * Read TP TM PIO Registers
5189  **/
5190 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5191 		       u32 start_index, bool sleep_ok)
5192 {
5193 	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5194 			  nregs, start_index, 1, sleep_ok);
5195 }
5196 
5197 /**
5198  * t4_tp_mib_read - Read TP MIB registers
5199  * @adap: the adapter
5200  * @buff: where the indirect register values are written
5201  * @nregs: how many indirect registers to read
5202  * @start_index: index of first indirect register to read
5203  * @sleep_ok: if true we may sleep while awaiting command completion
5204  *
5205  * Read TP MIB Registers
5206  **/
5207 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5208 		    bool sleep_ok)
5209 {
5210 	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5211 			  start_index, 1, sleep_ok);
5212 }
5213 
5214 /**
5215  *	t4_read_rss_key - read the global RSS key
5216  *	@adap: the adapter
5217  *	@key: 10-entry array holding the 320-bit RSS key
5218  * 	@sleep_ok: if true we may sleep while awaiting command completion
5219  *
5220  *	Reads the global 320-bit RSS key.
5221  */
5222 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5223 {
5224 	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5225 }
5226 
5227 /**
5228  *	t4_write_rss_key - program one of the RSS keys
5229  *	@adap: the adapter
5230  *	@key: 10-entry array holding the 320-bit RSS key
5231  *	@idx: which RSS key to write
5232  * 	@sleep_ok: if true we may sleep while awaiting command completion
5233  *
5234  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5235  *	0..15 the corresponding entry in the RSS key table is written,
5236  *	otherwise the global RSS key is written.
5237  */
5238 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5239 		      bool sleep_ok)
5240 {
5241 	u8 rss_key_addr_cnt = 16;
5242 	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5243 
5244 	/*
5245 	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5246 	 * allows access to key addresses 16-63 by using KeyWrAddrX
5247 	 * as index[5:4](upper 2) into key table
5248 	 */
5249 	if ((chip_id(adap) > CHELSIO_T5) &&
5250 	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5251 		rss_key_addr_cnt = 32;
5252 
5253 	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5254 
5255 	if (idx >= 0 && idx < rss_key_addr_cnt) {
5256 		if (rss_key_addr_cnt > 16)
5257 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5258 				     vrt | V_KEYWRADDRX(idx >> 4) |
5259 				     V_T6_VFWRADDR(idx) | F_KEYWREN);
5260 		else
5261 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5262 				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5263 	}
5264 }
5265 
5266 /**
5267  *	t4_read_rss_pf_config - read PF RSS Configuration Table
5268  *	@adapter: the adapter
5269  *	@index: the entry in the PF RSS table to read
5270  *	@valp: where to store the returned value
5271  * 	@sleep_ok: if true we may sleep while awaiting command completion
5272  *
5273  *	Reads the PF RSS Configuration Table at the specified index and returns
5274  *	the value found there.
5275  */
5276 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5277 			   u32 *valp, bool sleep_ok)
5278 {
5279 	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5280 }
5281 
5282 /**
5283  *	t4_write_rss_pf_config - write PF RSS Configuration Table
5284  *	@adapter: the adapter
5285  *	@index: the entry in the VF RSS table to read
5286  *	@val: the value to store
5287  * 	@sleep_ok: if true we may sleep while awaiting command completion
5288  *
5289  *	Writes the PF RSS Configuration Table at the specified index with the
5290  *	specified value.
5291  */
5292 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5293 			    u32 val, bool sleep_ok)
5294 {
5295 	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5296 			sleep_ok);
5297 }
5298 
5299 /**
5300  *	t4_read_rss_vf_config - read VF RSS Configuration Table
5301  *	@adapter: the adapter
5302  *	@index: the entry in the VF RSS table to read
5303  *	@vfl: where to store the returned VFL
5304  *	@vfh: where to store the returned VFH
5305  * 	@sleep_ok: if true we may sleep while awaiting command completion
5306  *
5307  *	Reads the VF RSS Configuration Table at the specified index and returns
5308  *	the (VFL, VFH) values found there.
5309  */
5310 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5311 			   u32 *vfl, u32 *vfh, bool sleep_ok)
5312 {
5313 	u32 vrt, mask, data;
5314 
5315 	if (chip_id(adapter) <= CHELSIO_T5) {
5316 		mask = V_VFWRADDR(M_VFWRADDR);
5317 		data = V_VFWRADDR(index);
5318 	} else {
5319 		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5320 		 data = V_T6_VFWRADDR(index);
5321 	}
5322 	/*
5323 	 * Request that the index'th VF Table values be read into VFL/VFH.
5324 	 */
5325 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5326 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5327 	vrt |= data | F_VFRDEN;
5328 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5329 
5330 	/*
5331 	 * Grab the VFL/VFH values ...
5332 	 */
5333 	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5334 	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5335 }
5336 
5337 /**
5338  *	t4_write_rss_vf_config - write VF RSS Configuration Table
5339  *
5340  *	@adapter: the adapter
5341  *	@index: the entry in the VF RSS table to write
5342  *	@vfl: the VFL to store
5343  *	@vfh: the VFH to store
5344  *
5345  *	Writes the VF RSS Configuration Table at the specified index with the
5346  *	specified (VFL, VFH) values.
5347  */
5348 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5349 			    u32 vfl, u32 vfh, bool sleep_ok)
5350 {
5351 	u32 vrt, mask, data;
5352 
5353 	if (chip_id(adapter) <= CHELSIO_T5) {
5354 		mask = V_VFWRADDR(M_VFWRADDR);
5355 		data = V_VFWRADDR(index);
5356 	} else {
5357 		mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5358 		data = V_T6_VFWRADDR(index);
5359 	}
5360 
5361 	/*
5362 	 * Load up VFL/VFH with the values to be written ...
5363 	 */
5364 	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5365 	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5366 
5367 	/*
5368 	 * Write the VFL/VFH into the VF Table at index'th location.
5369 	 */
5370 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5371 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5372 	vrt |= data | F_VFRDEN;
5373 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5374 }
5375 
5376 /**
5377  *	t4_read_rss_pf_map - read PF RSS Map
5378  *	@adapter: the adapter
5379  * 	@sleep_ok: if true we may sleep while awaiting command completion
5380  *
5381  *	Reads the PF RSS Map register and returns its value.
5382  */
5383 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5384 {
5385 	u32 pfmap;
5386 
5387 	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5388 
5389 	return pfmap;
5390 }
5391 
5392 /**
5393  *	t4_write_rss_pf_map - write PF RSS Map
5394  *	@adapter: the adapter
5395  *	@pfmap: PF RSS Map value
5396  *
5397  *	Writes the specified value to the PF RSS Map register.
5398  */
5399 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5400 {
5401 	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5402 }
5403 
5404 /**
5405  *	t4_read_rss_pf_mask - read PF RSS Mask
5406  *	@adapter: the adapter
5407  * 	@sleep_ok: if true we may sleep while awaiting command completion
5408  *
5409  *	Reads the PF RSS Mask register and returns its value.
5410  */
5411 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5412 {
5413 	u32 pfmask;
5414 
5415 	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5416 
5417 	return pfmask;
5418 }
5419 
5420 /**
5421  *	t4_write_rss_pf_mask - write PF RSS Mask
5422  *	@adapter: the adapter
5423  *	@pfmask: PF RSS Mask value
5424  *
5425  *	Writes the specified value to the PF RSS Mask register.
5426  */
5427 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
5428 {
5429 	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5430 }
5431 
5432 /**
5433  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5434  *	@adap: the adapter
5435  *	@v4: holds the TCP/IP counter values
5436  *	@v6: holds the TCP/IPv6 counter values
5437  * 	@sleep_ok: if true we may sleep while awaiting command completion
5438  *
5439  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5440  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5441  */
5442 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5443 			 struct tp_tcp_stats *v6, bool sleep_ok)
5444 {
5445 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5446 
5447 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5448 #define STAT(x)     val[STAT_IDX(x)]
5449 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5450 
5451 	if (v4) {
5452 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5453 			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
5454 		v4->tcp_out_rsts = STAT(OUT_RST);
5455 		v4->tcp_in_segs  = STAT64(IN_SEG);
5456 		v4->tcp_out_segs = STAT64(OUT_SEG);
5457 		v4->tcp_retrans_segs = STAT64(RXT_SEG);
5458 	}
5459 	if (v6) {
5460 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5461 			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5462 		v6->tcp_out_rsts = STAT(OUT_RST);
5463 		v6->tcp_in_segs  = STAT64(IN_SEG);
5464 		v6->tcp_out_segs = STAT64(OUT_SEG);
5465 		v6->tcp_retrans_segs = STAT64(RXT_SEG);
5466 	}
5467 #undef STAT64
5468 #undef STAT
5469 #undef STAT_IDX
5470 }
5471 
5472 /**
5473  *	t4_tp_get_err_stats - read TP's error MIB counters
5474  *	@adap: the adapter
5475  *	@st: holds the counter values
5476  * 	@sleep_ok: if true we may sleep while awaiting command completion
5477  *
5478  *	Returns the values of TP's error counters.
5479  */
5480 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5481 			 bool sleep_ok)
5482 {
5483 	int nchan = adap->chip_params->nchan;
5484 
5485 	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5486 		       sleep_ok);
5487 
5488 	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5489 		       sleep_ok);
5490 
5491 	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5492 		       sleep_ok);
5493 
5494 	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5495 		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5496 
5497 	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5498 		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5499 
5500 	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5501 		       sleep_ok);
5502 
5503 	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5504 		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5505 
5506 	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5507 		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
5508 
5509 	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
5510 		       sleep_ok);
5511 }
5512 
5513 /**
5514  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
5515  *	@adap: the adapter
5516  *	@st: holds the counter values
5517  *
5518  *	Returns the values of TP's proxy counters.
5519  */
5520 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
5521     bool sleep_ok)
5522 {
5523 	int nchan = adap->chip_params->nchan;
5524 
5525 	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
5526 }
5527 
5528 /**
5529  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
5530  *	@adap: the adapter
5531  *	@st: holds the counter values
5532  * 	@sleep_ok: if true we may sleep while awaiting command completion
5533  *
5534  *	Returns the values of TP's CPL counters.
5535  */
5536 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5537 			 bool sleep_ok)
5538 {
5539 	int nchan = adap->chip_params->nchan;
5540 
5541 	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
5542 
5543 	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
5544 }
5545 
5546 /**
5547  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5548  *	@adap: the adapter
5549  *	@st: holds the counter values
5550  *
5551  *	Returns the values of TP's RDMA counters.
5552  */
5553 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5554 			  bool sleep_ok)
5555 {
5556 	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
5557 		       sleep_ok);
5558 }
5559 
5560 /**
5561  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5562  *	@adap: the adapter
5563  *	@idx: the port index
5564  *	@st: holds the counter values
5565  * 	@sleep_ok: if true we may sleep while awaiting command completion
5566  *
5567  *	Returns the values of TP's FCoE counters for the selected port.
5568  */
5569 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5570 		       struct tp_fcoe_stats *st, bool sleep_ok)
5571 {
5572 	u32 val[2];
5573 
5574 	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
5575 		       sleep_ok);
5576 
5577 	t4_tp_mib_read(adap, &st->frames_drop, 1,
5578 		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
5579 
5580 	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
5581 		       sleep_ok);
5582 
5583 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
5584 }
5585 
5586 /**
5587  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5588  *	@adap: the adapter
5589  *	@st: holds the counter values
5590  * 	@sleep_ok: if true we may sleep while awaiting command completion
5591  *
5592  *	Returns the values of TP's counters for non-TCP directly-placed packets.
5593  */
5594 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5595 		      bool sleep_ok)
5596 {
5597 	u32 val[4];
5598 
5599 	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
5600 
5601 	st->frames = val[0];
5602 	st->drops = val[1];
5603 	st->octets = ((u64)val[2] << 32) | val[3];
5604 }
5605 
5606 /**
5607  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
5608  *	@adap: the adapter
5609  *	@mtus: where to store the MTU values
5610  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
5611  *
5612  *	Reads the HW path MTU table.
5613  */
5614 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5615 {
5616 	u32 v;
5617 	int i;
5618 
5619 	for (i = 0; i < NMTUS; ++i) {
5620 		t4_write_reg(adap, A_TP_MTU_TABLE,
5621 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
5622 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
5623 		mtus[i] = G_MTUVALUE(v);
5624 		if (mtu_log)
5625 			mtu_log[i] = G_MTUWIDTH(v);
5626 	}
5627 }
5628 
5629 /**
5630  *	t4_read_cong_tbl - reads the congestion control table
5631  *	@adap: the adapter
5632  *	@incr: where to store the alpha values
5633  *
5634  *	Reads the additive increments programmed into the HW congestion
5635  *	control table.
5636  */
5637 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5638 {
5639 	unsigned int mtu, w;
5640 
5641 	for (mtu = 0; mtu < NMTUS; ++mtu)
5642 		for (w = 0; w < NCCTRL_WIN; ++w) {
5643 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
5644 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
5645 			incr[mtu][w] = (u16)t4_read_reg(adap,
5646 						A_TP_CCTRL_TABLE) & 0x1fff;
5647 		}
5648 }
5649 
5650 /**
5651  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5652  *	@adap: the adapter
5653  *	@addr: the indirect TP register address
5654  *	@mask: specifies the field within the register to modify
5655  *	@val: new value for the field
5656  *
5657  *	Sets a field of an indirect TP register to the given value.
5658  */
5659 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5660 			    unsigned int mask, unsigned int val)
5661 {
5662 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5663 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5664 	t4_write_reg(adap, A_TP_PIO_DATA, val);
5665 }
5666 
5667 /**
5668  *	init_cong_ctrl - initialize congestion control parameters
5669  *	@a: the alpha values for congestion control
5670  *	@b: the beta values for congestion control
5671  *
5672  *	Initialize the congestion control parameters.
5673  */
5674 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5675 {
5676 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5677 	a[9] = 2;
5678 	a[10] = 3;
5679 	a[11] = 4;
5680 	a[12] = 5;
5681 	a[13] = 6;
5682 	a[14] = 7;
5683 	a[15] = 8;
5684 	a[16] = 9;
5685 	a[17] = 10;
5686 	a[18] = 14;
5687 	a[19] = 17;
5688 	a[20] = 21;
5689 	a[21] = 25;
5690 	a[22] = 30;
5691 	a[23] = 35;
5692 	a[24] = 45;
5693 	a[25] = 60;
5694 	a[26] = 80;
5695 	a[27] = 100;
5696 	a[28] = 200;
5697 	a[29] = 300;
5698 	a[30] = 400;
5699 	a[31] = 500;
5700 
5701 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5702 	b[9] = b[10] = 1;
5703 	b[11] = b[12] = 2;
5704 	b[13] = b[14] = b[15] = b[16] = 3;
5705 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5706 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5707 	b[28] = b[29] = 6;
5708 	b[30] = b[31] = 7;
5709 }
5710 
5711 /* The minimum additive increment value for the congestion control table */
5712 #define CC_MIN_INCR 2U
5713 
5714 /**
5715  *	t4_load_mtus - write the MTU and congestion control HW tables
5716  *	@adap: the adapter
5717  *	@mtus: the values for the MTU table
5718  *	@alpha: the values for the congestion control alpha parameter
5719  *	@beta: the values for the congestion control beta parameter
5720  *
5721  *	Write the HW MTU table with the supplied MTUs and the high-speed
5722  *	congestion control table with the supplied alpha, beta, and MTUs.
5723  *	We write the two tables together because the additive increments
5724  *	depend on the MTUs.
5725  */
5726 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5727 		  const unsigned short *alpha, const unsigned short *beta)
5728 {
5729 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
5730 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5731 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5732 		28672, 40960, 57344, 81920, 114688, 163840, 229376
5733 	};
5734 
5735 	unsigned int i, w;
5736 
5737 	for (i = 0; i < NMTUS; ++i) {
5738 		unsigned int mtu = mtus[i];
5739 		unsigned int log2 = fls(mtu);
5740 
5741 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
5742 			log2--;
5743 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5744 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5745 
5746 		for (w = 0; w < NCCTRL_WIN; ++w) {
5747 			unsigned int inc;
5748 
5749 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5750 				  CC_MIN_INCR);
5751 
5752 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5753 				     (w << 16) | (beta[w] << 13) | inc);
5754 		}
5755 	}
5756 }
5757 
5758 /**
5759  *	t4_set_pace_tbl - set the pace table
5760  *	@adap: the adapter
5761  *	@pace_vals: the pace values in microseconds
5762  *	@start: index of the first entry in the HW pace table to set
5763  *	@n: how many entries to set
5764  *
5765  *	Sets (a subset of the) HW pace table.
5766  */
5767 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5768 		     unsigned int start, unsigned int n)
5769 {
5770 	unsigned int vals[NTX_SCHED], i;
5771 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5772 
5773 	if (n > NTX_SCHED)
5774 	    return -ERANGE;
5775 
5776 	/* convert values from us to dack ticks, rounding to closest value */
5777 	for (i = 0; i < n; i++, pace_vals++) {
5778 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5779 		if (vals[i] > 0x7ff)
5780 			return -ERANGE;
5781 		if (*pace_vals && vals[i] == 0)
5782 			return -ERANGE;
5783 	}
5784 	for (i = 0; i < n; i++, start++)
5785 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5786 	return 0;
5787 }
5788 
5789 /**
5790  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5791  *	@adap: the adapter
5792  *	@kbps: target rate in Kbps
5793  *	@sched: the scheduler index
5794  *
5795  *	Configure a Tx HW scheduler for the target rate.
5796  */
5797 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5798 {
5799 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5800 	unsigned int clk = adap->params.vpd.cclk * 1000;
5801 	unsigned int selected_cpt = 0, selected_bpt = 0;
5802 
5803 	if (kbps > 0) {
5804 		kbps *= 125;     /* -> bytes */
5805 		for (cpt = 1; cpt <= 255; cpt++) {
5806 			tps = clk / cpt;
5807 			bpt = (kbps + tps / 2) / tps;
5808 			if (bpt > 0 && bpt <= 255) {
5809 				v = bpt * tps;
5810 				delta = v >= kbps ? v - kbps : kbps - v;
5811 				if (delta < mindelta) {
5812 					mindelta = delta;
5813 					selected_cpt = cpt;
5814 					selected_bpt = bpt;
5815 				}
5816 			} else if (selected_cpt)
5817 				break;
5818 		}
5819 		if (!selected_cpt)
5820 			return -EINVAL;
5821 	}
5822 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5823 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5824 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5825 	if (sched & 1)
5826 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5827 	else
5828 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5829 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5830 	return 0;
5831 }
5832 
5833 /**
5834  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5835  *	@adap: the adapter
5836  *	@sched: the scheduler index
5837  *	@ipg: the interpacket delay in tenths of nanoseconds
5838  *
5839  *	Set the interpacket delay for a HW packet rate scheduler.
5840  */
5841 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5842 {
5843 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5844 
5845 	/* convert ipg to nearest number of core clocks */
5846 	ipg *= core_ticks_per_usec(adap);
5847 	ipg = (ipg + 5000) / 10000;
5848 	if (ipg > M_TXTIMERSEPQ0)
5849 		return -EINVAL;
5850 
5851 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5852 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5853 	if (sched & 1)
5854 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5855 	else
5856 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5857 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5858 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
5859 	return 0;
5860 }
5861 
5862 /*
5863  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5864  * clocks.  The formula is
5865  *
5866  * bytes/s = bytes256 * 256 * ClkFreq / 4096
5867  *
5868  * which is equivalent to
5869  *
5870  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5871  */
5872 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5873 {
5874 	u64 v = (u64)bytes256 * adap->params.vpd.cclk;
5875 
5876 	return v * 62 + v / 2;
5877 }
5878 
5879 /**
5880  *	t4_get_chan_txrate - get the current per channel Tx rates
5881  *	@adap: the adapter
5882  *	@nic_rate: rates for NIC traffic
5883  *	@ofld_rate: rates for offloaded traffic
5884  *
5885  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
5886  *	for each channel.
5887  */
5888 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5889 {
5890 	u32 v;
5891 
5892 	v = t4_read_reg(adap, A_TP_TX_TRATE);
5893 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5894 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5895 	if (adap->chip_params->nchan > 2) {
5896 		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5897 		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5898 	}
5899 
5900 	v = t4_read_reg(adap, A_TP_TX_ORATE);
5901 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5902 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5903 	if (adap->chip_params->nchan > 2) {
5904 		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5905 		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5906 	}
5907 }
5908 
5909 /**
5910  *	t4_set_trace_filter - configure one of the tracing filters
5911  *	@adap: the adapter
5912  *	@tp: the desired trace filter parameters
5913  *	@idx: which filter to configure
5914  *	@enable: whether to enable or disable the filter
5915  *
5916  *	Configures one of the tracing filters available in HW.  If @tp is %NULL
5917  *	it indicates that the filter is already written in the register and it
5918  *	just needs to be enabled or disabled.
5919  */
5920 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5921     int idx, int enable)
5922 {
5923 	int i, ofst = idx * 4;
5924 	u32 data_reg, mask_reg, cfg;
5925 	u32 multitrc = F_TRCMULTIFILTER;
5926 	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5927 
5928 	if (idx < 0 || idx >= NTRACE)
5929 		return -EINVAL;
5930 
5931 	if (tp == NULL || !enable) {
5932 		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5933 		    enable ? en : 0);
5934 		return 0;
5935 	}
5936 
5937 	/*
5938 	 * TODO - After T4 data book is updated, specify the exact
5939 	 * section below.
5940 	 *
5941 	 * See T4 data book - MPS section for a complete description
5942 	 * of the below if..else handling of A_MPS_TRC_CFG register
5943 	 * value.
5944 	 */
5945 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5946 	if (cfg & F_TRCMULTIFILTER) {
5947 		/*
5948 		 * If multiple tracers are enabled, then maximum
5949 		 * capture size is 2.5KB (FIFO size of a single channel)
5950 		 * minus 2 flits for CPL_TRACE_PKT header.
5951 		 */
5952 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5953 			return -EINVAL;
5954 	} else {
5955 		/*
5956 		 * If multiple tracers are disabled, to avoid deadlocks
5957 		 * maximum packet capture size of 9600 bytes is recommended.
5958 		 * Also in this mode, only trace0 can be enabled and running.
5959 		 */
5960 		multitrc = 0;
5961 		if (tp->snap_len > 9600 || idx)
5962 			return -EINVAL;
5963 	}
5964 
5965 	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5966 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5967 	    tp->min_len > M_TFMINPKTSIZE)
5968 		return -EINVAL;
5969 
5970 	/* stop the tracer we'll be changing */
5971 	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5972 
5973 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5974 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5975 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5976 
5977 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5978 		t4_write_reg(adap, data_reg, tp->data[i]);
5979 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5980 	}
5981 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5982 		     V_TFCAPTUREMAX(tp->snap_len) |
5983 		     V_TFMINPKTSIZE(tp->min_len));
5984 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5985 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5986 		     (is_t4(adap) ?
5987 		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5988 		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5989 
5990 	return 0;
5991 }
5992 
5993 /**
5994  *	t4_get_trace_filter - query one of the tracing filters
5995  *	@adap: the adapter
5996  *	@tp: the current trace filter parameters
5997  *	@idx: which trace filter to query
5998  *	@enabled: non-zero if the filter is enabled
5999  *
6000  *	Returns the current settings of one of the HW tracing filters.
6001  */
6002 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6003 			 int *enabled)
6004 {
6005 	u32 ctla, ctlb;
6006 	int i, ofst = idx * 4;
6007 	u32 data_reg, mask_reg;
6008 
6009 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6010 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6011 
6012 	if (is_t4(adap)) {
6013 		*enabled = !!(ctla & F_TFEN);
6014 		tp->port =  G_TFPORT(ctla);
6015 		tp->invert = !!(ctla & F_TFINVERTMATCH);
6016 	} else {
6017 		*enabled = !!(ctla & F_T5_TFEN);
6018 		tp->port = G_T5_TFPORT(ctla);
6019 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6020 	}
6021 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
6022 	tp->min_len = G_TFMINPKTSIZE(ctlb);
6023 	tp->skip_ofst = G_TFOFFSET(ctla);
6024 	tp->skip_len = G_TFLENGTH(ctla);
6025 
6026 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6027 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6028 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6029 
6030 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6031 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6032 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6033 	}
6034 }
6035 
6036 /**
6037  *	t4_pmtx_get_stats - returns the HW stats from PMTX
6038  *	@adap: the adapter
6039  *	@cnt: where to store the count statistics
6040  *	@cycles: where to store the cycle statistics
6041  *
6042  *	Returns performance statistics from PMTX.
6043  */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i; the config register is 1-based. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: fetch the 64-bit cycle count through the
			 * PM TX debug window; data[0] is the high word. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
6062 
6063 /**
6064  *	t4_pmrx_get_stats - returns the HW stats from PMRX
6065  *	@adap: the adapter
6066  *	@cnt: where to store the count statistics
6067  *	@cycles: where to store the cycle statistics
6068  *
6069  *	Returns performance statistics from PMRX.
6070  */
6071 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6072 {
6073 	int i;
6074 	u32 data[2];
6075 
6076 	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6077 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6078 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6079 		if (is_t4(adap)) {
6080 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6081 		} else {
6082 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6083 					 A_PM_RX_DBG_DATA, data, 2,
6084 					 A_PM_RX_DBG_STAT_MSB);
6085 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6086 		}
6087 	}
6088 }
6089 
6090 /**
6091  *	t4_get_mps_bg_map - return the buffer groups associated with a port
6092  *	@adap: the adapter
6093  *	@idx: the port index
6094  *
6095  *	Returns a bitmap indicating which MPS buffer groups are associated
6096  *	with the given port.  Bit i is set if buffer group i is used by the
6097  *	port.
6098  */
6099 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
6100 {
6101 	u32 n;
6102 
6103 	if (adap->params.mps_bg_map)
6104 		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6105 
6106 	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6107 	if (n == 0)
6108 		return idx == 0 ? 0xf : 0;
6109 	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6110 		return idx < 2 ? (3 << (2 * idx)) : 0;
6111 	return 1 << idx;
6112 }
6113 
6114 /*
6115  * TP RX e-channels associated with the port.
6116  */
6117 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6118 {
6119 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6120 
6121 	if (n == 0)
6122 		return idx == 0 ? 0xf : 0;
6123 	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6124 		return idx < 2 ? (3 << (2 * idx)) : 0;
6125 	return 1 << idx;
6126 }
6127 
6128 /**
6129  *      t4_get_port_type_description - return Port Type string description
6130  *      @port_type: firmware Port Type enumeration
6131  */
6132 const char *t4_get_port_type_description(enum fw_port_type port_type)
6133 {
6134 	static const char *const port_type_description[] = {
6135 		"Fiber_XFI",
6136 		"Fiber_XAUI",
6137 		"BT_SGMII",
6138 		"BT_XFI",
6139 		"BT_XAUI",
6140 		"KX4",
6141 		"CX4",
6142 		"KX",
6143 		"KR",
6144 		"SFP",
6145 		"BP_AP",
6146 		"BP4_AP",
6147 		"QSFP_10G",
6148 		"QSA",
6149 		"QSFP",
6150 		"BP40_BA",
6151 		"KR4_100G",
6152 		"CR4_QSFP",
6153 		"CR_QSFP",
6154 		"CR2_QSFP",
6155 		"SFP28",
6156 		"KR_SFP28",
6157 	};
6158 
6159 	if (port_type < ARRAY_SIZE(port_type_description))
6160 		return port_type_description[port_type];
6161 	return "UNKNOWN";
6162 }
6163 
6164 /**
6165  *      t4_get_port_stats_offset - collect port stats relative to a previous
6166  *				   snapshot
6167  *      @adap: The adapter
6168  *      @idx: The port
6169  *      @stats: Current stats to fill
6170  *      @offset: Previous stats snapshot
6171  */
6172 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6173 		struct port_stats *stats,
6174 		struct port_stats *offset)
6175 {
6176 	u64 *s, *o;
6177 	int i;
6178 
6179 	t4_get_port_stats(adap, idx, stats);
6180 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6181 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
6182 			i++, s++, o++)
6183 		*s -= *o;
6184 }
6185 
6186 /**
6187  *	t4_get_port_stats - collect port statistics
6188  *	@adap: the adapter
6189  *	@idx: the port index
6190  *	@p: the stats structure to fill
6191  *
6192  *	Collect statistics related to the given port from HW.
6193  */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port statistics registers live at different offsets on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+ the hardware may be configured to fold pause frames into
	 * the frame/byte/mcast counters; back them out here so the reported
	 * TX numbers cover non-pause traffic only.
	 */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the RX side. */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Only read drop/truncate counters for buffer groups owned by
	 * this port (per the MPS buffer-group map). */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6287 
6288 /**
6289  *	t4_get_lb_stats - collect loopback port statistics
6290  *	@adap: the adapter
6291  *	@idx: the loopback port index
6292  *	@p: the stats structure to fill
6293  *
6294  *	Return HW statistics for the given loopback port.
6295  */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;

/* Loopback-port statistics live at different offsets on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Only read loopback drop/truncate counters for buffer groups
	 * owned by this port (per the MPS buffer-group map). */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6335 
6336 /**
6337  *	t4_wol_magic_enable - enable/disable magic packet WoL
6338  *	@adap: the adapter
6339  *	@port: the physical port index
6340  *	@addr: MAC address expected in magic packets, %NULL to disable
6341  *
6342  *	Enables/disables magic packet wake-on-LAN for the selected port.
6343  */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The magic-packet MAC-ID and port config registers moved on T5+. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* Low register holds the last four MAC bytes, high register
		 * the first two. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	/* Enable magic-packet matching iff a MAC address was supplied. */
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}
6369 
6370 /**
6371  *	t4_wol_pat_enable - enable/disable pattern-based WoL
6372  *	@adap: the adapter
6373  *	@port: the physical port index
6374  *	@map: bitmap of which HW pattern filters to set
6375  *	@mask0: byte mask for bytes 0-63 of a packet
6376  *	@mask1: byte mask for bytes 64-127 of a packet
6377  *	@crc: Ethernet CRC for selected bytes
6378  *	@enable: enable/disable switch
6379  *
6380  *	Sets the pattern filters indicated in @map to mask out the bytes
6381  *	specified in @mask0/@mask1 in received packets and compare the CRC of
6382  *	the resulting packet against @crc.  If @enable is %true pattern-based
6383  *	WoL is enabled, otherwise disabled.
6384  */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	/* The port config register moved between T4 and T5+. */
	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* Only NWOL_PAT (8) HW pattern filters exist. */
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 hold the upper mask words; they are shared across all
	 * pattern writes below.  DATA0 is re-written per pattern. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC; CRC entries start at EPIO address 32 */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}
6434 
6435 /*     t4_mk_filtdelwr - create a delete filter WR
6436  *     @ftid: the filter ID
6437  *     @wr: the filter work request to populate
6438  *     @qid: ingress queue to receive the delete notification
6439  *
6440  *     Creates a filter work request to delete the supplied filter.  If @qid is
6441  *     negative the delete notification is suppressed.
6442  */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* A negative qid suppresses the delete-completion notification. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
6455 
/*
 * Initialize the common header of a firmware command 'var': opcode
 * FW_<cmd>_CMD, the REQUEST flag, the READ/WRITE direction flag, and
 * the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6462 
/**
 *	t4_fwaddrspace_write - write a value into the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW_LDST command through the given mailbox to write @val
 *	to @addr in the FW_LDST_ADDRSPC_FIRMWARE address space.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			  u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6481 
6482 /**
6483  *	t4_mdio_rd - read a PHY register through MDIO
6484  *	@adap: the adapter
6485  *	@mbox: mailbox to use for the FW command
6486  *	@phy_addr: the PHY address
6487  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6488  *	@reg: the register to read
6489  *	@valp: where to store the value
6490  *
6491  *	Issues a FW command through the given mailbox to read a PHY register.
6492  */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The firmware echoes the command back with rval filled in. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);	/* 16-bit PHY reg value */
	return ret;
}
6515 
6516 /**
6517  *	t4_mdio_wr - write a PHY register through MDIO
6518  *	@adap: the adapter
6519  *	@mbox: mailbox to use for the FW command
6520  *	@phy_addr: the PHY address
6521  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6522  *	@reg: the register to write
6523  *	@valp: value to write
6524  *
6525  *	Issues a FW command through the given mailbox to write a PHY register.
6526  */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);	/* 16-bit value to program */

	/* No response payload is needed for a write. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6546 
6547 /**
6548  *
6549  *	t4_sge_decode_idma_state - decode the idma state
6550  *	@adap: the adapter
6551  *	@state: the state idma is stuck in
6552  */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* Decode strings indexed by the raw IDMA state value; the state
	 * machine encoding differs per chip generation. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
6703 
6704 /**
6705  *      t4_sge_ctxt_flush - flush the SGE context cache
6706  *      @adap: the adapter
6707  *      @mbox: mailbox to use for the FW command
6708  *
6709  *      Issues a FW command through the given mailbox to flush the
6710  *      SGE context cache.
6711  */
6712 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6713 {
6714 	int ret;
6715 	u32 ldst_addrspace;
6716 	struct fw_ldst_cmd c;
6717 
6718 	memset(&c, 0, sizeof(c));
6719 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6720 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6721 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6722 					ldst_addrspace);
6723 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6724 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6725 
6726 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6727 	return ret;
6728 }
6729 
6730 /**
6731  *      t4_fw_hello - establish communication with FW
6732  *      @adap: the adapter
6733  *      @mbox: mailbox to use for the FW command
6734  *      @evt_mbox: mailbox to receive async FW events
6735  *      @master: specifies the caller's willingness to be the device master
6736  *	@state: returns the current device state (if non-NULL)
6737  *
6738  *	Issues a command to establish communication with FW.  Returns either
6739  *	an error (negative integer) or the mailbox of the Master PF.
6740  */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* The reply carries the chosen Master PF and the device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
6859 
6860 /**
6861  *	t4_fw_bye - end communication with FW
6862  *	@adap: the adapter
6863  *	@mbox: mailbox to use for the FW command
6864  *
6865  *	Issues a command to terminate communication with FW.
6866  */
6867 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6868 {
6869 	struct fw_bye_cmd c;
6870 
6871 	memset(&c, 0, sizeof(c));
6872 	INIT_CMD(c, BYE, WRITE);
6873 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6874 }
6875 
6876 /**
6877  *	t4_fw_reset - issue a reset to FW
6878  *	@adap: the adapter
6879  *	@mbox: mailbox to use for the FW command
6880  *	@reset: specifies the type of reset to perform
6881  *
6882  *	Issues a reset command of the specified type to FW.
6883  */
6884 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6885 {
6886 	struct fw_reset_cmd c;
6887 
6888 	memset(&c, 0, sizeof(c));
6889 	INIT_CMD(c, RESET, WRITE);
6890 	c.val = cpu_to_be32(reset);
6891 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6892 }
6893 
6894 /**
6895  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6896  *	@adap: the adapter
6897  *	@mbox: mailbox to use for the FW RESET command (if desired)
6898  *	@force: force uP into RESET even if FW RESET command fails
6899  *
6900  *	Issues a RESET command to firmware (if desired) with a HALT indication
6901  *	and then puts the microprocessor into RESET state.  The RESET command
6902  *	will only be issued if a legitimate mailbox is provided (mbox <=
6903  *	M_PCIE_FW_MASTER).
6904  *
6905  *	This is generally used in order for the host to safely manipulate the
6906  *	adapter without fear of conflicting with whatever the firmware might
6907  *	be doing.  The only way out of this state is to RESTART the firmware
6908  *	...
6909  */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.  (Skipped when the firmware has not been
	 * brought up successfully, per the FW_OK flag.)
	 */
	if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
6953 
6954 /**
6955  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
6956  *	@adap: the adapter
6957  *
6958  *	Restart firmware previously halted by t4_fw_halt().  On successful
6959  *	return the previous PF Master remains as the new PF Master and there
6960  *	is no need to issue a new HELLO command, etc.
6961  */
6962 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
6963 {
6964 	int ms;
6965 
6966 	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6967 	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6968 		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6969 			return FW_SUCCESS;
6970 		msleep(100);
6971 		ms += 100;
6972 	}
6973 
6974 	return -ETIMEDOUT;
6975 }
6976 
6977 /**
6978  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6979  *	@adap: the adapter
6980  *	@mbox: mailbox to use for the FW RESET command (if desired)
6981  *	@fw_data: the firmware image to write
6982  *	@size: image size
6983  *	@force: force upgrade even if firmware doesn't cooperate
6984  *
6985  *	Perform all of the steps necessary for upgrading an adapter's
6986  *	firmware image.  Normally this requires the cooperation of the
6987  *	existing firmware in order to halt all existing activities
6988  *	but if an invalid mailbox token is passed in we skip that step
6989  *	(though we'll still put the adapter microprocessor into RESET in
6990  *	that case).
6991  *
6992  *	On successful return the new firmware will have been loaded and
6993  *	the adapter will have been fully RESET losing all previous setup
6994  *	state.  On unsuccessful return the adapter may be completely hosed ...
6995  *	positive errno indicates that the adapter is ~probably~ intact, a
6996  *	negative errno indicates that things are looking bad ...
6997  */
6998 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6999 		  const u8 *fw_data, unsigned int size, int force)
7000 {
7001 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7002 	unsigned int bootstrap =
7003 	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7004 	int ret;
7005 
7006 	if (!t4_fw_matches_chip(adap, fw_hdr))
7007 		return -EINVAL;
7008 
7009 	if (!bootstrap) {
7010 		ret = t4_fw_halt(adap, mbox, force);
7011 		if (ret < 0 && !force)
7012 			return ret;
7013 	}
7014 
7015 	ret = t4_load_fw(adap, fw_data, size);
7016 	if (ret < 0 || bootstrap)
7017 		return ret;
7018 
7019 	return t4_fw_restart(adap, mbox);
7020 }
7021 
7022 /**
7023  *	t4_fw_initialize - ask FW to initialize the device
7024  *	@adap: the adapter
7025  *	@mbox: mailbox to use for the FW command
7026  *
7027  *	Issues a command to FW to partially initialize the device.  This
7028  *	performs initialization that generally doesn't depend on user input.
7029  */
7030 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7031 {
7032 	struct fw_initialize_cmd c;
7033 
7034 	memset(&c, 0, sizeof(c));
7035 	INIT_CMD(c, INITIALIZE, WRITE);
7036 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7037 }
7038 
7039 /**
7040  *	t4_query_params_rw - query FW or device parameters
7041  *	@adap: the adapter
7042  *	@mbox: mailbox to use for the FW command
7043  *	@pf: the PF
7044  *	@vf: the VF
7045  *	@nparams: the number of parameters
7046  *	@params: the parameter names
7047  *	@val: the parameter values
7048  *	@rw: Write and read flag
7049  *
7050  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7051  *	queried at once.
7052  */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;	/* cursor over the (mnem, val) word pairs */

	/* The command body holds at most 7 (mnem, val) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/*
	 * Fill in each parameter name; when @rw is set also supply the
	 * caller's value in the adjacent val word of the pair.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	/* On success the reply's val words carry the queried values. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}
7084 
7085 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7086 		    unsigned int vf, unsigned int nparams, const u32 *params,
7087 		    u32 *val)
7088 {
7089 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
7090 }
7091 
7092 /**
7093  *      t4_set_params_timeout - sets FW or device parameters
7094  *      @adap: the adapter
7095  *      @mbox: mailbox to use for the FW command
7096  *      @pf: the PF
7097  *      @vf: the VF
7098  *      @nparams: the number of parameters
7099  *      @params: the parameter names
7100  *      @val: the parameter values
7101  *      @timeout: the timeout time
7102  *
7103  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7104  *      specified at once.
7105  */
7106 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7107 			  unsigned int pf, unsigned int vf,
7108 			  unsigned int nparams, const u32 *params,
7109 			  const u32 *val, int timeout)
7110 {
7111 	struct fw_params_cmd c;
7112 	__be32 *p = &c.param[0].mnem;
7113 
7114 	if (nparams > 7)
7115 		return -EINVAL;
7116 
7117 	memset(&c, 0, sizeof(c));
7118 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7119 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7120 				  V_FW_PARAMS_CMD_PFN(pf) |
7121 				  V_FW_PARAMS_CMD_VFN(vf));
7122 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7123 
7124 	while (nparams--) {
7125 		*p++ = cpu_to_be32(*params++);
7126 		*p++ = cpu_to_be32(*val++);
7127 	}
7128 
7129 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7130 }
7131 
7132 /**
7133  *	t4_set_params - sets FW or device parameters
7134  *	@adap: the adapter
7135  *	@mbox: mailbox to use for the FW command
7136  *	@pf: the PF
7137  *	@vf: the VF
7138  *	@nparams: the number of parameters
7139  *	@params: the parameter names
7140  *	@val: the parameter values
7141  *
7142  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7143  *	specified at once.
7144  */
7145 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7146 		  unsigned int vf, unsigned int nparams, const u32 *params,
7147 		  const u32 *val)
7148 {
7149 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7150 				     FW_CMD_MAX_TIMEOUT);
7151 }
7152 
7153 /**
7154  *	t4_cfg_pfvf - configure PF/VF resource limits
7155  *	@adap: the adapter
7156  *	@mbox: mailbox to use for the FW command
7157  *	@pf: the PF being configured
7158  *	@vf: the VF being configured
7159  *	@txq: the max number of egress queues
7160  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7161  *	@rxqi: the max number of interrupt-capable ingress queues
7162  *	@rxq: the max number of interruptless ingress queues
7163  *	@tc: the PCI traffic class
7164  *	@vi: the max number of virtual interfaces
7165  *	@cmask: the channel access rights mask for the PF/VF
7166  *	@pmask: the port access rights mask for the PF/VF
7167  *	@nexact: the maximum number of exact MPS filters
7168  *	@rcaps: read capabilities
7169  *	@wxcaps: write/execute capabilities
7170  *
7171  *	Configures resource limits and capabilities for a physical or virtual
7172  *	function.
7173  */
7174 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7175 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7176 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7177 		unsigned int vi, unsigned int cmask, unsigned int pmask,
7178 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7179 {
7180 	struct fw_pfvf_cmd c;
7181 
7182 	memset(&c, 0, sizeof(c));
7183 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7184 				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7185 				  V_FW_PFVF_CMD_VFN(vf));
7186 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7187 	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7188 				     V_FW_PFVF_CMD_NIQ(rxq));
7189 	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7190 				    V_FW_PFVF_CMD_PMASK(pmask) |
7191 				    V_FW_PFVF_CMD_NEQ(txq));
7192 	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7193 				      V_FW_PFVF_CMD_NVI(vi) |
7194 				      V_FW_PFVF_CMD_NEXACTF(nexact));
7195 	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7196 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7197 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7198 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7199 }
7200 
7201 /**
7202  *	t4_alloc_vi_func - allocate a virtual interface
7203  *	@adap: the adapter
7204  *	@mbox: mailbox to use for the FW command
7205  *	@port: physical port associated with the VI
7206  *	@pf: the PF owning the VI
7207  *	@vf: the VF owning the VI
7208  *	@nmac: number of MAC addresses needed (1 to 5)
7209  *	@mac: the MAC addresses of the VI
7210  *	@rss_size: size of RSS table slice associated with this VI
7211  *	@portfunc: which Port Application Function MAC Address is desired
7212  *	@idstype: Intrusion Detection Type
7213  *
7214  *	Allocates a virtual interface for the given physical port.  If @mac is
7215  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7216  *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7217  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7218  *	stored consecutively so the space needed is @nmac * 6 bytes.
7219  *	Returns a negative error number or the non-negative VI id.
7220  */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* FW field carries nmac - 1 -- presumably a count of extra MACs. */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;	/* no RSS slice requested */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/*
	 * Copy out up to @nmac addresses, 6 bytes apiece, stored
	 * consecutively in @mac.  The switch falls through on purpose.
	 */
	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/* Non-negative VI id on success. */
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
7262 
7263 /**
7264  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7265  *      @adap: the adapter
7266  *      @mbox: mailbox to use for the FW command
7267  *      @port: physical port associated with the VI
7268  *      @pf: the PF owning the VI
7269  *      @vf: the VF owning the VI
7270  *      @nmac: number of MAC addresses needed (1 to 5)
7271  *      @mac: the MAC addresses of the VI
7272  *      @rss_size: size of RSS table slice associated with this VI
7273  *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
7276  *	Detection System disabled.
7277  */
7278 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7279 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7280 		u16 *rss_size)
7281 {
7282 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7283 				FW_VI_FUNC_ETH, 0);
7284 }
7285 
7286 /**
7287  * 	t4_free_vi - free a virtual interface
7288  * 	@adap: the adapter
7289  * 	@mbox: mailbox to use for the FW command
7290  * 	@pf: the PF owning the VI
7291  * 	@vf: the VF owning the VI
 * 	@viid: virtual interface identifier
7293  *
7294  * 	Free a previously allocated virtual interface.
7295  */
7296 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7297 	       unsigned int vf, unsigned int viid)
7298 {
7299 	struct fw_vi_cmd c;
7300 
7301 	memset(&c, 0, sizeof(c));
7302 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7303 				  F_FW_CMD_REQUEST |
7304 				  F_FW_CMD_EXEC |
7305 				  V_FW_VI_CMD_PFN(pf) |
7306 				  V_FW_VI_CMD_VFN(vf));
7307 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7308 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7309 
7310 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7311 }
7312 
7313 /**
7314  *	t4_set_rxmode - set Rx properties of a virtual interface
7315  *	@adap: the adapter
7316  *	@mbox: mailbox to use for the FW command
7317  *	@viid: the VI id
7318  *	@mtu: the new MTU or -1
7319  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7320  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7321  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7322  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7323  *	@sleep_ok: if true we may sleep while awaiting command completion
7324  *
7325  *	Sets Rx properties of a virtual interface.
7326  */
7327 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7328 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
7329 		  bool sleep_ok)
7330 {
7331 	struct fw_vi_rxmode_cmd c;
7332 
7333 	/* convert to FW values */
7334 	if (mtu < 0)
7335 		mtu = M_FW_VI_RXMODE_CMD_MTU;
7336 	if (promisc < 0)
7337 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7338 	if (all_multi < 0)
7339 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7340 	if (bcast < 0)
7341 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7342 	if (vlanex < 0)
7343 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7344 
7345 	memset(&c, 0, sizeof(c));
7346 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7347 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7348 				   V_FW_VI_RXMODE_CMD_VIID(viid));
7349 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7350 	c.mtu_to_vlanexen =
7351 		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7352 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7353 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7354 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7355 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7356 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7357 }
7358 
7359 /**
7360  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7361  *	@adap: the adapter
7362  *	@mbox: mailbox to use for the FW command
7363  *	@viid: the VI id
7364  *	@free: if true any existing filters for this VI id are first removed
7365  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
7366  *	@addr: the MAC address(es)
7367  *	@idx: where to store the index of each allocated filter
7368  *	@hash: pointer to hash address filter bitmap
7369  *	@sleep_ok: call is allowed to sleep
7370  *
7371  *	Allocates an exact-match filter for each of the supplied addresses and
7372  *	sets it to the corresponding address.  If @idx is not %NULL it should
7373  *	have at least @naddr entries, each of which will be set to the index of
7374  *	the filter allocated for the corresponding MAC address.  If a filter
7375  *	could not be allocated for an address its index is set to 0xffff.
7376  *	If @hash is not %NULL addresses that fail to allocate an exact filter
7377  *	are hashed and update the hash filter bitmap pointed at by @hash.
7378  *
7379  *	Returns a negative error number or the number of filters allocated.
7380  */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;	/* exact-match filters allocated so far */
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;	/* addresses remaining to submit */

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of up to ARRAY_SIZE(c.u.exact). */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* Mark each entry valid and ask FW to pick the index. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Record each address's index, or hash it if no filter fit. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >=  max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only free pre-existing filters on the first chunk. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Partial success (-FW_ENOMEM) still reports the filters allocated. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
7451 
7452 /**
7453  *	t4_change_mac - modifies the exact-match filter for a MAC address
7454  *	@adap: the adapter
7455  *	@mbox: mailbox to use for the FW command
7456  *	@viid: the VI id
7457  *	@idx: index of existing filter for old value of MAC address, or -1
7458  *	@addr: the new MAC address value
7459  *	@persist: whether a new MAC allocation should be persistent
7460  *	@add_smt: if true also add the address to the HW SMT
7461  *
7462  *	Modifies an exact-match filter and sets it to the new MAC address if
7463  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
7464  *	latter case the address is added persistently if @persist is %true.
7465  *
7466  *	Note that in general it is not possible to modify the value of a given
7467  *	filter so the generic way to modify an address filter is to free the one
7468  *	being used by the old address value and allocate a new filter for the
7469  *	new address value.
7470  *
7471  *	Returns a negative error number or the index of the filter with the new
7472  *	MAC value.  Note that this index may differ from @idx.
7473  */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;	/* single exact-match entry */
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Optionally mirror the entry into the Source MAC Table (SMT). */
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* FW echoes the filter index actually used ... */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* ... an index beyond the MPS TCAM size maps to -ENOMEM. */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
7504 
7505 /**
7506  *	t4_set_addr_hash - program the MAC inexact-match hash filter
7507  *	@adap: the adapter
7508  *	@mbox: mailbox to use for the FW command
7509  *	@viid: the VI id
7510  *	@ucast: whether the hash filter should also match unicast addresses
7511  *	@vec: the value to be written to the hash filter
7512  *	@sleep_ok: call is allowed to sleep
7513  *
7514  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
7515  */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): V_FW_VI_ENABLE_CMD_VIID is used here on a
	 * FW_VI_MAC_CMD; presumably the VIID field layout is identical in
	 * both commands -- confirm against t4fw_interface.h.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);	/* 64-bit inexact-match vector */
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
7532 
7533 /**
7534  *      t4_enable_vi_params - enable/disable a virtual interface
7535  *      @adap: the adapter
7536  *      @mbox: mailbox to use for the FW command
7537  *      @viid: the VI id
7538  *      @rx_en: 1=enable Rx, 0=disable Rx
7539  *      @tx_en: 1=enable Tx, 0=disable Tx
7540  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
7541  *
7542  *      Enables/disables a virtual interface.  Note that setting DCB Enable
7543  *      only makes sense when enabling a Virtual Interface ...
7544  */
7545 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7546 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7547 {
7548 	struct fw_vi_enable_cmd c;
7549 
7550 	memset(&c, 0, sizeof(c));
7551 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7552 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7553 				   V_FW_VI_ENABLE_CMD_VIID(viid));
7554 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7555 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7556 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7557 				     FW_LEN16(c));
7558 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7559 }
7560 
7561 /**
7562  *	t4_enable_vi - enable/disable a virtual interface
7563  *	@adap: the adapter
7564  *	@mbox: mailbox to use for the FW command
7565  *	@viid: the VI id
7566  *	@rx_en: 1=enable Rx, 0=disable Rx
7567  *	@tx_en: 1=enable Tx, 0=disable Tx
7568  *
7569  *	Enables/disables a virtual interface.  Note that setting DCB Enable
7570  *	only makes sense when enabling a Virtual Interface ...
7571  */
7572 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7573 		 bool rx_en, bool tx_en)
7574 {
7575 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7576 }
7577 
7578 /**
7579  *	t4_identify_port - identify a VI's port by blinking its LED
7580  *	@adap: the adapter
7581  *	@mbox: mailbox to use for the FW command
7582  *	@viid: the VI id
7583  *	@nblinks: how many times to blink LED at 2.5 Hz
7584  *
7585  *	Identifies a VI's port by blinking its LED.
7586  */
7587 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7588 		     unsigned int nblinks)
7589 {
7590 	struct fw_vi_enable_cmd c;
7591 
7592 	memset(&c, 0, sizeof(c));
7593 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7594 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7595 				   V_FW_VI_ENABLE_CMD_VIID(viid));
7596 	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7597 	c.blinkdur = cpu_to_be16(nblinks);
7598 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7599 }
7600 
7601 /**
7602  *	t4_iq_stop - stop an ingress queue and its FLs
7603  *	@adap: the adapter
7604  *	@mbox: mailbox to use for the FW command
7605  *	@pf: the PF owning the queues
7606  *	@vf: the VF owning the queues
7607  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7608  *	@iqid: ingress queue id
7609  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7610  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7611  *
7612  *	Stops an ingress queue and its associated FLs, if any.  This causes
7613  *	any current or future data/messages destined for these queues to be
7614  *	tossed.
7615  */
7616 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7617 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7618 	       unsigned int fl0id, unsigned int fl1id)
7619 {
7620 	struct fw_iq_cmd c;
7621 
7622 	memset(&c, 0, sizeof(c));
7623 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7624 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7625 				  V_FW_IQ_CMD_VFN(vf));
7626 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7627 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7628 	c.iqid = cpu_to_be16(iqid);
7629 	c.fl0id = cpu_to_be16(fl0id);
7630 	c.fl1id = cpu_to_be16(fl1id);
7631 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7632 }
7633 
7634 /**
7635  *	t4_iq_free - free an ingress queue and its FLs
7636  *	@adap: the adapter
7637  *	@mbox: mailbox to use for the FW command
7638  *	@pf: the PF owning the queues
7639  *	@vf: the VF owning the queues
7640  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7641  *	@iqid: ingress queue id
7642  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7643  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7644  *
7645  *	Frees an ingress queue and its associated FLs, if any.
7646  */
7647 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7648 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7649 	       unsigned int fl0id, unsigned int fl1id)
7650 {
7651 	struct fw_iq_cmd c;
7652 
7653 	memset(&c, 0, sizeof(c));
7654 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7655 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7656 				  V_FW_IQ_CMD_VFN(vf));
7657 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7658 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7659 	c.iqid = cpu_to_be16(iqid);
7660 	c.fl0id = cpu_to_be16(fl0id);
7661 	c.fl1id = cpu_to_be16(fl1id);
7662 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7663 }
7664 
7665 /**
7666  *	t4_eth_eq_free - free an Ethernet egress queue
7667  *	@adap: the adapter
7668  *	@mbox: mailbox to use for the FW command
7669  *	@pf: the PF owning the queue
7670  *	@vf: the VF owning the queue
7671  *	@eqid: egress queue id
7672  *
7673  *	Frees an Ethernet egress queue.
7674  */
7675 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7676 		   unsigned int vf, unsigned int eqid)
7677 {
7678 	struct fw_eq_eth_cmd c;
7679 
7680 	memset(&c, 0, sizeof(c));
7681 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7682 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7683 				  V_FW_EQ_ETH_CMD_PFN(pf) |
7684 				  V_FW_EQ_ETH_CMD_VFN(vf));
7685 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7686 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7687 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7688 }
7689 
7690 /**
7691  *	t4_ctrl_eq_free - free a control egress queue
7692  *	@adap: the adapter
7693  *	@mbox: mailbox to use for the FW command
7694  *	@pf: the PF owning the queue
7695  *	@vf: the VF owning the queue
7696  *	@eqid: egress queue id
7697  *
7698  *	Frees a control egress queue.
7699  */
7700 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7701 		    unsigned int vf, unsigned int eqid)
7702 {
7703 	struct fw_eq_ctrl_cmd c;
7704 
7705 	memset(&c, 0, sizeof(c));
7706 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7707 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7708 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
7709 				  V_FW_EQ_CTRL_CMD_VFN(vf));
7710 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7711 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7712 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7713 }
7714 
7715 /**
7716  *	t4_ofld_eq_free - free an offload egress queue
7717  *	@adap: the adapter
7718  *	@mbox: mailbox to use for the FW command
7719  *	@pf: the PF owning the queue
7720  *	@vf: the VF owning the queue
7721  *	@eqid: egress queue id
7722  *
 *	Frees an offload egress queue.
7724  */
7725 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7726 		    unsigned int vf, unsigned int eqid)
7727 {
7728 	struct fw_eq_ofld_cmd c;
7729 
7730 	memset(&c, 0, sizeof(c));
7731 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7732 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7733 				  V_FW_EQ_OFLD_CMD_PFN(pf) |
7734 				  V_FW_EQ_OFLD_CMD_VFN(vf));
7735 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7736 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7737 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7738 }
7739 
7740 /**
7741  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
7742  *	@link_down_rc: Link Down Reason Code
7743  *
7744  *	Returns a string representation of the Link Down Reason Code.
7745  */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char *const reasons[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	/* Codes past the table map to a generic string. */
	if (link_down_rc >= sizeof(reasons) / sizeof(reasons[0]))
		return "Bad Reason Code";

	return reasons[link_down_rc];
}
7764 
7765 /*
7766  * Return the highest speed set in the port capabilities, in Mb/s.
7767  */
7768 unsigned int fwcap_to_speed(uint32_t caps)
7769 {
7770 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
7771 		do { \
7772 			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7773 				return __speed; \
7774 		} while (0)
7775 
7776 	TEST_SPEED_RETURN(400G, 400000);
7777 	TEST_SPEED_RETURN(200G, 200000);
7778 	TEST_SPEED_RETURN(100G, 100000);
7779 	TEST_SPEED_RETURN(50G,   50000);
7780 	TEST_SPEED_RETURN(40G,   40000);
7781 	TEST_SPEED_RETURN(25G,   25000);
7782 	TEST_SPEED_RETURN(10G,   10000);
7783 	TEST_SPEED_RETURN(1G,     1000);
7784 	TEST_SPEED_RETURN(100M,    100);
7785 
7786 	#undef TEST_SPEED_RETURN
7787 
7788 	return 0;
7789 }
7790 
7791 /*
7792  * Return the port capabilities bit for the given speed, which is in Mb/s.
7793  */
7794 uint32_t speed_to_fwcap(unsigned int speed)
7795 {
7796 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
7797 		do { \
7798 			if (speed == __speed) \
7799 				return FW_PORT_CAP32_SPEED_##__caps_speed; \
7800 		} while (0)
7801 
7802 	TEST_SPEED_RETURN(400G, 400000);
7803 	TEST_SPEED_RETURN(200G, 200000);
7804 	TEST_SPEED_RETURN(100G, 100000);
7805 	TEST_SPEED_RETURN(50G,   50000);
7806 	TEST_SPEED_RETURN(40G,   40000);
7807 	TEST_SPEED_RETURN(25G,   25000);
7808 	TEST_SPEED_RETURN(10G,   10000);
7809 	TEST_SPEED_RETURN(1G,     1000);
7810 	TEST_SPEED_RETURN(100M,    100);
7811 
7812 	#undef TEST_SPEED_RETURN
7813 
7814 	return 0;
7815 }
7816 
7817 /*
7818  * Return the port capabilities bit for the highest speed in the capabilities.
7819  */
7820 uint32_t fwcap_top_speed(uint32_t caps)
7821 {
7822 	#define TEST_SPEED_RETURN(__caps_speed) \
7823 		do { \
7824 			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7825 				return FW_PORT_CAP32_SPEED_##__caps_speed; \
7826 		} while (0)
7827 
7828 	TEST_SPEED_RETURN(400G);
7829 	TEST_SPEED_RETURN(200G);
7830 	TEST_SPEED_RETURN(100G);
7831 	TEST_SPEED_RETURN(50G);
7832 	TEST_SPEED_RETURN(40G);
7833 	TEST_SPEED_RETURN(25G);
7834 	TEST_SPEED_RETURN(10G);
7835 	TEST_SPEED_RETURN(1G);
7836 	TEST_SPEED_RETURN(100M);
7837 
7838 	#undef TEST_SPEED_RETURN
7839 
7840 	return 0;
7841 }
7842 
7843 
7844 /**
7845  *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
7846  *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
7847  *
7848  *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
7849  *	32-bit Port Capabilities value.
7850  */
7851 static uint32_t lstatus_to_fwcap(u32 lstatus)
7852 {
7853 	uint32_t linkattr = 0;
7854 
7855 	/*
7856 	 * Unfortunately the format of the Link Status in the old
7857 	 * 16-bit Port Information message isn't the same as the
7858 	 * 16-bit Port Capabilities bitfield used everywhere else ...
7859 	 */
7860 	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
7861 		linkattr |= FW_PORT_CAP32_FC_RX;
7862 	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
7863 		linkattr |= FW_PORT_CAP32_FC_TX;
7864 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7865 		linkattr |= FW_PORT_CAP32_SPEED_100M;
7866 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7867 		linkattr |= FW_PORT_CAP32_SPEED_1G;
7868 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7869 		linkattr |= FW_PORT_CAP32_SPEED_10G;
7870 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7871 		linkattr |= FW_PORT_CAP32_SPEED_25G;
7872 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7873 		linkattr |= FW_PORT_CAP32_SPEED_40G;
7874 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
7875 		linkattr |= FW_PORT_CAP32_SPEED_100G;
7876 
7877 	return linkattr;
7878 }
7879 
7880 /*
7881  * Updates all fields owned by the common code in port_info and link_config
7882  * based on information provided by the firmware.  Does not touch any
7883  * requested_* field.
7884  */
7885 static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
7886     enum fw_port_action action, bool *mod_changed, bool *link_changed)
7887 {
7888 	struct link_config old_lc, *lc = &pi->link_cfg;
7889 	unsigned char fc, fec;
7890 	u32 stat, linkattr;
7891 	int old_ptype, old_mtype;
7892 
7893 	old_ptype = pi->port_type;
7894 	old_mtype = pi->mod_type;
7895 	old_lc = *lc;
7896 	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
7897 		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7898 
7899 		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
7900 		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
7901 		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
7902 		    G_FW_PORT_CMD_MDIOADDR(stat) : -1;
7903 
7904 		lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
7905 		lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
7906 		lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
7907 		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7908 		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
7909 
7910 		linkattr = lstatus_to_fwcap(stat);
7911 	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
7912 		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
7913 
7914 		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
7915 		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
7916 		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
7917 		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
7918 
7919 		lc->supported = be32_to_cpu(p->u.info32.pcaps32);
7920 		lc->advertising = be32_to_cpu(p->u.info32.acaps32);
7921 		lc->lp_advertising = be16_to_cpu(p->u.info32.lpacaps32);
7922 		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
7923 		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
7924 
7925 		linkattr = be32_to_cpu(p->u.info32.linkattr32);
7926 	} else {
7927 		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
7928 		return;
7929 	}
7930 
7931 	lc->speed = fwcap_to_speed(linkattr);
7932 
7933 	fc = 0;
7934 	if (linkattr & FW_PORT_CAP32_FC_RX)
7935 		fc |= PAUSE_RX;
7936 	if (linkattr & FW_PORT_CAP32_FC_TX)
7937 		fc |= PAUSE_TX;
7938 	lc->fc = fc;
7939 
7940 	fec = FEC_NONE;
7941 	if (linkattr & FW_PORT_CAP32_FEC_RS)
7942 		fec |= FEC_RS;
7943 	if (linkattr & FW_PORT_CAP32_FEC_BASER_RS)
7944 		fec |= FEC_BASER_RS;
7945 	lc->fec = fec;
7946 
7947 	if (mod_changed != NULL)
7948 		*mod_changed = false;
7949 	if (link_changed != NULL)
7950 		*link_changed = false;
7951 	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
7952 	    old_lc.supported != lc->supported) {
7953 		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
7954 			lc->fec_hint = lc->advertising &
7955 			    V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
7956 		}
7957 		if (mod_changed != NULL)
7958 			*mod_changed = true;
7959 	}
7960 	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
7961 	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
7962 		if (link_changed != NULL)
7963 			*link_changed = true;
7964 	}
7965 }
7966 
7967 /**
7968  *	t4_update_port_info - retrieve and update port information if changed
7969  *	@pi: the port_info
7970  *
7971  *	We issue a Get Port Information Command to the Firmware and, if
7972  *	successful, we check to see if anything is different from what we
7973  *	last recorded and update things accordingly.
7974  */
7975  int t4_update_port_info(struct port_info *pi)
7976  {
7977 	struct adapter *sc = pi->adapter;
7978 	struct fw_port_cmd cmd;
7979 	enum fw_port_action action;
7980 	int ret;
7981 
7982 	memset(&cmd, 0, sizeof(cmd));
7983 	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
7984 	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
7985 	    V_FW_PORT_CMD_PORTID(pi->tx_chan));
7986 	action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
7987 	    FW_PORT_ACTION_GET_PORT_INFO;
7988 	cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
7989 	    FW_LEN16(cmd));
7990 	ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
7991 	if (ret)
7992 		return ret;
7993 
7994 	handle_port_info(pi, &cmd, action, NULL, NULL);
7995 	return 0;
7996 }
7997 
7998 /**
7999  *	t4_handle_fw_rpl - process a FW reply message
8000  *	@adap: the adapter
8001  *	@rpl: start of the FW message
8002  *
8003  *	Processes a FW message, such as link state change messages.
8004  */
8005 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8006 {
8007 	u8 opcode = *(const u8 *)rpl;
8008 	const struct fw_port_cmd *p = (const void *)rpl;
8009 	enum fw_port_action action =
8010 	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8011 	bool mod_changed, link_changed;
8012 
8013 	if (opcode == FW_PORT_CMD &&
8014 	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
8015 	    action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8016 		/* link/module state change message */
8017 		int i;
8018 		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8019 		struct port_info *pi = NULL;
8020 		struct link_config *lc;
8021 
8022 		for_each_port(adap, i) {
8023 			pi = adap2pinfo(adap, i);
8024 			if (pi->tx_chan == chan)
8025 				break;
8026 		}
8027 
8028 		lc = &pi->link_cfg;
8029 		PORT_LOCK(pi);
8030 		handle_port_info(pi, p, action, &mod_changed, &link_changed);
8031 		PORT_UNLOCK(pi);
8032 		if (mod_changed)
8033 			t4_os_portmod_changed(pi);
8034 		if (link_changed) {
8035 			PORT_LOCK(pi);
8036 			t4_os_link_changed(pi);
8037 			PORT_UNLOCK(pi);
8038 		}
8039 	} else {
8040 		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8041 		return -EINVAL;
8042 	}
8043 	return 0;
8044 }
8045 
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.  If no PCI Express capability is found, @p is left
 *	untouched.
 */
static void get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		/* Read Link Status: current link speed and negotiated width. */
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		/* The Negotiated Link Width field starts at bit 4. */
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}
8067 
/* Describes one non-standard Flash part supported by t4_get_flash_params(). */
struct flash_desc {
	u32 vendor_and_model_id;	/* response to the Flash Read ID command */
	u32 size_mb;			/* part size in bytes, despite the name */
};
8072 
/*
 * Identify the adapter's serial Flash part and record its size and sector
 * count in adapter->params.  Returns 0 on success or a negative error from
 * the Flash access primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
8210 
8211 static void set_pcie_completion_timeout(struct adapter *adapter,
8212 						  u8 range)
8213 {
8214 	u16 val;
8215 	u32 pcie_cap;
8216 
8217 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8218 	if (pcie_cap) {
8219 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8220 		val &= 0xfff0;
8221 		val |= range ;
8222 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
8223 	}
8224 }
8225 
/*
 * Return the static per-generation chip parameters for the given chip id
 * (CHELSIO_T4 and up), or NULL if the id is out of range.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	/* Indexed by chipid relative to CHELSIO_T4. */
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
	};

	/* Negative ids are caught before the (unsigned) upper-bound check. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}
8273 
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Chip generation and revision come from the PL Revision register. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
8343 
/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		/* T4 and T5+ use different MAC register layouts. */
		u32 a_port_cfg = is_t4(adapter) ?
				 PORT_REG(port, A_XGMAC_PORT_CFG) :
				 T5_PORT_REG(port, A_MAC_PORT_CFG);

		/* Clear SIGNAL_DET to force the link down on the wire. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	/* Stop all SGE activity, halting further DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}
8375 
/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
8467 
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		/* Address is stored in units of 16 bytes. */
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* Entry count is stored in units of 128 entries, minus one. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
8534 
/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt packing/holdoff thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ may scale the SGE timers; TSCALE == 0 means no scaling. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Holdoff timer values, converted from core ticks to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; field layout differs per chip. */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; register encodes log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	/* Rx pad boundary; T6 moved the field's base shift. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	/* Rx pack boundary; T4 has no separate packing boundary. */
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
8624 
/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 *
 * @adap: the adapter
 * @sleep_ok: whether the TP indirect reads may sleep
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap,
    bool sleep_ok)
{
	uint32_t v;
	struct tp_params *tpp = &adap->params.tp;

	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	    sleep_ok);
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
	    sleep_ok);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/* T5+ only: assemble the 64-bit hash filter mask from two registers. */
	if (chip_id(adap) > CHELSIO_T4) {
		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
		adap->params.tp.hash_filter_mask = v;
		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
		adap->params.tp.hash_filter_mask |= (u64)v << 32;
	}
}
8662 
/**
 *      t4_init_tp_params - initialize adap->params.tp
 *      @adap: the adapter
 *      @sleep_ok: whether register accesses via the firmware may sleep
 *
 *      Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			tpp->err_vec_mask =
			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
		}
	}

	return 0;
}
8701 
8702 /**
8703  *      t4_filter_field_shift - calculate filter field shift
8704  *      @adap: the adapter
8705  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8706  *
8707  *      Return the shift position of a filter field within the Compressed
8708  *      Filter Tuple.  The filter field is specified via its selection bit
 *      within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
8710  */
8711 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8712 {
8713 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8714 	unsigned int sel;
8715 	int field_shift;
8716 
8717 	if ((filter_mode & filter_sel) == 0)
8718 		return -1;
8719 
8720 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8721 		switch (filter_mode & sel) {
8722 		case F_FCOE:
8723 			field_shift += W_FT_FCOE;
8724 			break;
8725 		case F_PORT:
8726 			field_shift += W_FT_PORT;
8727 			break;
8728 		case F_VNIC_ID:
8729 			field_shift += W_FT_VNIC_ID;
8730 			break;
8731 		case F_VLAN:
8732 			field_shift += W_FT_VLAN;
8733 			break;
8734 		case F_TOS:
8735 			field_shift += W_FT_TOS;
8736 			break;
8737 		case F_PROTOCOL:
8738 			field_shift += W_FT_PROTOCOL;
8739 			break;
8740 		case F_ETHERTYPE:
8741 			field_shift += W_FT_ETHERTYPE;
8742 			break;
8743 		case F_MACMATCH:
8744 			field_shift += W_FT_MACMATCH;
8745 			break;
8746 		case F_MPSHITTYPE:
8747 			field_shift += W_FT_MPSHITTYPE;
8748 			break;
8749 		case F_FRAGMENTATION:
8750 			field_shift += W_FT_FRAGMENTATION;
8751 			break;
8752 		}
8753 	}
8754 	return field_shift;
8755 }
8756 
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	/*
	 * Find the (port_id + 1)'th set bit in the adapter's port vector;
	 * its bit position is this port's Tx channel / lport number.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	p->tx_chan = j;
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
	p->lport = j;

	/* VFs may refresh port info only if the PORT capability was granted. */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
 		t4_update_port_info(p);
	}

	/* Allocate the port's first virtual interface; ret is the VI id. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	/* Pre-T6 chips space SMT indices two apart (hence the extra shift). */
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
	else
		p->vi[0].smt_idx = (ret & 0x7f);
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(p, addr);

	/* Ask the firmware for this VI's RSS table base. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;	/* query failed: mark unknown */
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}
8806 
8807 /**
8808  *	t4_read_cimq_cfg - read CIM queue configuration
8809  *	@adap: the adapter
8810  *	@base: holds the queue base addresses in bytes
8811  *	@size: holds the queue sizes in bytes
8812  *	@thres: holds the queue full thresholds in bytes
8813  *
8814  *	Returns the current configuration of the CIM queues, starting with
8815  *	the IBQs, then the OBQs.
8816  */
8817 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8818 {
8819 	unsigned int i, v;
8820 	int cim_num_obq = adap->chip_params->cim_num_obq;
8821 
8822 	for (i = 0; i < CIM_NUM_IBQ; i++) {
8823 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8824 			     V_QUENUMSELECT(i));
8825 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8826 		/* value is in 256-byte units */
8827 		*base++ = G_CIMQBASE(v) * 256;
8828 		*size++ = G_CIMQSIZE(v) * 256;
8829 		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
8830 	}
8831 	for (i = 0; i < cim_num_obq; i++) {
8832 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8833 			     V_QUENUMSELECT(i));
8834 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8835 		/* value is in 256-byte units */
8836 		*base++ = G_CIMQBASE(v) * 256;
8837 		*size++ = G_CIMQSIZE(v) * 256;
8838 	}
8839 }
8840 
8841 /**
8842  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
8843  *	@adap: the adapter
8844  *	@qid: the queue index
8845  *	@data: where to store the queue contents
8846  *	@n: capacity of @data in 32-bit words
8847  *
8848  *	Reads the contents of the selected CIM queue starting at address 0 up
8849  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8850  *	error and the number of 32-bit words actually read on success.
8851  */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist; @n must be a multiple of 4 words. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	/* IBQs are laid out back-to-back, nwords apart, in debug space. */
	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word, then poll until the debug read completes. */
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}
8882 
8883 /**
8884  *	t4_read_cim_obq - read the contents of a CIM outbound queue
8885  *	@adap: the adapter
8886  *	@qid: the queue index
8887  *	@data: where to store the queue contents
8888  *	@n: capacity of @data in 32-bit words
8889  *
8890  *	Reads the contents of the selected CIM queue starting at address 0 up
8891  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8892  *	error and the number of 32-bit words actually read on success.
8893  */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/* Validate the queue index; @n must be a multiple of 4 words. */
	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* OBQ placement is configurable, so look up base/size for @qid. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256B -> multiple of 4B words */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word, then poll until the debug read completes. */
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}
8924 
/* Base offsets of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
8932 
8933 /**
8934  *	t4_cim_read - read a block from CIM internal address space
8935  *	@adap: the adapter
8936  *	@addr: the start address within the CIM address space
8937  *	@n: number of words to read
8938  *	@valp: where to store the result
8939  *
8940  *	Reads a block of 4-byte words from the CIM intenal address space.
8941  */
8942 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8943 		unsigned int *valp)
8944 {
8945 	int ret = 0;
8946 
8947 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8948 		return -EBUSY;
8949 
8950 	for ( ; !ret && n--; addr += 4) {
8951 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8952 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8953 				      0, 5, 2);
8954 		if (!ret)
8955 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8956 	}
8957 	return ret;
8958 }
8959 
8960 /**
8961  *	t4_cim_write - write a block into CIM internal address space
8962  *	@adap: the adapter
8963  *	@addr: the start address within the CIM address space
8964  *	@n: number of words to write
8965  *	@valp: set of values to write
8966  *
8967  *	Writes a block of 4-byte words into the CIM intenal address space.
8968  */
8969 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8970 		 const unsigned int *valp)
8971 {
8972 	int ret = 0;
8973 
8974 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8975 		return -EBUSY;
8976 
8977 	for ( ; !ret && n--; addr += 4) {
8978 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8979 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8980 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8981 				      0, 5, 2);
8982 	}
8983 	return ret;
8984 }
8985 
/* Convenience wrapper: write the single word @val at CIM address @addr. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int word = val;

	return t4_cim_write(adap, addr, 1, &word);
}
8991 
8992 /**
8993  *	t4_cim_ctl_read - read a block from CIM control region
8994  *	@adap: the adapter
8995  *	@addr: the start address within the CIM control region
8996  *	@n: number of words to read
8997  *	@valp: where to store the result
8998  *
8999  *	Reads a block of 4-byte words from the CIM control region.
9000  */
9001 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
9002 		    unsigned int *valp)
9003 {
9004 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
9005 }
9006 
9007 /**
9008  *	t4_cim_read_la - read CIM LA capture buffer
9009  *	@adap: the adapter
9010  *	@la_buf: where to store the LA data
9011  *	@wrptr: the HW write pointer within the capture buffer
9012  *
9013  *	Reads the contents of the CIM LA buffer with the most recent entry at
9014  *	the end	of the returned data and with the entry at @wrptr first.
9015  *	We try to leave the LA in the running state we find it in.
9016  */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA config so we can restore its run state at the end. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the hardware write pointer (oldest entry). */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Point the LA read pointer at @idx and trigger a read. */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* RDEN still set means the hardware never completed the read. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 *
		 * NOTE(review): the wrap below uses '%' where the one above
		 * uses '&'; since M_UPDBGLARDPTR is a mask the two differ at
		 * the wrap point -- confirm which is intended.
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* Restore the LA to the running state we found it in. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
9075 
9076 /**
9077  *	t4_tp_read_la - read TP LA capture buffer
9078  *	@adap: the adapter
9079  *	@la_buf: where to store the LA data
9080  *	@wrptr: the HW write pointer within the capture buffer
9081  *
9082  *	Reads the contents of the TP LA buffer with the most recent entry at
9083  *	the end	of the returned data and with the entry at @wrptr first.
9084  *	We leave the LA in the running state we find it in.
9085  */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/*
	 * NOTE(review): in modes >= 2 a clear DBGLAWHLF appears to mean the
	 * entry at the write pointer is only half captured; we skip past it
	 * and later invalidate the last entry -- confirm against TP docs.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the mode/config bits but clear the read-pointer field. */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
9122 
9123 /*
9124  * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9125  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
9126  * state for more than the Warning Threshold then we'll issue a warning about
9127  * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
9128  * appears to be hung every Warning Repeat second till the situation clears.
9129  * If the situation clears, we'll note that as well.
9130  */
#define SGE_IDMA_WARN_THRESH 1		/* warning threshold, in seconds */
#define SGE_IDMA_WARN_REPEAT 300	/* warning repeat interval, in seconds */
9133 
9134 /**
9135  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9136  *	@adapter: the adapter
9137  *	@idma: the adapter IDMA Monitor state
9138  *
9139  *	Initialize the state of an SGE Ingress DMA Monitor.
9140  */
9141 void t4_idma_monitor_init(struct adapter *adapter,
9142 			  struct sge_idma_monitor_state *idma)
9143 {
9144 	/* Initialize the state variables for detecting an SGE Ingress DMA
9145 	 * hang.  The SGE has internal counters which count up on each clock
9146 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
9147 	 * same state they were on the previous clock tick.  The clock used is
9148 	 * the Core Clock so we have a limit on the maximum "time" they can
9149 	 * record; typically a very small number of seconds.  For instance,
9150 	 * with a 600MHz Core Clock, we can only count up to a bit more than
9151 	 * 7s.  So we'll synthesize a larger counter in order to not run the
9152 	 * risk of having the "timers" overflow and give us the flexibility to
9153 	 * maintain a Hung SGE State Machine of our own which operates across
9154 	 * a longer time frame.
9155 	 */
9156 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9157 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
9158 }
9159 
9160 /**
9161  *	t4_idma_monitor - monitor SGE Ingress DMA state
9162  *	@adapter: the adapter
9163  *	@idma: the adapter IDMA Monitor state
9164  *	@hz: number of ticks/second
9165  *	@ticks: number of ticks since the last IDMA Monitor call
9166  */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* One pass per SGE Ingress DMA channel. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
9249 
9250 /**
9251  *	t4_read_pace_tbl - read the pace table
9252  *	@adap: the adapter
9253  *	@pace_vals: holds the returned values
9254  *
9255  *	Returns the values of TP's pace table in microseconds.
9256  */
9257 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9258 {
9259 	unsigned int i, v;
9260 
9261 	for (i = 0; i < NTX_SCHED; i++) {
9262 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
9263 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
9264 		pace_vals[i] = dack_ticks_to_usec(adap, v);
9265 	}
9266 }
9267 
9268 /**
9269  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9270  *	@adap: the adapter
9271  *	@sched: the scheduler index
9272  *	@kbps: the byte rate in Kbps
9273  *	@ipg: the interpacket delay in tenths of nanoseconds
9274  *
9275  *	Return the current configuration of a HW Tx scheduler.
9276  */
9277 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
9278 		     unsigned int *ipg, bool sleep_ok)
9279 {
9280 	unsigned int v, addr, bpt, cpt;
9281 
9282 	if (kbps) {
9283 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
9284 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9285 		if (sched & 1)
9286 			v >>= 16;
9287 		bpt = (v >> 8) & 0xff;
9288 		cpt = v & 0xff;
9289 		if (!cpt)
9290 			*kbps = 0;	/* scheduler disabled */
9291 		else {
9292 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9293 			*kbps = (v * bpt) / 125;
9294 		}
9295 	}
9296 	if (ipg) {
9297 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
9298 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9299 		if (sched & 1)
9300 			v >>= 16;
9301 		v &= 0xffff;
9302 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
9303 	}
9304 }
9305 
9306 /**
9307  *	t4_load_cfg - download config file
9308  *	@adap: the adapter
9309  *	@cfg_data: the cfg text file to write
9310  *	@size: text file size
9311  *
9312  *	Write the supplied config text file to the card's serial flash.
9313  */
9314 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9315 {
9316 	int ret, i, n, cfg_addr;
9317 	unsigned int addr;
9318 	unsigned int flash_cfg_start_sec;
9319 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9320 
9321 	cfg_addr = t4_flash_cfg_addr(adap);
9322 	if (cfg_addr < 0)
9323 		return cfg_addr;
9324 
9325 	addr = cfg_addr;
9326 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
9327 
9328 	if (size > FLASH_CFG_MAX_SIZE) {
9329 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
9330 		       FLASH_CFG_MAX_SIZE);
9331 		return -EFBIG;
9332 	}
9333 
9334 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
9335 			 sf_sec_size);
9336 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9337 				     flash_cfg_start_sec + i - 1);
9338 	/*
9339 	 * If size == 0 then we're simply erasing the FLASH sectors associated
9340 	 * with the on-adapter Firmware Configuration File.
9341 	 */
9342 	if (ret || size == 0)
9343 		goto out;
9344 
9345 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
9346 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9347 		if ( (size - i) <  SF_PAGE_SIZE)
9348 			n = size - i;
9349 		else
9350 			n = SF_PAGE_SIZE;
9351 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
9352 		if (ret)
9353 			goto out;
9354 
9355 		addr += SF_PAGE_SIZE;
9356 		cfg_data += SF_PAGE_SIZE;
9357 	}
9358 
9359 out:
9360 	if (ret)
9361 		CH_ERR(adap, "config file %s failed %d\n",
9362 		       (size == 0 ? "clear" : "download"), ret);
9363 	return ret;
9364 }
9365 
9366 /**
9367  *	t5_fw_init_extern_mem - initialize the external memory
9368  *	@adap: the adapter
9369  *
9370  *	Initializes the external memory on T5.
9371  */
9372 int t5_fw_init_extern_mem(struct adapter *adap)
9373 {
9374 	u32 params[1], val[1];
9375 	int ret;
9376 
9377 	if (!is_t5(adap))
9378 		return 0;
9379 
9380 	val[0] = 0xff; /* Initialize all MCs */
9381 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
9382 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
9383 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
9384 			FW_CMD_MAX_TIMEOUT);
9385 
9386 	return ret;
9387 }
9388 
9389 /* BIOS boot headers */
/* Minimal PCI Expansion ROM header found at the start of every ROM image. */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
9395 
9396 /* Legacy PCI Expansion ROM Header */
/* Legacy (x86 BIOS) PCI Expansion ROM header, with size and checksum. */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
9405 
9406 /* EFI PCI Expansion ROM Header */
/* EFI PCI Expansion ROM header (code type 0x03 images). */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
9424 
9425 /* PCI Data Structure Format */
/* PCI Data Structure ("PCIR") describing one image within the ROM. */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCIR_DATA_STRUCTURE */
9448 
9449 /* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 * 512B = 512KB max */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" in little-endian byte order */
};
9459 
/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		/* Locate this image's header and its PCIR data structure. */
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): stored in host byte order -- confirm
			 * callers pass a value laid out as the ROM expects.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 * (offset 7 is the cksum field of the legacy header)
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
9538 
9539 /*
9540  *	t4_load_boot - download boot flash
9541  *	@adapter: the adapter
9542  *	@boot_data: the boot image to write
9543  *	@boot_addr: offset in flash to write boot_data
9544  *	@size: image size
9545  *
9546  *	Write the supplied boot image to the card's serial flash.
9547  *	The boot image has the following sections: a 28-byte header and the
9548  *	boot image.
9549  */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset ;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	/* boot_addr appears to be in KB units -- converted to a byte offset */
	unsigned int boot_sector = (boot_addr * 1024 );
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 * (>> 16: flash sectors are 64KB)
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Body written successfully; now commit the first page (header). */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
9670 
9671 /*
9672  *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9673  *	@adapter: the adapter
9674  *
9675  *	Return the address within the flash where the OptionROM Configuration
9676  *	is stored, or an error if the device FLASH is too small to contain
9677  *	a OptionROM Configuration.
9678  */
9679 static int t4_flash_bootcfg_addr(struct adapter *adapter)
9680 {
9681 	/*
9682 	 * If the device FLASH isn't large enough to hold a Firmware
9683 	 * Configuration File, return an error.
9684 	 */
9685 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9686 		return -ENOSPC;
9687 
9688 	return FLASH_BOOTCFG_START;
9689 }
9690 
/*
 *	t4_load_bootcfg - write/clear the OptionROM Configuration File in FLASH
 *	@adap: the adapter
 *	@cfg_data: the OptionROM Configuration data to write
 *	@size: size of @cfg_data in bytes; 0 means just erase the region
 *
 *	Erases the FLASH sectors reserved for the OptionROM Configuration
 *	File and, when @size is non-zero, writes @cfg_data into that region
 *	one SF_PAGE_SIZE chunk at a time.  Returns 0 on success or a
 *	negative errno on failure.
 */
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/* Locate the OptionROM Configuration region (this also verifies the
	 * FLASH is large enough to hold one). */
	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	/* NOTE(review): the start sector is derived from the fixed
	 * SF_SEC_SIZE constant while the sector count below uses the
	 * per-adapter sf_sec_size; these agree only when the part's actual
	 * sector size equals SF_SEC_SIZE — confirm for parts with a
	 * different sector size. */
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
			FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Always erase the full (maximum-sized) config region, regardless of
	 * how much of it the new file will occupy. */
	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
					flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		/* The final chunk may be shorter than a full page. */
		if ( (size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
				(size == 0 ? "clear" : "download"), ret);
	return ret;
}
9743 
9744 /**
9745  *	t4_set_filter_mode - configure the optional components of filter tuples
9746  *	@adap: the adapter
9747  *	@mode_map: a bitmap selcting which optional filter components to enable
9748  * 	@sleep_ok: if true we may sleep while awaiting command completion
9749  *
9750  *	Sets the filter mode by selecting the optional components to enable
9751  *	in filter tuples.  Returns 0 on success and a negative error if the
9752  *	requested mode needs more bits than are available for optional
9753  *	components.
9754  */
9755 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
9756 		       bool sleep_ok)
9757 {
9758 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
9759 
9760 	int i, nbits = 0;
9761 
9762 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9763 		if (mode_map & (1 << i))
9764 			nbits += width[i];
9765 	if (nbits > FILTER_OPT_LEN)
9766 		return -EINVAL;
9767 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
9768 	read_filter_mode_and_ingress_config(adap, sleep_ok);
9769 
9770 	return 0;
9771 }
9772 
9773 /**
9774  *	t4_clr_port_stats - clear port statistics
9775  *	@adap: the adapter
9776  *	@idx: the port index
9777  *
9778  *	Clear HW statistics for the given port.
9779  */
9780 void t4_clr_port_stats(struct adapter *adap, int idx)
9781 {
9782 	unsigned int i;
9783 	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
9784 	u32 port_base_addr;
9785 
9786 	if (is_t4(adap))
9787 		port_base_addr = PORT_BASE(idx);
9788 	else
9789 		port_base_addr = T5_PORT_BASE(idx);
9790 
9791 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9792 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9793 		t4_write_reg(adap, port_base_addr + i, 0);
9794 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9795 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9796 		t4_write_reg(adap, port_base_addr + i, 0);
9797 	for (i = 0; i < 4; i++)
9798 		if (bgmap & (1 << i)) {
9799 			t4_write_reg(adap,
9800 			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9801 			t4_write_reg(adap,
9802 			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9803 		}
9804 }
9805 
9806 /**
9807  *	t4_i2c_rd - read I2C data from adapter
9808  *	@adap: the adapter
9809  *	@port: Port number if per-port device; <0 if not
9810  *	@devid: per-port device ID or absolute device ID
9811  *	@offset: byte offset into device I2C space
9812  *	@len: byte length of I2C space data
9813  *	@buf: buffer in which to return I2C data
9814  *
9815  *	Reads the I2C data from the indicated device and location.
9816  */
9817 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9818 	      int port, unsigned int devid,
9819 	      unsigned int offset, unsigned int len,
9820 	      u8 *buf)
9821 {
9822 	u32 ldst_addrspace;
9823 	struct fw_ldst_cmd ldst;
9824 	int ret;
9825 
9826 	if (port >= 4 ||
9827 	    devid >= 256 ||
9828 	    offset >= 256 ||
9829 	    len > sizeof ldst.u.i2c.data)
9830 		return -EINVAL;
9831 
9832 	memset(&ldst, 0, sizeof ldst);
9833 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9834 	ldst.op_to_addrspace =
9835 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9836 			    F_FW_CMD_REQUEST |
9837 			    F_FW_CMD_READ |
9838 			    ldst_addrspace);
9839 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9840 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9841 	ldst.u.i2c.did = devid;
9842 	ldst.u.i2c.boffset = offset;
9843 	ldst.u.i2c.blen = len;
9844 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9845 	if (!ret)
9846 		memcpy(buf, ldst.u.i2c.data, len);
9847 	return ret;
9848 }
9849 
9850 /**
9851  *	t4_i2c_wr - write I2C data to adapter
9852  *	@adap: the adapter
9853  *	@port: Port number if per-port device; <0 if not
9854  *	@devid: per-port device ID or absolute device ID
9855  *	@offset: byte offset into device I2C space
9856  *	@len: byte length of I2C space data
9857  *	@buf: buffer containing new I2C data
9858  *
9859  *	Write the I2C data to the indicated device and location.
9860  */
9861 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9862 	      int port, unsigned int devid,
9863 	      unsigned int offset, unsigned int len,
9864 	      u8 *buf)
9865 {
9866 	u32 ldst_addrspace;
9867 	struct fw_ldst_cmd ldst;
9868 
9869 	if (port >= 4 ||
9870 	    devid >= 256 ||
9871 	    offset >= 256 ||
9872 	    len > sizeof ldst.u.i2c.data)
9873 		return -EINVAL;
9874 
9875 	memset(&ldst, 0, sizeof ldst);
9876 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9877 	ldst.op_to_addrspace =
9878 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9879 			    F_FW_CMD_REQUEST |
9880 			    F_FW_CMD_WRITE |
9881 			    ldst_addrspace);
9882 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9883 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9884 	ldst.u.i2c.did = devid;
9885 	ldst.u.i2c.boffset = offset;
9886 	ldst.u.i2c.blen = len;
9887 	memcpy(ldst.u.i2c.data, buf, len);
9888 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9889 }
9890 
9891 /**
9892  * 	t4_sge_ctxt_rd - read an SGE context through FW
9893  * 	@adap: the adapter
9894  * 	@mbox: mailbox to use for the FW command
9895  * 	@cid: the context id
9896  * 	@ctype: the context type
9897  * 	@data: where to store the context data
9898  *
9899  * 	Issues a FW command through the given mailbox to read an SGE context.
9900  */
9901 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9902 		   enum ctxt_type ctype, u32 *data)
9903 {
9904 	int ret;
9905 	struct fw_ldst_cmd c;
9906 
9907 	if (ctype == CTXT_EGRESS)
9908 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
9909 	else if (ctype == CTXT_INGRESS)
9910 		ret = FW_LDST_ADDRSPC_SGE_INGC;
9911 	else if (ctype == CTXT_FLM)
9912 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9913 	else
9914 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
9915 
9916 	memset(&c, 0, sizeof(c));
9917 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9918 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
9919 					V_FW_LDST_CMD_ADDRSPACE(ret));
9920 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9921 	c.u.idctxt.physid = cpu_to_be32(cid);
9922 
9923 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9924 	if (ret == 0) {
9925 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9926 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9927 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9928 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9929 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9930 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9931 	}
9932 	return ret;
9933 }
9934 
9935 /**
9936  * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9937  * 	@adap: the adapter
9938  * 	@cid: the context id
9939  * 	@ctype: the context type
9940  * 	@data: where to store the context data
9941  *
9942  * 	Reads an SGE context directly, bypassing FW.  This is only for
9943  * 	debugging when FW is unavailable.
9944  */
9945 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9946 		      u32 *data)
9947 {
9948 	int i, ret;
9949 
9950 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9951 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9952 	if (!ret)
9953 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9954 			*data++ = t4_read_reg(adap, i);
9955 	return ret;
9956 }
9957 
9958 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9959     int sleep_ok)
9960 {
9961 	struct fw_sched_cmd cmd;
9962 
9963 	memset(&cmd, 0, sizeof(cmd));
9964 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9965 				      F_FW_CMD_REQUEST |
9966 				      F_FW_CMD_WRITE);
9967 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9968 
9969 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9970 	cmd.u.config.type = type;
9971 	cmd.u.config.minmaxen = minmaxen;
9972 
9973 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9974 			       NULL, sleep_ok);
9975 }
9976 
9977 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9978 		    int rateunit, int ratemode, int channel, int cl,
9979 		    int minrate, int maxrate, int weight, int pktsize,
9980 		    int burstsize, int sleep_ok)
9981 {
9982 	struct fw_sched_cmd cmd;
9983 
9984 	memset(&cmd, 0, sizeof(cmd));
9985 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9986 				      F_FW_CMD_REQUEST |
9987 				      F_FW_CMD_WRITE);
9988 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9989 
9990 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9991 	cmd.u.params.type = type;
9992 	cmd.u.params.level = level;
9993 	cmd.u.params.mode = mode;
9994 	cmd.u.params.ch = channel;
9995 	cmd.u.params.cl = cl;
9996 	cmd.u.params.unit = rateunit;
9997 	cmd.u.params.rate = ratemode;
9998 	cmd.u.params.min = cpu_to_be32(minrate);
9999 	cmd.u.params.max = cpu_to_be32(maxrate);
10000 	cmd.u.params.weight = cpu_to_be16(weight);
10001 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10002 	cmd.u.params.burstsize = cpu_to_be16(burstsize);
10003 
10004 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10005 			       NULL, sleep_ok);
10006 }
10007 
10008 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
10009     unsigned int maxrate, int sleep_ok)
10010 {
10011 	struct fw_sched_cmd cmd;
10012 
10013 	memset(&cmd, 0, sizeof(cmd));
10014 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10015 				      F_FW_CMD_REQUEST |
10016 				      F_FW_CMD_WRITE);
10017 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10018 
10019 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10020 	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10021 	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
10022 	cmd.u.params.ch = channel;
10023 	cmd.u.params.rate = ratemode;		/* REL or ABS */
10024 	cmd.u.params.max = cpu_to_be32(maxrate);/*  %  or kbps */
10025 
10026 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10027 			       NULL, sleep_ok);
10028 }
10029 
10030 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
10031     int weight, int sleep_ok)
10032 {
10033 	struct fw_sched_cmd cmd;
10034 
10035 	if (weight < 0 || weight > 100)
10036 		return -EINVAL;
10037 
10038 	memset(&cmd, 0, sizeof(cmd));
10039 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10040 				      F_FW_CMD_REQUEST |
10041 				      F_FW_CMD_WRITE);
10042 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10043 
10044 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10045 	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10046 	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
10047 	cmd.u.params.ch = channel;
10048 	cmd.u.params.cl = cl;
10049 	cmd.u.params.weight = cpu_to_be16(weight);
10050 
10051 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10052 			       NULL, sleep_ok);
10053 }
10054 
10055 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
10056     int mode, unsigned int maxrate, int pktsize, int sleep_ok)
10057 {
10058 	struct fw_sched_cmd cmd;
10059 
10060 	memset(&cmd, 0, sizeof(cmd));
10061 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10062 				      F_FW_CMD_REQUEST |
10063 				      F_FW_CMD_WRITE);
10064 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10065 
10066 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10067 	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10068 	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
10069 	cmd.u.params.mode = mode;
10070 	cmd.u.params.ch = channel;
10071 	cmd.u.params.cl = cl;
10072 	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
10073 	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
10074 	cmd.u.params.max = cpu_to_be32(maxrate);
10075 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10076 
10077 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10078 			       NULL, sleep_ok);
10079 }
10080 
10081 /*
10082  *	t4_config_watchdog - configure (enable/disable) a watchdog timer
10083  *	@adapter: the adapter
10084  * 	@mbox: mailbox to use for the FW command
10085  * 	@pf: the PF owning the queue
10086  * 	@vf: the VF owning the queue
10087  *	@timeout: watchdog timeout in ms
10088  *	@action: watchdog timer / action
10089  *
10090  *	There are separate watchdog timers for each possible watchdog
10091  *	action.  Configure one of the watchdog timers by setting a non-zero
10092  *	timeout.  Disable a watchdog timer by using a timeout of zero.
10093  */
10094 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
10095 		       unsigned int pf, unsigned int vf,
10096 		       unsigned int timeout, unsigned int action)
10097 {
10098 	struct fw_watchdog_cmd wdog;
10099 	unsigned int ticks;
10100 
10101 	/*
10102 	 * The watchdog command expects a timeout in units of 10ms so we need
10103 	 * to convert it here (via rounding) and force a minimum of one 10ms
10104 	 * "tick" if the timeout is non-zero but the conversion results in 0
10105 	 * ticks.
10106 	 */
10107 	ticks = (timeout + 5)/10;
10108 	if (timeout && !ticks)
10109 		ticks = 1;
10110 
10111 	memset(&wdog, 0, sizeof wdog);
10112 	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
10113 				     F_FW_CMD_REQUEST |
10114 				     F_FW_CMD_WRITE |
10115 				     V_FW_PARAMS_CMD_PFN(pf) |
10116 				     V_FW_PARAMS_CMD_VFN(vf));
10117 	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
10118 	wdog.timeout = cpu_to_be32(ticks);
10119 	wdog.action = cpu_to_be32(action);
10120 
10121 	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
10122 }
10123 
10124 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
10125 {
10126 	struct fw_devlog_cmd devlog_cmd;
10127 	int ret;
10128 
10129 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10130 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10131 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
10132 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10133 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10134 			 sizeof(devlog_cmd), &devlog_cmd);
10135 	if (ret)
10136 		return ret;
10137 
10138 	*level = devlog_cmd.level;
10139 	return 0;
10140 }
10141 
10142 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
10143 {
10144 	struct fw_devlog_cmd devlog_cmd;
10145 
10146 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10147 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10148 					     F_FW_CMD_REQUEST |
10149 					     F_FW_CMD_WRITE);
10150 	devlog_cmd.level = level;
10151 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10152 	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10153 			  sizeof(devlog_cmd), &devlog_cmd);
10154 }
10155