/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
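
/*
 * Example (editor's illustrative sketch, not part of the driver): wait for
 * a hypothetical DONE bit to become set, polling up to 100 times at 5 us
 * intervals, and capture the register value seen at completion.
 * A_EXAMPLE_REG and F_EXAMPLE_DONE are placeholder names, not real
 * t4_regs.h definitions.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = t4_wait_op_done_val(adap, A_EXAMPLE_REG, F_EXAMPLE_DONE, 1,
 *				  100, 5, &val);
 *	if (ret == -EAGAIN)
 *		return ret;
 */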

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
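
/*
 * Example (editor's illustrative sketch): update a hypothetical 4-bit
 * TIMER field without disturbing the rest of the register.  The
 * M_/V_EXAMPLE_TIMER macros below are placeholders in the style of
 * t4_regs.h, not real register definitions.
 *
 *	t4_set_reg_field(adap, A_EXAMPLE_CFG,
 *			 V_EXAMPLE_TIMER(M_EXAMPLE_TIMER),
 *			 V_EXAMPLE_TIMER(7));
 */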

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
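
/*
 * Example (editor's illustrative sketch): dump eight consecutive
 * indirectly addressed registers, starting at index 0, through a
 * hypothetical address/data register pair.  A_EXAMPLE_ADDR and
 * A_EXAMPLE_DATA are placeholder names.
 *
 *	u32 vals[8];
 *
 *	t4_read_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, vals,
 *			 ARRAY_SIZE(vals), 0);
 */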

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff, otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays, up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
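
/*
 * Example (editor's illustrative sketch): issue a firmware RESET command
 * through t4_wr_mbox_meat.  This assumes struct fw_reset_cmd and the
 * FW_CMD_* macros from t4fw_interface.h; treat the exact field and macro
 * names as assumptions rather than a reference.
 *
 *	struct fw_reset_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *			      F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
 */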

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from t4_regs.h.  Added here temporarily for
 * testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC or MEM_MC1
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64 - 1);
	end = (addr + len + 64 - 1) & ~(64 - 1);
	offset = (addr - start) / sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
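
/*
 * Example (editor's illustrative sketch): copy 256 bytes out of EDC 0
 * starting at a 4-byte-aligned offset.  The data comes back as a raw
 * big-endian byte stream, so any multi-byte fields must be converted by
 * the caller.
 *
 *	__be32 buf[64];
 *	int ret;
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
 */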

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
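
/*
 * Example (editor's illustrative sketch): read-modify-write one 32-bit
 * word of serial EEPROM at virtual address 0x100, dropping write
 * protection around the update (t4_seeprom_wp is defined below).
 *
 *	u32 word;
 *	int ret;
 *
 *	if ((ret = t4_seeprom_read(adapter, 0x100, &word)) == 0 &&
 *	    (ret = t4_seeprom_wp(adapter, 0)) == 0 &&
 *	    (ret = t4_seeprom_write(adapter, 0x100, word | 1)) == 0)
 *		ret = t4_seeprom_wp(adapter, 1);
 */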

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
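
/*
 * Worked example of the mapping above, with illustrative values @fn = 4
 * and @sz = 1024, so A = 4K: physical address 0x200 lies in the first 1K
 * and maps to 0x200 + 31K = 0x7e00; physical address 0x410 (1K + 0x10)
 * lies in the function-specific area and maps to ES - 4K + 0x10; physical
 * address 5K maps to 5K - 1K - 4K = 0.
 */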

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - locate an information field keyword in the VPD
 *	@v: pointer to buffered VPD data structure
 *	@kw: the keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len; ) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
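
/*
 * Example (editor's illustrative sketch): read the 3-byte JEDEC ID of the
 * serial flash by chaining a 1-byte SF_RD_ID command to a 3-byte read,
 * then release the SF lock.  Error handling is elided for brevity.
 *
 *	u32 id;
 *	int ret;
 *
 *	ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &id);
 *	t4_write_reg(adapter, A_SF_OP, 0);
 */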

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
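
/*
 * Example (editor's illustrative sketch): writing 600 bytes at flash
 * address 0x100 has to be split on 256-byte page boundaries, because all
 * of the data handed to a single t4_write_flash call must land in one
 * page.
 *
 *	ret = t4_write_flash(adapter, 0x100, 256, data, 1);
 *	if (!ret)
 *		ret = t4_write_flash(adapter, 0x200, 256, data + 256, 1);
 *	if (!ret)
 *		ret = t4_write_flash(adapter, 0x300, 88, data + 512, 1);
 */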

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = FW_VERSION_MAJOR_T4;
		exp_minor = FW_VERSION_MINOR_T4;
		exp_micro = FW_VERSION_MICRO_T4;
		break;
	case CHELSIO_T5:
		exp_major = FW_VERSION_MAJOR_T5;
		exp_minor = FW_VERSION_MINOR_T5;
		exp_micro = FW_VERSION_MICRO_T5;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
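
/*
 * Example (editor's illustrative sketch): how a caller might act on the
 * result, in the spirit of the attach path.  A negative return means the
 * version could not be read or the major version is wrong; a positive
 * return is a minor/micro mismatch that is usually tolerable.
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		CH_ERR(adap, "FW version mismatch, continuing anyway\n");
 */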

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FLASH_FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FLASH_FW_MAX_SIZE);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		    "FW image (%d) is not suitable for this adapter (%d)\n",
		    hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
	    FLASH_FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0, /* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max image size: 1024 * 512B */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write
 *	@boot_data: the boot image to modify
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Writing new checksum value directly to the boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
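
/*
 * Worked example of the checksum rule above (illustrative numbers): if
 * the bytes of a 512-byte legacy image sum to 0x37 once the checksum byte
 * is zeroed, the stored checksum is -0x37 = 0xc9, so the sum over the
 * whole image, checksum included, is 0x37 + 0xc9 = 0x100 == 0 (mod 256),
 * which is what the BIOS verifies.
 */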

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10 ms before the IBQ debug read access is
		 * allowed.  Wait up to 1 second, polling every 1 us.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
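
/*
 * Example (editor's illustrative sketch): read four words from the start
 * of the CIM queue-control region using the base constants above.
 *
 *	unsigned int vals[4];
 *	int ret;
 *
 *	ret = t4_cim_read(adap, CIM_QCTL_BASE, ARRAY_SIZE(vals), vals);
 */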

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
1658 
1659 /**
1660  *	t4_cim_ctl_read - read a block from CIM control region
1661  *	@adap: the adapter
1662  *	@addr: the start address within the CIM control region
1663  *	@n: number of words to read
1664  *	@valp: where to store the result
1665  *
1666  *	Reads a block of 4-byte words from the CIM control region.
1667  */
1668 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1669 		    unsigned int *valp)
1670 {
1671 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1672 }
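
/*
 * Example (a hedged sketch): the CIM_*_BASE constants above carve up the
 * CIM address space, so reading any region is t4_cim_read() with the
 * region base added in, exactly as t4_cim_ctl_read() does for the
 * control region.  Reading the first word of the PBT address table might
 * look like this (the variable name is hypothetical):
 *
 *	unsigned int pbt0;
 *	int ret = t4_cim_read(adap, CIM_PBT_ADDR_BASE, 1, &pbt0);
 */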
1673 
1674 /**
1675  *	t4_cim_read_la - read CIM LA capture buffer
1676  *	@adap: the adapter
1677  *	@la_buf: where to store the LA data
1678  *	@wrptr: the HW write pointer within the capture buffer
1679  *
1680  *	Reads the contents of the CIM LA buffer with the most recent entry at
1681  *	the end	of the returned data and with the entry at @wrptr first.
1682  *	We try to leave the LA in the running state we find it in.
1683  */
1684 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1685 {
1686 	int i, ret;
1687 	unsigned int cfg, val, idx;
1688 
1689 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1690 	if (ret)
1691 		return ret;
1692 
1693 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1694 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1695 		if (ret)
1696 			return ret;
1697 	}
1698 
1699 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1700 	if (ret)
1701 		goto restart;
1702 
1703 	idx = G_UPDBGLAWRPTR(val);
1704 	if (wrptr)
1705 		*wrptr = idx;
1706 
1707 	for (i = 0; i < adap->params.cim_la_size; i++) {
1708 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1709 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1710 		if (ret)
1711 			break;
1712 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1713 		if (ret)
1714 			break;
1715 		if (val & F_UPDBGLARDEN) {
1716 			ret = -ETIMEDOUT;
1717 			break;
1718 		}
1719 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1720 		if (ret)
1721 			break;
1722 		idx = (idx + 1) & M_UPDBGLARDPTR;
1723 	}
1724 restart:
1725 	if (cfg & F_UPDBGLAEN) {
1726 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1727 				      cfg & ~F_UPDBGLARDEN);
1728 		if (!ret)
1729 			ret = r;
1730 	}
1731 	return ret;
1732 }
1733 
1734 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1735 			unsigned int *pif_req_wrptr,
1736 			unsigned int *pif_rsp_wrptr)
1737 {
1738 	int i, j;
1739 	u32 cfg, val, req, rsp;
1740 
1741 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1742 	if (cfg & F_LADBGEN)
1743 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1744 
1745 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1746 	req = G_POLADBGWRPTR(val);
1747 	rsp = G_PILADBGWRPTR(val);
1748 	if (pif_req_wrptr)
1749 		*pif_req_wrptr = req;
1750 	if (pif_rsp_wrptr)
1751 		*pif_rsp_wrptr = rsp;
1752 
1753 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1754 		for (j = 0; j < 6; j++) {
1755 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1756 				     V_PILADBGRDPTR(rsp));
1757 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1758 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1759 			req++;
1760 			rsp++;
1761 		}
1762 		req = (req + 2) & M_POLADBGRDPTR;
1763 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1764 	}
1765 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1766 }
1767 
1768 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1769 {
1770 	u32 cfg;
1771 	int i, j, idx;
1772 
1773 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1774 	if (cfg & F_LADBGEN)
1775 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1776 
1777 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1778 		for (j = 0; j < 5; j++) {
1779 			idx = 8 * i + j;
1780 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1781 				     V_PILADBGRDPTR(idx));
1782 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1783 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1784 		}
1785 	}
1786 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1787 }
1788 
1789 /**
1790  *	t4_tp_read_la - read TP LA capture buffer
1791  *	@adap: the adapter
1792  *	@la_buf: where to store the LA data
1793  *	@wrptr: the HW write pointer within the capture buffer
1794  *
1795  *	Reads the contents of the TP LA buffer with the most recent entry at
1796  *	the end	of the returned data and with the entry at @wrptr first.
1797  *	We leave the LA in the running state we find it in.
1798  */
1799 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1800 {
1801 	bool last_incomplete;
1802 	unsigned int i, cfg, val, idx;
1803 
1804 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1805 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1806 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1807 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1808 
1809 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1810 	idx = G_DBGLAWPTR(val);
1811 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1812 	if (last_incomplete)
1813 		idx = (idx + 1) & M_DBGLARPTR;
1814 	if (wrptr)
1815 		*wrptr = idx;
1816 
1817 	val &= 0xffff;
1818 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1819 	val |= adap->params.tp.la_mask;
1820 
1821 	for (i = 0; i < TPLA_SIZE; i++) {
1822 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1823 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1824 		idx = (idx + 1) & M_DBGLARPTR;
1825 	}
1826 
1827 	/* Wipe out last entry if it isn't valid */
1828 	if (last_incomplete)
1829 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1830 
1831 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1832 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1833 			     cfg | adap->params.tp.la_mask);
1834 }
1835 
1836 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1837 {
1838 	unsigned int i, j;
1839 
1840 	for (i = 0; i < 8; i++) {
1841 		u32 *p = la_buf + i;
1842 
1843 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1844 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1845 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1846 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1847 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1848 	}
1849 }
1850 
1851 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1852 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1853 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1854 
1855 /**
1856  *	t4_link_start - apply link configuration to MAC/PHY
1857  *	@phy: the PHY to setup
1858  *	@mac: the MAC to setup
1859  *	@lc: the requested link configuration
1860  *
1861  *	Set up a port's MAC and PHY according to a desired link configuration.
1862  *	- If the PHY can auto-negotiate, first decide what to advertise, then
1863  *	  enable/disable auto-negotiation as desired, and reset.
1864  *	- If the PHY does not auto-negotiate, just reset it.
1865  *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
1866  *	  otherwise do it later based on the outcome of auto-negotiation.
1867  */
1868 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1869 		  struct link_config *lc)
1870 {
1871 	struct fw_port_cmd c;
1872 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1873 
1874 	lc->link_ok = 0;
1875 	if (lc->requested_fc & PAUSE_RX)
1876 		fc |= FW_PORT_CAP_FC_RX;
1877 	if (lc->requested_fc & PAUSE_TX)
1878 		fc |= FW_PORT_CAP_FC_TX;
1879 
1880 	memset(&c, 0, sizeof(c));
1881 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1882 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1883 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1884 				  FW_LEN16(c));
1885 
1886 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1887 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1888 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1889 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1890 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1891 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1892 	} else
1893 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1894 
1895 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1896 }
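
/*
 * Example (a hedged sketch): request symmetric pause on a port and push
 * the configuration to the firmware.  The port_info fields shown are
 * assumptions about the caller, not anything defined in this file.
 *
 *	struct link_config *lc = &pi->link_cfg;
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	ret = t4_link_start(adap, adap->mbox, pi->tx_chan, lc);
 */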
1897 
1898 /**
1899  *	t4_restart_aneg - restart autonegotiation
1900  *	@adap: the adapter
1901  *	@mbox: mbox to use for the FW command
1902  *	@port: the port id
1903  *
1904  *	Restarts autonegotiation for the selected port.
1905  */
1906 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1907 {
1908 	struct fw_port_cmd c;
1909 
1910 	memset(&c, 0, sizeof(c));
1911 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1912 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1913 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1914 				  FW_LEN16(c));
1915 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1916 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1917 }
1918 
1919 struct intr_info {
1920 	unsigned int mask;       /* bits to check in interrupt status */
1921 	const char *msg;         /* message to print or NULL */
1922 	short stat_idx;          /* stat counter to increment or -1 */
1923 	unsigned short fatal;    /* whether the condition reported is fatal */
1924 };
1925 
1926 /**
1927  *	t4_handle_intr_status - table driven interrupt handler
1928  *	@adapter: the adapter that generated the interrupt
1929  *	@reg: the interrupt status register to process
1930  *	@acts: table of interrupt actions
1931  *
1932  *	A table driven interrupt handler that applies a set of masks to an
1933  *	interrupt status word and performs the corresponding actions if the
1934  *	interrupts described by the mask have occurred.  The actions include
1935  *	optionally emitting a warning or alert message.  The table is terminated
1936  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1937  *	conditions.
1938  */
1939 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1940 				 const struct intr_info *acts)
1941 {
1942 	int fatal = 0;
1943 	unsigned int mask = 0;
1944 	unsigned int status = t4_read_reg(adapter, reg);
1945 
1946 	for ( ; acts->mask; ++acts) {
1947 		if (!(status & acts->mask))
1948 			continue;
1949 		if (acts->fatal) {
1950 			fatal++;
1951 			CH_ALERT(adapter, "%s (0x%x)\n",
1952 				 acts->msg, status & acts->mask);
1953 		} else if (acts->msg)
1954 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1955 					  acts->msg, status & acts->mask);
1956 		mask |= acts->mask;
1957 	}
1958 	status &= mask;
1959 	if (status)                           /* clear processed interrupts */
1960 		t4_write_reg(adapter, reg, status);
1961 	return fatal;
1962 }
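
/*
 * Example (an illustrative sketch): a handler for a hypothetical module
 * is just a zero-mask-terminated table plus one call, exactly like the
 * real handlers below.  F_FOO_PARERR and A_FOO_INT_CAUSE are made-up
 * names.
 *
 *	static struct intr_info foo_intr_info[] = {
 *		{ F_FOO_PARERR, "FOO parity error", -1, 1 },
 *		{ 0 }
 *	};
 *
 *	if (t4_handle_intr_status(adap, A_FOO_INT_CAUSE, foo_intr_info))
 *		t4_fatal_err(adap);
 */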
1963 
1964 /*
1965  * Interrupt handler for the PCIE module.
1966  */
1967 static void pcie_intr_handler(struct adapter *adapter)
1968 {
1969 	static struct intr_info sysbus_intr_info[] = {
1970 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1971 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1972 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1973 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1974 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1975 		{ 0 }
1976 	};
1977 	static struct intr_info pcie_port_intr_info[] = {
1978 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1979 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1980 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1981 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1982 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1983 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1984 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1985 		{ F_RDPE, "Rx data parity error", -1, 1 },
1986 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1987 		{ 0 }
1988 	};
1989 	static struct intr_info pcie_intr_info[] = {
1990 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1991 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1992 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1993 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1994 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1995 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1996 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1997 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1998 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1999 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2000 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2001 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2002 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2003 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2004 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2005 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2006 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2007 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2008 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2009 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2010 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2011 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2012 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2013 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2014 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2015 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2016 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2017 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2018 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2019 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2020 		  0 },
2021 		{ 0 }
2022 	};
2023 
2024 	static struct intr_info t5_pcie_intr_info[] = {
2025 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2026 		  -1, 1 },
2027 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2028 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2029 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2030 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2031 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2032 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2033 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2034 		  -1, 1 },
2035 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2036 		  -1, 1 },
2037 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2038 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2039 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2040 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2041 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2042 		  -1, 1 },
2043 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2044 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2045 		{ F_HREQWRPERR, "PCI HMA channel write request parity error",
		  -1, 1 },
2046 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2047 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2048 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2049 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2050 		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2051 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2052 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2053 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2054 		  -1, 1 },
2055 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2056 		  -1, 1 },
2057 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2058 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2059 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2060 		{ F_READRSPERR, "Outbound read error", -1,
2061 		  0 },
2062 		{ 0 }
2063 	};
2064 
2065 	int fat;
2066 
2067 	fat = t4_handle_intr_status(adapter,
2068 				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2069 				    sysbus_intr_info) +
2070 	      t4_handle_intr_status(adapter,
2071 				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2072 				    pcie_port_intr_info) +
2073 	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2074 				    is_t4(adapter) ?
2075 				    pcie_intr_info : t5_pcie_intr_info);
2076 	if (fat)
2077 		t4_fatal_err(adapter);
2078 }
2079 
2080 /*
2081  * TP interrupt handler.
2082  */
2083 static void tp_intr_handler(struct adapter *adapter)
2084 {
2085 	static struct intr_info tp_intr_info[] = {
2086 		{ 0x3fffffff, "TP parity error", -1, 1 },
2087 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2088 		{ 0 }
2089 	};
2090 
2091 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2092 		t4_fatal_err(adapter);
2093 }
2094 
2095 /*
2096  * SGE interrupt handler.
2097  */
2098 static void sge_intr_handler(struct adapter *adapter)
2099 {
2100 	u64 v;
2101 	u32 err;
2102 
2103 	static struct intr_info sge_intr_info[] = {
2104 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2105 		  "SGE received CPL exceeding IQE size", -1, 1 },
2106 		{ F_ERR_INVALID_CIDX_INC,
2107 		  "SGE GTS CIDX increment too large", -1, 0 },
2108 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2109 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2110 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2111 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2112 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2113 		  0 },
2114 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2115 		  0 },
2116 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2117 		  0 },
2118 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2119 		  0 },
2120 		{ F_ERR_ING_CTXT_PRIO,
2121 		  "SGE too many priority ingress contexts", -1, 0 },
2122 		{ F_ERR_EGR_CTXT_PRIO,
2123 		  "SGE too many priority egress contexts", -1, 0 },
2124 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2125 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2126 		{ 0 }
2127 	};
2128 
2129 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2130 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2131 	if (v) {
2132 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2133 			 (unsigned long long)v);
2134 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2135 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2136 	}
2137 
2138 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2139 
2140 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2141 	if (err & F_ERROR_QID_VALID) {
2142 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2143 		if (err & F_UNCAPTURED_ERROR)
2144 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2145 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2146 			     F_UNCAPTURED_ERROR);
2147 	}
2148 
2149 	if (v != 0)
2150 		t4_fatal_err(adapter);
2151 }
2152 
2153 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2154 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2155 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2156 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2157 
2158 /*
2159  * CIM interrupt handler.
2160  */
2161 static void cim_intr_handler(struct adapter *adapter)
2162 {
2163 	static struct intr_info cim_intr_info[] = {
2164 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2165 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2166 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2167 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2168 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2169 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2170 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2171 		{ 0 }
2172 	};
2173 	static struct intr_info cim_upintr_info[] = {
2174 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2175 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2176 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2177 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2178 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2179 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2180 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2181 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2182 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2183 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2184 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2185 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2186 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2187 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2188 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2189 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2190 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
2191 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
2192 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
2193 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
2194 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
2195 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
2196 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
2197 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
2198 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
2199 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
2200 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
2201 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
2202 		{ 0 }
2203 	};
2204 	int fat;
2205 
2206 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2207 		t4_report_fw_error(adapter);
2208 
2209 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2210 				    cim_intr_info) +
2211 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2212 				    cim_upintr_info);
2213 	if (fat)
2214 		t4_fatal_err(adapter);
2215 }
2216 
2217 /*
2218  * ULP RX interrupt handler.
2219  */
2220 static void ulprx_intr_handler(struct adapter *adapter)
2221 {
2222 	static struct intr_info ulprx_intr_info[] = {
2223 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2224 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2225 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2226 		{ 0 }
2227 	};
2228 
2229 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2230 		t4_fatal_err(adapter);
2231 }
2232 
2233 /*
2234  * ULP TX interrupt handler.
2235  */
2236 static void ulptx_intr_handler(struct adapter *adapter)
2237 {
2238 	static struct intr_info ulptx_intr_info[] = {
2239 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2240 		  0 },
2241 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2242 		  0 },
2243 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2244 		  0 },
2245 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2246 		  0 },
2247 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2248 		{ 0 }
2249 	};
2250 
2251 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2252 		t4_fatal_err(adapter);
2253 }
2254 
2255 /*
2256  * PM TX interrupt handler.
2257  */
2258 static void pmtx_intr_handler(struct adapter *adapter)
2259 {
2260 	static struct intr_info pmtx_intr_info[] = {
2261 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2262 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2263 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2264 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2265 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2266 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2267 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2268 		  1 },
2269 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2270 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
2271 		{ 0 }
2272 	};
2273 
2274 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2275 		t4_fatal_err(adapter);
2276 }
2277 
2278 /*
2279  * PM RX interrupt handler.
2280  */
2281 static void pmrx_intr_handler(struct adapter *adapter)
2282 {
2283 	static struct intr_info pmrx_intr_info[] = {
2284 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2285 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2286 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2287 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2288 		  1 },
2289 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2290 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
2291 		{ 0 }
2292 	};
2293 
2294 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2295 		t4_fatal_err(adapter);
2296 }
2297 
2298 /*
2299  * CPL switch interrupt handler.
2300  */
2301 static void cplsw_intr_handler(struct adapter *adapter)
2302 {
2303 	static struct intr_info cplsw_intr_info[] = {
2304 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2305 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2306 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2307 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2308 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2309 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2310 		{ 0 }
2311 	};
2312 
2313 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2314 		t4_fatal_err(adapter);
2315 }
2316 
2317 /*
2318  * LE interrupt handler.
2319  */
2320 static void le_intr_handler(struct adapter *adap)
2321 {
2322 	static struct intr_info le_intr_info[] = {
2323 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2324 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2325 		{ F_PARITYERR, "LE parity error", -1, 1 },
2326 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2327 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2328 		{ 0 }
2329 	};
2330 
2331 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2332 		t4_fatal_err(adap);
2333 }
2334 
2335 /*
2336  * MPS interrupt handler.
2337  */
2338 static void mps_intr_handler(struct adapter *adapter)
2339 {
2340 	static struct intr_info mps_rx_intr_info[] = {
2341 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2342 		{ 0 }
2343 	};
2344 	static struct intr_info mps_tx_intr_info[] = {
2345 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2346 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2347 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2348 		  -1, 1 },
2349 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2350 		  -1, 1 },
2351 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2352 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2353 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2354 		{ 0 }
2355 	};
2356 	static struct intr_info mps_trc_intr_info[] = {
2357 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2358 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2359 		  1 },
2360 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2361 		{ 0 }
2362 	};
2363 	static struct intr_info mps_stat_sram_intr_info[] = {
2364 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2365 		{ 0 }
2366 	};
2367 	static struct intr_info mps_stat_tx_intr_info[] = {
2368 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2369 		{ 0 }
2370 	};
2371 	static struct intr_info mps_stat_rx_intr_info[] = {
2372 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2373 		{ 0 }
2374 	};
2375 	static struct intr_info mps_cls_intr_info[] = {
2376 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2377 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2378 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2379 		{ 0 }
2380 	};
2381 
2382 	int fat;
2383 
2384 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2385 				    mps_rx_intr_info) +
2386 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2387 				    mps_tx_intr_info) +
2388 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2389 				    mps_trc_intr_info) +
2390 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2391 				    mps_stat_sram_intr_info) +
2392 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2393 				    mps_stat_tx_intr_info) +
2394 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2395 				    mps_stat_rx_intr_info) +
2396 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2397 				    mps_cls_intr_info);
2398 
2399 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2400 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2401 	if (fat)
2402 		t4_fatal_err(adapter);
2403 }
2404 
2405 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2406 
2407 /*
2408  * EDC/MC interrupt handler.
2409  */
2410 static void mem_intr_handler(struct adapter *adapter, int idx)
2411 {
2412 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2413 
2414 	unsigned int addr, cnt_addr, v;
2415 
2416 	if (idx <= MEM_EDC1) {
2417 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2418 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2419 	} else {
2420 		if (is_t4(adapter)) {
2421 			addr = A_MC_INT_CAUSE;
2422 			cnt_addr = A_MC_ECC_STATUS;
2423 		} else {
2424 			addr = A_MC_P_INT_CAUSE;
2425 			cnt_addr = A_MC_P_ECC_STATUS;
2426 		}
2427 	}
2428 
2429 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2430 	if (v & F_PERR_INT_CAUSE)
2431 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2432 	if (v & F_ECC_CE_INT_CAUSE) {
2433 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2434 
2435 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2436 		CH_WARN_RATELIMIT(adapter,
2437 				  "%u %s correctable ECC data error%s\n",
2438 				  cnt, name[idx], cnt > 1 ? "s" : "");
2439 	}
2440 	if (v & F_ECC_UE_INT_CAUSE)
2441 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2442 			 name[idx]);
2443 
2444 	t4_write_reg(adapter, addr, v);
2445 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2446 		t4_fatal_err(adapter);
2447 }
2448 
2449 /*
2450  * MA interrupt handler.
2451  */
2452 static void ma_intr_handler(struct adapter *adapter)
2453 {
2454 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2455 
2456 	if (status & F_MEM_PERR_INT_CAUSE)
2457 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2458 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2459 	if (status & F_MEM_WRAP_INT_CAUSE) {
2460 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2461 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2462 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2463 			 G_MEM_WRAP_ADDRESS(v) << 4);
2464 	}
2465 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2466 	t4_fatal_err(adapter);
2467 }
2468 
2469 /*
2470  * SMB interrupt handler.
2471  */
2472 static void smb_intr_handler(struct adapter *adap)
2473 {
2474 	static struct intr_info smb_intr_info[] = {
2475 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2476 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2477 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2478 		{ 0 }
2479 	};
2480 
2481 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2482 		t4_fatal_err(adap);
2483 }
2484 
2485 /*
2486  * NC-SI interrupt handler.
2487  */
2488 static void ncsi_intr_handler(struct adapter *adap)
2489 {
2490 	static struct intr_info ncsi_intr_info[] = {
2491 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2492 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2493 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2494 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2495 		{ 0 }
2496 	};
2497 
2498 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2499 		t4_fatal_err(adap);
2500 }
2501 
2502 /*
2503  * XGMAC interrupt handler.
2504  */
2505 static void xgmac_intr_handler(struct adapter *adap, int port)
2506 {
2507 	u32 v, int_cause_reg;
2508 
2509 	if (is_t4(adap))
2510 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2511 	else
2512 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2513 
2514 	v = t4_read_reg(adap, int_cause_reg);
2515 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2516 	if (!v)
2517 		return;
2518 
2519 	if (v & F_TXFIFO_PRTY_ERR)
2520 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2521 	if (v & F_RXFIFO_PRTY_ERR)
2522 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2523 	t4_write_reg(adap, int_cause_reg, v);
2524 	t4_fatal_err(adap);
2525 }
2526 
2527 /*
2528  * PL interrupt handler.
2529  */
2530 static void pl_intr_handler(struct adapter *adap)
2531 {
2532 	static struct intr_info pl_intr_info[] = {
2533 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2534 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2535 		{ 0 }
2536 	};
2537 
2538 	static struct intr_info t5_pl_intr_info[] = {
2539 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2540 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2541 		{ 0 }
2542 	};
2543 
2544 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2545 	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2546 		t4_fatal_err(adap);
2547 }
2548 
2549 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2550 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2551 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2552 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2553 
2554 /**
2555  *	t4_slow_intr_handler - control path interrupt handler
2556  *	@adapter: the adapter
2557  *
2558  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2559  *	The designation 'slow' is because it involves register reads, while
2560  *	data interrupts typically don't involve any MMIOs.
2561  */
2562 int t4_slow_intr_handler(struct adapter *adapter)
2563 {
2564 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2565 
2566 	if (!(cause & GLBL_INTR_MASK))
2567 		return 0;
2568 	if (cause & F_CIM)
2569 		cim_intr_handler(adapter);
2570 	if (cause & F_MPS)
2571 		mps_intr_handler(adapter);
2572 	if (cause & F_NCSI)
2573 		ncsi_intr_handler(adapter);
2574 	if (cause & F_PL)
2575 		pl_intr_handler(adapter);
2576 	if (cause & F_SMB)
2577 		smb_intr_handler(adapter);
2578 	if (cause & F_XGMAC0)
2579 		xgmac_intr_handler(adapter, 0);
2580 	if (cause & F_XGMAC1)
2581 		xgmac_intr_handler(adapter, 1);
2582 	if (cause & F_XGMAC_KR0)
2583 		xgmac_intr_handler(adapter, 2);
2584 	if (cause & F_XGMAC_KR1)
2585 		xgmac_intr_handler(adapter, 3);
2586 	if (cause & F_PCIE)
2587 		pcie_intr_handler(adapter);
2588 	if (cause & F_MC)
2589 		mem_intr_handler(adapter, MEM_MC);
2590 	if (cause & F_EDC0)
2591 		mem_intr_handler(adapter, MEM_EDC0);
2592 	if (cause & F_EDC1)
2593 		mem_intr_handler(adapter, MEM_EDC1);
2594 	if (cause & F_LE)
2595 		le_intr_handler(adapter);
2596 	if (cause & F_TP)
2597 		tp_intr_handler(adapter);
2598 	if (cause & F_MA)
2599 		ma_intr_handler(adapter);
2600 	if (cause & F_PM_TX)
2601 		pmtx_intr_handler(adapter);
2602 	if (cause & F_PM_RX)
2603 		pmrx_intr_handler(adapter);
2604 	if (cause & F_ULP_RX)
2605 		ulprx_intr_handler(adapter);
2606 	if (cause & F_CPL_SWITCH)
2607 		cplsw_intr_handler(adapter);
2608 	if (cause & F_SGE)
2609 		sge_intr_handler(adapter);
2610 	if (cause & F_ULP_TX)
2611 		ulptx_intr_handler(adapter);
2612 
2613 	/* Clear the interrupts just processed for which we are the master. */
2614 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2615 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2616 	return 1;
2617 }
2618 
2619 /**
2620  *	t4_intr_enable - enable interrupts
2621  *	@adapter: the adapter whose interrupts should be enabled
2622  *
2623  *	Enable PF-specific interrupts for the calling function and the top-level
2624  *	interrupt concentrator for global interrupts.  Interrupts are already
2625  *	enabled at each module; here we just enable the roots of the interrupt
2626  *	hierarchies.
2627  *
2628  *	Note: this function should be called only when the driver manages
2629  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2630  *	function at a time should be doing this.
2631  */
2632 void t4_intr_enable(struct adapter *adapter)
2633 {
2634 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2635 
2636 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2637 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2638 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2639 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2640 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2641 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2642 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2643 		     F_EGRESS_SIZE_ERR);
2644 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2645 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2646 }
2647 
2648 /**
2649  *	t4_intr_disable - disable interrupts
2650  *	@adapter: the adapter whose interrupts should be disabled
2651  *
2652  *	Disable interrupts.  We only disable the top-level interrupt
2653  *	concentrators.  The caller must be a PCI function managing global
2654  *	interrupts.
2655  */
2656 void t4_intr_disable(struct adapter *adapter)
2657 {
2658 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2659 
2660 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2661 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2662 }
2663 
2664 /**
2665  *	t4_intr_clear - clear all interrupts
2666  *	@adapter: the adapter whose interrupts should be cleared
2667  *
2668  *	Clears all interrupts.  The caller must be a PCI function managing
2669  *	global interrupts.
2670  */
2671 void t4_intr_clear(struct adapter *adapter)
2672 {
2673 	static const unsigned int cause_reg[] = {
2674 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2675 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2676 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2677 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2678 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2679 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2680 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2681 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2682 		A_TP_INT_CAUSE,
2683 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2684 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2685 		A_MPS_RX_PERR_INT_CAUSE,
2686 		A_CPL_INTR_CAUSE,
2687 		MYPF_REG(A_PL_PF_INT_CAUSE),
2688 		A_PL_PL_INT_CAUSE,
2689 		A_LE_DB_INT_CAUSE,
2690 	};
2691 
2692 	unsigned int i;
2693 
2694 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2695 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2696 
2697 	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2698 				A_MC_P_INT_CAUSE, 0xffffffff);
2699 
2700 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2701 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2702 }
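
/*
 * Example (a hedged sketch of the bring-up order implied by the comments
 * above): the PF that manages global interrupts clears stale causes and
 * then enables the interrupt roots:
 *
 *	t4_intr_clear(adapter);
 *	t4_intr_enable(adapter);
 *
 * and from its interrupt routine:
 *
 *	if (t4_slow_intr_handler(adapter))
 *		... the interrupt was ours and has been processed ...
 */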
2703 
2704 /**
2705  *	hash_mac_addr - return the hash value of a MAC address
2706  *	@addr: the 48-bit Ethernet MAC address
2707  *
2708  *	Hashes a MAC address according to the hash function used by HW inexact
2709  *	(hash) address matching.
2710  */
2711 static int hash_mac_addr(const u8 *addr)
2712 {
2713 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2714 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2715 	a ^= b;
2716 	a ^= (a >> 12);
2717 	a ^= (a >> 6);
2718 	return a & 0x3f;
2719 }
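
/*
 * Worked example (computed by hand; a sanity check, not authoritative):
 * for the MAC address 00:01:02:03:04:05, a = 0x000102 and b = 0x030405,
 * so a ^= b yields 0x030507, a ^= (a >> 12) yields 0x030537,
 * a ^= (a >> 6) yields 0x030923, and the hash is 0x030923 & 0x3f = 0x23,
 * i.e. bucket 35.
 */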
2720 
2721 /**
2722  *	t4_config_rss_range - configure a portion of the RSS mapping table
2723  *	@adapter: the adapter
2724  *	@mbox: mbox to use for the FW command
2725  *	@viid: virtual interface whose RSS subtable is to be written
2726  *	@start: start entry in the table to write
2727  *	@n: how many table entries to write
2728  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2729  *	@nrspq: number of values in @rspq
2730  *
2731  *	Programs the selected part of the VI's RSS mapping table with the
2732  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2733  *	until the full table range is populated.
2734  *
2735  *	The caller must ensure the values in @rspq are in the range allowed for
2736  *	@viid.
2737  */
2738 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2739 			int start, int n, const u16 *rspq, unsigned int nrspq)
2740 {
2741 	int ret;
2742 	const u16 *rsp = rspq;
2743 	const u16 *rsp_end = rspq + nrspq;
2744 	struct fw_rss_ind_tbl_cmd cmd;
2745 
2746 	memset(&cmd, 0, sizeof(cmd));
2747 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2748 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2749 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2750 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2751 
2753 	/*
2754 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2755 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2756 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2757 	 * reserved.
2758 	 */
2759 	while (n > 0) {
2760 		int nq = min(n, 32);
2761 		int nq_packed = 0;
2762 		__be32 *qp = &cmd.iq0_to_iq2;
2763 
2764 		/*
2765 		 * Set up the firmware RSS command header to send the next
2766 		 * "nq" Ingress Queue IDs to the firmware.
2767 		 */
2768 		cmd.niqid = htons(nq);
2769 		cmd.startidx = htons(start);
2770 
2771 		/*
2772 		 * Advance @start and @n past the "nq" IDs this command will carry.
2773 		 */
2774 		start += nq;
2775 		n -= nq;
2776 
2777 		/*
2778 		 * While there are still Ingress Queue IDs to stuff into the
2779 		 * current firmware RSS command, retrieve them from the
2780 		 * Ingress Queue ID array and insert them into the command.
2781 		 */
2782 		while (nq > 0) {
2783 			/*
2784 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2785 			 * around the Ingress Queue ID array if necessary) and
2786 			 * insert them into the firmware RSS command at the
2787 			 * current 3-tuple position within the commad.
2788 			 */
2789 			u16 qbuf[3];
2790 			u16 *qbp = qbuf;
2791 			int nqbuf = min(3, nq);
2792 
2793 			nq -= nqbuf;
2794 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2795 			while (nqbuf && nq_packed < 32) {
2796 				nqbuf--;
2797 				nq_packed++;
2798 				*qbp++ = *rsp++;
2799 				if (rsp >= rsp_end)
2800 					rsp = rspq;
2801 			}
2802 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2803 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2804 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2805 		}
2806 
2807 		/*
2808 		 * Send this portion of the RSS table update to the firmware;
2809 		 * bail out on any errors.
2810 		 */
2811 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2812 		if (ret)
2813 			return ret;
2814 	}
2815 
2816 	return 0;
2817 }
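
/*
 * Example (a hedged sketch): spread a VI's whole indirection table
 * round-robin across four ingress queues.  The queue ids and the table
 * size of 128 are hypothetical; because nrspq < n the four ids simply
 * repeat until all 128 slots are written.
 *
 *	u16 rss[4] = { iq0, iq1, iq2, iq3 };
 *	ret = t4_config_rss_range(adap, mbox, viid, 0, 128, rss, 4);
 */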
2818 
2819 /**
2820  *	t4_config_glbl_rss - configure the global RSS mode
2821  *	@adapter: the adapter
2822  *	@mbox: mbox to use for the FW command
2823  *	@mode: global RSS mode
2824  *	@flags: mode-specific flags
2825  *
2826  *	Sets the global RSS mode.
2827  */
2828 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2829 		       unsigned int flags)
2830 {
2831 	struct fw_rss_glb_config_cmd c;
2832 
2833 	memset(&c, 0, sizeof(c));
2834 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2835 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2836 	c.retval_len16 = htonl(FW_LEN16(c));
2837 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2838 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2839 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2840 		c.u.basicvirtual.mode_pkd =
2841 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2842 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2843 	} else
2844 		return -EINVAL;
2845 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2846 }
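
/*
 * Example (a hedged sketch): select the basic-virtual global mode.  The
 * flag names are assumptions about the firmware interface header, chosen
 * to match the synmapen_to_hashtoeplitz field used above.
 *
 *	ret = t4_config_glbl_rss(adap, mbox,
 *	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 *	    F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
 *	    F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ);
 */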
2847 
2848 /**
2849  *	t4_config_vi_rss - configure per VI RSS settings
2850  *	@adapter: the adapter
2851  *	@mbox: mbox to use for the FW command
2852  *	@viid: the VI id
2853  *	@flags: RSS flags
2854  *	@defq: id of the default RSS queue for the VI.
2855  *
2856  *	Configures VI-specific RSS properties.
2857  */
2858 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2859 		     unsigned int flags, unsigned int defq)
2860 {
2861 	struct fw_rss_vi_config_cmd c;
2862 
2863 	memset(&c, 0, sizeof(c));
2864 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2865 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2866 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2867 	c.retval_len16 = htonl(FW_LEN16(c));
2868 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2869 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2870 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2871 }
2872 
2873 /* Read an RSS table row */
2874 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2875 {
2876 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2877 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2878 				   5, 0, val);
2879 }
2880 
2881 /**
2882  *	t4_read_rss - read the contents of the RSS mapping table
2883  *	@adapter: the adapter
2884  *	@map: holds the contents of the RSS mapping table
2885  *
2886  *	Reads the contents of the RSS hash->queue mapping table.
2887  */
2888 int t4_read_rss(struct adapter *adapter, u16 *map)
2889 {
2890 	u32 val;
2891 	int i, ret;
2892 
2893 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2894 		ret = rd_rss_row(adapter, i, &val);
2895 		if (ret)
2896 			return ret;
2897 		*map++ = G_LKPTBLQUEUE0(val);
2898 		*map++ = G_LKPTBLQUEUE1(val);
2899 	}
2900 	return 0;
2901 }
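
/*
 * Example: snapshot the whole hash->queue map; @map must have room for
 * RSS_NENTRIES 16-bit entries.
 *
 *	u16 map[RSS_NENTRIES];
 *	ret = t4_read_rss(adapter, map);
 */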
2902 
2903 /**
2904  *	t4_read_rss_key - read the global RSS key
2905  *	@adap: the adapter
2906  *	@key: 10-entry array holding the 320-bit RSS key
2907  *
2908  *	Reads the global 320-bit RSS key.
2909  */
2910 void t4_read_rss_key(struct adapter *adap, u32 *key)
2911 {
2912 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2913 			 A_TP_RSS_SECRET_KEY0);
2914 }
2915 
2916 /**
2917  *	t4_write_rss_key - program one of the RSS keys
2918  *	@adap: the adapter
2919  *	@key: 10-entry array holding the 320-bit RSS key
2920  *	@idx: which RSS key to write
2921  *
2922  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2923  *	0..15 the corresponding entry in the RSS key table is written,
2924  *	otherwise the global RSS key is written.
2925  */
2926 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2927 {
2928 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2929 			  A_TP_RSS_SECRET_KEY0);
2930 	if (idx >= 0 && idx < 16)
2931 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2932 			     V_KEYWRADDR(idx) | F_KEYWREN);
2933 }
2934 
2935 /**
2936  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2937  *	@adapter: the adapter
2938  *	@index: the entry in the PF RSS table to read
2939  *	@valp: where to store the returned value
2940  *
2941  *	Reads the PF RSS Configuration Table at the specified index and returns
2942  *	the value found there.
2943  */
2944 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2945 {
2946 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2947 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2948 }
2949 
2950 /**
2951  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2952  *	@adapter: the adapter
2953  *	@index: the entry in the PF RSS table to write
2954  *	@val: the value to store
2955  *
2956  *	Writes the PF RSS Configuration Table at the specified index with the
2957  *	specified value.
2958  */
2959 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2960 {
2961 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2962 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2963 }
2964 
2965 /**
2966  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2967  *	@adapter: the adapter
2968  *	@index: the entry in the VF RSS table to read
2969  *	@vfl: where to store the returned VFL
2970  *	@vfh: where to store the returned VFH
2971  *
2972  *	Reads the VF RSS Configuration Table at the specified index and returns
2973  *	the (VFL, VFH) values found there.
2974  */
2975 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2976 			   u32 *vfl, u32 *vfh)
2977 {
2978 	u32 vrt;
2979 
2980 	/*
2981 	 * Request that the index'th VF Table values be read into VFL/VFH.
2982 	 */
2983 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2984 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2985 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2986 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2987 
2988 	/*
2989 	 * Grab the VFL/VFH values ...
2990 	 */
2991 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2992 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2993 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2994 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2995 }
2996 
2997 /**
2998  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3000  *	@adapter: the adapter
3001  *	@index: the entry in the VF RSS table to write
3002  *	@vfl: the VFL to store
3003  *	@vfh: the VFH to store
3004  *
3005  *	Writes the VF RSS Configuration Table at the specified index with the
3006  *	specified (VFL, VFH) values.
3007  */
3008 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3009 			    u32 vfl, u32 vfh)
3010 {
3011 	u32 vrt;
3012 
3013 	/*
3014 	 * Load up VFL/VFH with the values to be written ...
3015 	 */
3016 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3017 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3018 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3019 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3020 
3021 	/*
3022 	 * Write the VFL/VFH into the VF Table at index'th location.
3023 	 */
3024 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3025 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3026 	vrt |= V_VFWRADDR(index) | F_VFWREN;
3027 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3028 }
3029 
3030 /**
3031  *	t4_read_rss_pf_map - read PF RSS Map
3032  *	@adapter: the adapter
3033  *
3034  *	Reads the PF RSS Map register and returns its value.
3035  */
3036 u32 t4_read_rss_pf_map(struct adapter *adapter)
3037 {
3038 	u32 pfmap;
3039 
3040 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3041 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3042 	return pfmap;
3043 }
3044 
3045 /**
3046  *	t4_write_rss_pf_map - write PF RSS Map
3047  *	@adapter: the adapter
3048  *	@pfmap: PF RSS Map value
3049  *
3050  *	Writes the specified value to the PF RSS Map register.
3051  */
3052 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3053 {
3054 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3055 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3056 }
3057 
3058 /**
3059  *	t4_read_rss_pf_mask - read PF RSS Mask
3060  *	@adapter: the adapter
3061  *
3062  *	Reads the PF RSS Mask register and returns its value.
3063  */
3064 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3065 {
3066 	u32 pfmask;
3067 
3068 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3069 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3070 	return pfmask;
3071 }
3072 
3073 /**
3074  *	t4_write_rss_pf_mask - write PF RSS Mask
3075  *	@adapter: the adapter
3076  *	@pfmask: PF RSS Mask value
3077  *
3078  *	Writes the specified value to the PF RSS Mask register.
3079  */
3080 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3081 {
3082 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3083 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3084 }
3085 
3086 /**
3087  *	t4_set_filter_mode - configure the optional components of filter tuples
3088  *	@adap: the adapter
3089  *	@mode_map: a bitmap selecting which optional filter components to enable
3090  *
3091  *	Sets the filter mode by selecting the optional components to enable
3092  *	in filter tuples.  Returns 0 on success and a negative error if the
3093  *	requested mode needs more bits than are available for optional
3094  *	components.
3095  */
3096 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3097 {
3098 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3099 
3100 	int i, nbits = 0;
3101 
3102 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3103 		if (mode_map & (1 << i))
3104 			nbits += width[i];
3105 	if (nbits > FILTER_OPT_LEN)
3106 		return -EINVAL;
3107 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3108 			  A_TP_VLAN_PRI_MAP);
3109 	return 0;
3110 }
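
/*
 * Example (a hedged sketch): selecting fragmentation, MPS hit type,
 * protocol, VLAN, port, and FCoE consumes 1 + 3 + 8 + 17 + 3 + 1 = 33
 * option bits, which must fit in FILTER_OPT_LEN.  The F_* names are the
 * TP_VLAN_PRI_MAP field macros matching S_FCOE..S_FRAGMENTATION above.
 *
 *	ret = t4_set_filter_mode(adap, F_FRAGMENTATION | F_MPSHITTYPE |
 *	    F_PROTOCOL | F_VLAN | F_PORT | F_FCOE);
 */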
3111 
3112 /**
3113  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3114  *	@adap: the adapter
3115  *	@v4: holds the TCP/IP counter values
3116  *	@v6: holds the TCP/IPv6 counter values
3117  *
3118  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3119  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3120  */
3121 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3122 			 struct tp_tcp_stats *v6)
3123 {
3124 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3125 
3126 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3127 #define STAT(x)     val[STAT_IDX(x)]
3128 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3129 
3130 	if (v4) {
3131 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3132 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3133 		v4->tcpOutRsts = STAT(OUT_RST);
3134 		v4->tcpInSegs  = STAT64(IN_SEG);
3135 		v4->tcpOutSegs = STAT64(OUT_SEG);
3136 		v4->tcpRetransSegs = STAT64(RXT_SEG);
3137 	}
3138 	if (v6) {
3139 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3140 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3141 		v6->tcpOutRsts = STAT(OUT_RST);
3142 		v6->tcpInSegs  = STAT64(IN_SEG);
3143 		v6->tcpOutSegs = STAT64(OUT_SEG);
3144 		v6->tcpRetransSegs = STAT64(RXT_SEG);
3145 	}
3146 #undef STAT64
3147 #undef STAT
3148 #undef STAT_IDX
3149 }
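
/*
 * Example: read only the IPv4 side; passing NULL skips the IPv6 set.
 *
 *	struct tp_tcp_stats v4;
 *	t4_tp_get_tcp_stats(adap, &v4, NULL);
 */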
3150 
3151 /**
3152  *	t4_tp_get_err_stats - read TP's error MIB counters
3153  *	@adap: the adapter
3154  *	@st: holds the counter values
3155  *
3156  *	Returns the values of TP's error counters.
3157  */
3158 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3159 {
3160 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3161 			 12, A_TP_MIB_MAC_IN_ERR_0);
3162 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3163 			 8, A_TP_MIB_TNL_CNG_DROP_0);
3164 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3165 			 4, A_TP_MIB_TNL_DROP_0);
3166 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3167 			 4, A_TP_MIB_OFD_VLN_DROP_0);
3168 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3169 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3170 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3171 			 2, A_TP_MIB_OFD_ARP_DROP);
3172 }
3173 
3174 /**
3175  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3176  *	@adap: the adapter
3177  *	@st: holds the counter values
3178  *
3179  *	Returns the values of TP's proxy counters.
3180  */
3181 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3182 {
3183 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3184 			 4, A_TP_MIB_TNL_LPBK_0);
3185 }
3186 
3187 /**
3188  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3189  *	@adap: the adapter
3190  *	@st: holds the counter values
3191  *
3192  *	Returns the values of TP's CPL counters.
3193  */
3194 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3195 {
3196 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3197 			 8, A_TP_MIB_CPL_IN_REQ_0);
3198 }
3199 
3200 /**
3201  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3202  *	@adap: the adapter
3203  *	@st: holds the counter values
3204  *
3205  *	Returns the values of TP's RDMA counters.
3206  */
3207 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3208 {
3209 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3210 			 2, A_TP_MIB_RQE_DFR_MOD);
3211 }
3212 
3213 /**
3214  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3215  *	@adap: the adapter
3216  *	@idx: the port index
3217  *	@st: holds the counter values
3218  *
3219  *	Returns the values of TP's FCoE counters for the selected port.
3220  */
3221 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3222 		       struct tp_fcoe_stats *st)
3223 {
3224 	u32 val[2];
3225 
3226 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3227 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3228 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3229 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3230 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3231 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3232 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3233 }
3234 
3235 /**
3236  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3237  *	@adap: the adapter
3238  *	@st: holds the counter values
3239  *
3240  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3241  */
3242 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3243 {
3244 	u32 val[4];
3245 
3246 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3247 			 A_TP_MIB_USM_PKTS);
3248 	st->frames = val[0];
3249 	st->drops = val[1];
3250 	st->octets = ((u64)val[2] << 32) | val[3];
3251 }
3252 
3253 /**
3254  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3255  *	@adap: the adapter
3256  *	@mtus: where to store the MTU values
3257  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3258  *
3259  *	Reads the HW path MTU table.
3260  */
3261 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3262 {
3263 	u32 v;
3264 	int i;
3265 
3266 	for (i = 0; i < NMTUS; ++i) {
3267 		t4_write_reg(adap, A_TP_MTU_TABLE,
3268 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3269 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3270 		mtus[i] = G_MTUVALUE(v);
3271 		if (mtu_log)
3272 			mtu_log[i] = G_MTUWIDTH(v);
3273 	}
3274 }
3275 
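/*
 * Usage sketch (illustrative only, not part of the driver): dump the HW
 * path MTU table of an already-initialized adapter "adap".
 *
 *	u16 mtus[NMTUS];
 *	u8 mtu_log[NMTUS];
 *	int i;
 *
 *	t4_read_mtu_tbl(adap, mtus, mtu_log);
 *	for (i = 0; i < NMTUS; i++)
 *		printf("MTU %u, log2 width %u\n", mtus[i], mtu_log[i]);
 */
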
3276 /**
3277  *	t4_read_cong_tbl - reads the congestion control table
3278  *	@adap: the adapter
3279  *	@incr: where to store the alpha values
3280  *
3281  *	Reads the additive increments programmed into the HW congestion
3282  *	control table.
3283  */
3284 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3285 {
3286 	unsigned int mtu, w;
3287 
3288 	for (mtu = 0; mtu < NMTUS; ++mtu)
3289 		for (w = 0; w < NCCTRL_WIN; ++w) {
3290 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3291 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3292 			incr[mtu][w] = (u16)t4_read_reg(adap,
3293 						A_TP_CCTRL_TABLE) & 0x1fff;
3294 		}
3295 }
3296 
3297 /**
3298  *	t4_read_pace_tbl - read the pace table
3299  *	@adap: the adapter
3300  *	@pace_vals: holds the returned values
3301  *
3302  *	Returns the values of TP's pace table in microseconds.
3303  */
3304 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3305 {
3306 	unsigned int i, v;
3307 
3308 	for (i = 0; i < NTX_SCHED; i++) {
3309 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3310 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3311 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3312 	}
3313 }
3314 
3315 /**
3316  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3317  *	@adap: the adapter
3318  *	@addr: the indirect TP register address
3319  *	@mask: specifies the field within the register to modify
3320  *	@val: new value for the field
3321  *
3322  *	Sets a field of an indirect TP register to the given value.
3323  */
3324 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3325 			    unsigned int mask, unsigned int val)
3326 {
3327 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3328 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3329 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3330 }
3331 
3332 /**
3333  *	init_cong_ctrl - initialize congestion control parameters
3334  *	@a: the alpha values for congestion control
3335  *	@b: the beta values for congestion control
3336  *
3337  *	Initialize the congestion control parameters.
3338  */
3339 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3340 {
3341 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3342 	a[9] = 2;
3343 	a[10] = 3;
3344 	a[11] = 4;
3345 	a[12] = 5;
3346 	a[13] = 6;
3347 	a[14] = 7;
3348 	a[15] = 8;
3349 	a[16] = 9;
3350 	a[17] = 10;
3351 	a[18] = 14;
3352 	a[19] = 17;
3353 	a[20] = 21;
3354 	a[21] = 25;
3355 	a[22] = 30;
3356 	a[23] = 35;
3357 	a[24] = 45;
3358 	a[25] = 60;
3359 	a[26] = 80;
3360 	a[27] = 100;
3361 	a[28] = 200;
3362 	a[29] = 300;
3363 	a[30] = 400;
3364 	a[31] = 500;
3365 
3366 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3367 	b[9] = b[10] = 1;
3368 	b[11] = b[12] = 2;
3369 	b[13] = b[14] = b[15] = b[16] = 3;
3370 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3371 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3372 	b[28] = b[29] = 6;
3373 	b[30] = b[31] = 7;
3374 }
3375 
3376 /* The minimum additive increment value for the congestion control table */
3377 #define CC_MIN_INCR 2U
3378 
3379 /**
3380  *	t4_load_mtus - write the MTU and congestion control HW tables
3381  *	@adap: the adapter
3382  *	@mtus: the values for the MTU table
3383  *	@alpha: the values for the congestion control alpha parameter
3384  *	@beta: the values for the congestion control beta parameter
3385  *
3386  *	Write the HW MTU table with the supplied MTUs and the high-speed
3387  *	congestion control table with the supplied alpha, beta, and MTUs.
3388  *	We write the two tables together because the additive increments
3389  *	depend on the MTUs.
3390  */
3391 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3392 		  const unsigned short *alpha, const unsigned short *beta)
3393 {
3394 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3395 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3396 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3397 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3398 	};
3399 
3400 	unsigned int i, w;
3401 
3402 	for (i = 0; i < NMTUS; ++i) {
3403 		unsigned int mtu = mtus[i];
3404 		unsigned int log2 = fls(mtu);
3405 
3406 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3407 			log2--;
3408 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3409 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3410 
3411 		for (w = 0; w < NCCTRL_WIN; ++w) {
3412 			unsigned int inc;
3413 
3414 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3415 				  CC_MIN_INCR);
3416 
3417 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3418 				     (w << 16) | (beta[w] << 13) | inc);
3419 		}
3420 	}
3421 }
3422 
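/*
 * Usage sketch (illustrative only): program the MTU and congestion control
 * tables with the default alpha/beta values from init_cong_ctrl() above.
 * "mtus" is a hypothetical caller-supplied table of NMTUS path MTUs.
 *
 *	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];
 *
 *	init_cong_ctrl(a, b);
 *	t4_load_mtus(adap, mtus, a, b);
 */
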
3423 /**
3424  *	t4_set_pace_tbl - set the pace table
3425  *	@adap: the adapter
3426  *	@pace_vals: the pace values in microseconds
3427  *	@start: index of the first entry in the HW pace table to set
3428  *	@n: how many entries to set
3429  *
3430  *	Sets (a subset of the) HW pace table.
3431  */
3432 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3433 		     unsigned int start, unsigned int n)
3434 {
3435 	unsigned int vals[NTX_SCHED], i;
3436 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3437 
3438 	if (n > NTX_SCHED)
		return -ERANGE;
3440 
3441 	/* convert values from us to dack ticks, rounding to closest value */
3442 	for (i = 0; i < n; i++, pace_vals++) {
3443 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3444 		if (vals[i] > 0x7ff)
3445 			return -ERANGE;
3446 		if (*pace_vals && vals[i] == 0)
3447 			return -ERANGE;
3448 	}
3449 	for (i = 0; i < n; i++, start++)
3450 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3451 	return 0;
3452 }
3453 
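/*
 * Example of the conversion above (illustrative numbers): if one DACK tick
 * is 5us, tick_ns = dack_ticks_to_usec(adap, 1000) = 5000, and a requested
 * pace of 12us becomes (1000 * 12 + 2500) / 5000 = 2 ticks, i.e. 12us
 * rounds to 10us.
 */
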
3454 /**
3455  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3456  *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: target rate in Kbps
3459  *
3460  *	Configure a Tx HW scheduler for the target rate.
3461  */
3462 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3463 {
3464 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3465 	unsigned int clk = adap->params.vpd.cclk * 1000;
3466 	unsigned int selected_cpt = 0, selected_bpt = 0;
3467 
3468 	if (kbps > 0) {
3469 		kbps *= 125;     /* -> bytes */
3470 		for (cpt = 1; cpt <= 255; cpt++) {
3471 			tps = clk / cpt;
3472 			bpt = (kbps + tps / 2) / tps;
3473 			if (bpt > 0 && bpt <= 255) {
3474 				v = bpt * tps;
3475 				delta = v >= kbps ? v - kbps : kbps - v;
3476 				if (delta < mindelta) {
3477 					mindelta = delta;
3478 					selected_cpt = cpt;
3479 					selected_bpt = bpt;
3480 				}
3481 			} else if (selected_cpt)
3482 				break;
3483 		}
3484 		if (!selected_cpt)
3485 			return -EINVAL;
3486 	}
3487 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3488 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3489 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3490 	if (sched & 1)
3491 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3492 	else
3493 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3494 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3495 	return 0;
3496 }
3497 
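/*
 * Worked example for the search above (illustrative numbers): with a
 * 250MHz core clock (cclk = 250000, so clk = 250,000,000) and kbps =
 * 1,000,000 (1Gbps), the target is 125,000,000 bytes/s.  The loop settles
 * on cpt = 2 (tps = 125,000,000) and bpt = 1, i.e. one byte every two
 * core clocks, hitting the target exactly (delta = 0).
 */
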
3498 /**
3499  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3500  *	@adap: the adapter
3501  *	@sched: the scheduler index
3502  *	@ipg: the interpacket delay in tenths of nanoseconds
3503  *
3504  *	Set the interpacket delay for a HW packet rate scheduler.
3505  */
3506 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3507 {
3508 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3509 
3510 	/* convert ipg to nearest number of core clocks */
3511 	ipg *= core_ticks_per_usec(adap);
3512 	ipg = (ipg + 5000) / 10000;
3513 	if (ipg > M_TXTIMERSEPQ0)
3514 		return -EINVAL;
3515 
3516 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3517 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3518 	if (sched & 1)
3519 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3520 	else
3521 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3522 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3523 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3524 	return 0;
3525 }
3526 
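/*
 * Example of the IPG conversion above (illustrative numbers): with a
 * 250MHz core clock, core_ticks_per_usec(adap) = 250, so ipg = 1000
 * (100ns, in tenths of nanoseconds) becomes (1000 * 250 + 5000) / 10000 =
 * 25 core clocks, which is again exactly 100ns.
 */
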
3527 /**
3528  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3529  *	@adap: the adapter
3530  *	@sched: the scheduler index
 *	@kbps: where to store the current rate in Kbps (0 if disabled)
 *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3533  *
3534  *	Return the current configuration of a HW Tx scheduler.
3535  */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
		     unsigned int *kbps, unsigned int *ipg)
3538 {
3539 	unsigned int v, addr, bpt, cpt;
3540 
3541 	if (kbps) {
3542 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3543 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3544 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3545 		if (sched & 1)
3546 			v >>= 16;
3547 		bpt = (v >> 8) & 0xff;
3548 		cpt = v & 0xff;
3549 		if (!cpt)
3550 			*kbps = 0;        /* scheduler disabled */
3551 		else {
3552 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3553 			*kbps = (v * bpt) / 125;
3554 		}
3555 	}
3556 	if (ipg) {
3557 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3558 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3559 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3560 		if (sched & 1)
3561 			v >>= 16;
3562 		v &= 0xffff;
3563 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3564 	}
3565 }
3566 
3567 /*
3568  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3569  * clocks.  The formula is
3570  *
3571  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3572  *
3573  * which is equivalent to
3574  *
 * bytes/s = 62.5 * bytes256 * ClkFreq, with ClkFreq in kHz
3576  */
3577 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3578 {
	u64 v = (u64)bytes256 * adap->params.vpd.cclk;	/* avoid 32-bit overflow */
3580 
3581 	return v * 62 + v / 2;
3582 }
3583 
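/*
 * Worked example for chan_rate() (illustrative numbers): with cclk =
 * 250000 (kHz) and bytes256 = 4, v = 1,000,000 and the result is
 * 62 * 1,000,000 + 500,000 = 62,500,000 bytes/s, matching
 * 4 * 256 * 250,000,000 / 4096.
 */
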
3584 /**
3585  *	t4_get_chan_txrate - get the current per channel Tx rates
3586  *	@adap: the adapter
3587  *	@nic_rate: rates for NIC traffic
3588  *	@ofld_rate: rates for offloaded traffic
3589  *
3590  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3591  *	for each channel.
3592  */
3593 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3594 {
3595 	u32 v;
3596 
3597 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3598 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3599 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3600 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3601 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3602 
3603 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3604 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3605 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3606 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3607 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3608 }
3609 
3610 /**
3611  *	t4_set_trace_filter - configure one of the tracing filters
3612  *	@adap: the adapter
3613  *	@tp: the desired trace filter parameters
3614  *	@idx: which filter to configure
3615  *	@enable: whether to enable or disable the filter
3616  *
3617  *	Configures one of the tracing filters available in HW.  If @enable is
 *	%0 @tp is not examined and may be %NULL.  The user is responsible for
 *	setting the single/multiple trace mode by writing the A_MPS_TRC_CFG
 *	register, e.g. with the "cxgbtool iface reg reg_addr=val" command.
 *	See t4_sniffer/docs/readme.txt for a complete description of how to
 *	set up tracing on T4.
3623  */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
3626 {
3627 	int i, ofst = idx * 4;
3628 	u32 data_reg, mask_reg, cfg;
3629 	u32 multitrc = F_TRCMULTIFILTER;
3630 
3631 	if (!enable) {
3632 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3633 		return 0;
3634 	}
3635 
3636 	/*
3637 	 * TODO - After T4 data book is updated, specify the exact
3638 	 * section below.
3639 	 *
3640 	 * See T4 data book - MPS section for a complete description
3641 	 * of the below if..else handling of A_MPS_TRC_CFG register
3642 	 * value.
3643 	 */
3644 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3645 	if (cfg & F_TRCMULTIFILTER) {
3646 		/*
		 * If multiple tracers are enabled, the maximum capture
		 * size per tracer is 2.5KB (the 10KB trace FIFO divided
		 * among 4 channels) minus 2 flits (16 bytes) for the
		 * CPL_TRACE_PKT header.
3650 		 */
3651 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3652 			return -EINVAL;
	} else {
3655 		/*
3656 		 * If multiple tracers are disabled, to avoid deadlocks
3657 		 * maximum packet capture size of 9600 bytes is recommended.
3658 		 * Also in this mode, only trace0 can be enabled and running.
3659 		 */
3660 		multitrc = 0;
3661 		if (tp->snap_len > 9600 || idx)
3662 			return -EINVAL;
3663 	}
3664 
3665 	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3666 	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3667 		return -EINVAL;
3668 
3669 	/* stop the tracer we'll be changing */
3670 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3671 
3672 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3673 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3674 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3675 
3676 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3677 		t4_write_reg(adap, data_reg, tp->data[i]);
3678 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3679 	}
3680 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3681 		     V_TFCAPTUREMAX(tp->snap_len) |
3682 		     V_TFMINPKTSIZE(tp->min_len));
3683 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3684 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap) ?
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
		     V_T5_TFINVERTMATCH(tp->invert)));
3689 
3690 	return 0;
3691 }
3692 
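/*
 * Usage sketch (illustrative only): enable tracer 0 to capture the first
 * 128 bytes of every packet on port 0.  A zeroed data/mask pair makes all
 * match bytes "don't care"; real callers fill in the bytes to match.
 *
 *	struct trace_params tp;
 *
 *	memset(&tp, 0, sizeof(tp));
 *	tp.snap_len = 128;
 *	tp.port = 0;
 *	if (t4_set_trace_filter(adap, &tp, 0, 1) != 0)
 *		... parameters invalid for the current A_MPS_TRC_CFG mode ...
 */
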
3693 /**
3694  *	t4_get_trace_filter - query one of the tracing filters
3695  *	@adap: the adapter
3696  *	@tp: the current trace filter parameters
3697  *	@idx: which trace filter to query
3698  *	@enabled: non-zero if the filter is enabled
3699  *
3700  *	Returns the current settings of one of the HW tracing filters.
3701  */
3702 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3703 			 int *enabled)
3704 {
3705 	u32 ctla, ctlb;
3706 	int i, ofst = idx * 4;
3707 	u32 data_reg, mask_reg;
3708 
3709 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3710 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3711 
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);
3724 
3725 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3726 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3727 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3728 
3729 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3730 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3731 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3732 	}
3733 }
3734 
3735 /**
3736  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3737  *	@adap: the adapter
3738  *	@cnt: where to store the count statistics
3739  *	@cycles: where to store the cycle statistics
3740  *
3741  *	Returns performance statistics from PMTX.
3742  */
3743 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3744 {
3745 	int i;
3746 	u32 data[2];
3747 
3748 	for (i = 0; i < PM_NSTATS; i++) {
3749 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3750 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3751 		if (is_t4(adap))
3752 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3753 		else {
3754 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3755 					 A_PM_TX_DBG_DATA, data, 2,
3756 					 A_PM_TX_DBG_STAT_MSB);
3757 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3758 		}
3759 	}
3760 }
3761 
3762 /**
3763  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3764  *	@adap: the adapter
3765  *	@cnt: where to store the count statistics
3766  *	@cycles: where to store the cycle statistics
3767  *
3768  *	Returns performance statistics from PMRX.
3769  */
3770 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3771 {
3772 	int i;
3773 	u32 data[2];
3774 
3775 	for (i = 0; i < PM_NSTATS; i++) {
3776 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3777 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3778 		if (is_t4(adap))
3779 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3780 		else {
3781 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3782 					 A_PM_RX_DBG_DATA, data, 2,
3783 					 A_PM_RX_DBG_STAT_MSB);
3784 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3785 		}
3786 	}
3787 }
3788 
3789 /**
3790  *	get_mps_bg_map - return the buffer groups associated with a port
3791  *	@adap: the adapter
3792  *	@idx: the port index
3793  *
3794  *	Returns a bitmap indicating which MPS buffer groups are associated
3795  *	with the given port.  Bit i is set if buffer group i is used by the
3796  *	port.
3797  */
3798 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3799 {
3800 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3801 
3802 	if (n == 0)
3803 		return idx == 0 ? 0xf : 0;
3804 	if (n == 1)
3805 		return idx < 2 ? (3 << (2 * idx)) : 0;
3806 	return 1 << idx;
3807 }
3808 
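/*
 * For example, per the G_NUMPORTS encoding above: a 1-port configuration
 * (n == 0) gives port 0 all four buffer groups (0xf), a 2-port
 * configuration (n == 1) gives port 0 groups 0-1 (0x3) and port 1 groups
 * 2-3 (0xc), and a 4-port configuration gives each port its own group.
 */
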
3809 /**
 *	t4_get_port_stats_offset - collect port stats relative to a previous
 *				   snapshot
 *	@adap: the adapter
 *	@idx: the port index
 *	@stats: current stats to fill
 *	@offset: previous stats snapshot
3816  */
3817 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3818 		struct port_stats *stats,
3819 		struct port_stats *offset)
3820 {
3821 	u64 *s, *o;
3822 	int i;
3823 
3824 	t4_get_port_stats(adap, idx, stats);
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
	     i < (sizeof(struct port_stats) / sizeof(u64));
	     i++, s++, o++)
3828 		*s -= *o;
3829 }
3830 
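/*
 * Usage sketch (illustrative only): report only the statistics accumulated
 * since a baseline snapshot.
 *
 *	struct port_stats base, now;
 *
 *	t4_get_port_stats(adap, 0, &base);	... baseline, taken earlier ...
 *	...
 *	t4_get_port_stats_offset(adap, 0, &now, &base);
 *
 * After the second call "now" holds the per-counter deltas relative to
 * "base".
 */
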
3831 /**
3832  *	t4_get_port_stats - collect port statistics
3833  *	@adap: the adapter
3834  *	@idx: the port index
3835  *	@p: the stats structure to fill
3836  *
3837  *	Collect statistics related to the given port from HW.
3838  */
3839 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3840 {
3841 	u32 bgmap = get_mps_bg_map(adap, idx);
3842 
3843 #define GET_STAT(name) \
3844 	t4_read_reg64(adap, \
3845 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3846 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3847 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3848 
3849 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3850 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3851 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3852 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3853 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3854 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3855 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3856 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3857 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3858 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3859 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3860 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3861 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3862 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3863 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3864 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3865 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3866 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3867 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3868 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3869 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3870 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3871 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3872 
3873 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3874 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3875 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3876 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3877 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3878 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3879 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3880 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3881 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3882 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3883 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3884 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3885 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3886 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3887 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3888 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3889 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3890 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3891 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3892 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3893 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3894 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3895 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3896 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3897 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3898 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3899 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3900 
3901 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3902 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3903 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3904 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3905 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3906 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3907 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3908 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3909 
3910 #undef GET_STAT
3911 #undef GET_STAT_COM
3912 }
3913 
3914 /**
3915  *	t4_clr_port_stats - clear port statistics
3916  *	@adap: the adapter
3917  *	@idx: the port index
3918  *
3919  *	Clear HW statistics for the given port.
3920  */
3921 void t4_clr_port_stats(struct adapter *adap, int idx)
3922 {
3923 	unsigned int i;
3924 	u32 bgmap = get_mps_bg_map(adap, idx);
3925 	u32 port_base_addr;
3926 
3927 	if (is_t4(adap))
3928 		port_base_addr = PORT_BASE(idx);
3929 	else
3930 		port_base_addr = T5_PORT_BASE(idx);
3931 
3932 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3933 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3934 		t4_write_reg(adap, port_base_addr + i, 0);
3935 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3936 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3937 		t4_write_reg(adap, port_base_addr + i, 0);
3938 	for (i = 0; i < 4; i++)
3939 		if (bgmap & (1 << i)) {
3940 			t4_write_reg(adap,
3941 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3942 			t4_write_reg(adap,
3943 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3944 		}
3945 }
3946 
3947 /**
3948  *	t4_get_lb_stats - collect loopback port statistics
3949  *	@adap: the adapter
3950  *	@idx: the loopback port index
3951  *	@p: the stats structure to fill
3952  *
3953  *	Return HW statistics for the given loopback port.
3954  */
3955 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3956 {
3957 	u32 bgmap = get_mps_bg_map(adap, idx);
3958 
3959 #define GET_STAT(name) \
3960 	t4_read_reg64(adap, \
3961 	(is_t4(adap) ? \
3962 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3963 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3964 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3965 
3966 	p->octets           = GET_STAT(BYTES);
3967 	p->frames           = GET_STAT(FRAMES);
3968 	p->bcast_frames     = GET_STAT(BCAST);
3969 	p->mcast_frames     = GET_STAT(MCAST);
3970 	p->ucast_frames     = GET_STAT(UCAST);
3971 	p->error_frames     = GET_STAT(ERROR);
3972 
3973 	p->frames_64        = GET_STAT(64B);
3974 	p->frames_65_127    = GET_STAT(65B_127B);
3975 	p->frames_128_255   = GET_STAT(128B_255B);
3976 	p->frames_256_511   = GET_STAT(256B_511B);
3977 	p->frames_512_1023  = GET_STAT(512B_1023B);
3978 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3979 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3980 	p->drop             = GET_STAT(DROP_FRAMES);
3981 
3982 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3983 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3984 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3985 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3986 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3987 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3988 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3989 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3990 
3991 #undef GET_STAT
3992 #undef GET_STAT_COM
3993 }
3994 
3995 /**
3996  *	t4_wol_magic_enable - enable/disable magic packet WoL
3997  *	@adap: the adapter
3998  *	@port: the physical port index
3999  *	@addr: MAC address expected in magic packets, %NULL to disable
4000  *
4001  *	Enables/disables magic packet wake-on-LAN for the selected port.
4002  */
4003 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4004 			 const u8 *addr)
4005 {
4006 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4007 
4008 	if (is_t4(adap)) {
4009 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4010 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4011 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4012 	} else {
4013 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4014 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4015 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4016 	}
4017 
4018 	if (addr) {
4019 		t4_write_reg(adap, mag_id_reg_l,
4020 			     (addr[2] << 24) | (addr[3] << 16) |
4021 			     (addr[4] << 8) | addr[5]);
4022 		t4_write_reg(adap, mag_id_reg_h,
4023 			     (addr[0] << 8) | addr[1]);
4024 	}
4025 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4026 			 V_MAGICEN(addr != NULL));
4027 }
4028 
4029 /**
4030  *	t4_wol_pat_enable - enable/disable pattern-based WoL
4031  *	@adap: the adapter
4032  *	@port: the physical port index
4033  *	@map: bitmap of which HW pattern filters to set
4034  *	@mask0: byte mask for bytes 0-63 of a packet
4035  *	@mask1: byte mask for bytes 64-127 of a packet
4036  *	@crc: Ethernet CRC for selected bytes
4037  *	@enable: enable/disable switch
4038  *
4039  *	Sets the pattern filters indicated in @map to mask out the bytes
4040  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4041  *	the resulting packet against @crc.  If @enable is %true pattern-based
4042  *	WoL is enabled, otherwise disabled.
4043  */
4044 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4045 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4046 {
4047 	int i;
4048 	u32 port_cfg_reg;
4049 
4050 	if (is_t4(adap))
4051 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4052 	else
4053 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4054 
4055 	if (!enable) {
4056 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4057 		return 0;
4058 	}
4059 	if (map > 0xff)
4060 		return -EINVAL;
4061 
4062 #define EPIO_REG(name) \
4063 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4064 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4065 
4066 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4067 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4068 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4069 
4070 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4071 		if (!(map & 1))
4072 			continue;
4073 
4074 		/* write byte masks */
4075 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4076 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4077 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4078 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4079 			return -ETIMEDOUT;
4080 
4081 		/* write CRC */
4082 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4083 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4084 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4085 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4086 			return -ETIMEDOUT;
4087 	}
4088 #undef EPIO_REG
4089 
4090 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4091 	return 0;
4092 }
4093 
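/*
 * Usage sketch (illustrative only): arm HW pattern filter 0 on port 0.
 * "mask0", "mask1" and "crc" are hypothetical caller-computed values per
 * the semantics described above.
 *
 *	int ret;
 *
 *	ret = t4_wol_pat_enable(adap, 0, 1, mask0, mask1, crc, true);
 *	if (ret != 0)
 *		... EPIO write timed out or the filter map was invalid ...
 */
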
4094 /**
4095  *	t4_mk_filtdelwr - create a delete filter WR
4096  *	@ftid: the filter ID
4097  *	@wr: the filter work request to populate
4098  *	@qid: ingress queue to receive the delete notification
4099  *
4100  *	Creates a filter work request to delete the supplied filter.  If @qid is
4101  *	negative the delete notification is suppressed.
4102  */
4103 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4104 {
4105 	memset(wr, 0, sizeof(*wr));
4106 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4107 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4108 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4109 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4110 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4111 	if (qid >= 0)
4112 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4113 }
4114 
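/*
 * Usage sketch (illustrative only): build a work request that deletes
 * filter 5 and delivers the reply to ingress queue 0; the WR would then be
 * sent down the usual control/TX path (not shown).
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(5, &wr, 0);
 */
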
4115 #define INIT_CMD(var, cmd, rd_wr) do { \
4116 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4117 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4118 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4119 } while (0)
4120 
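/**
 *	t4_fwaddrspace_write - write to the firmware's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command to write @val at address @addr in the
 *	firmware's address space.
 */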
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr,
			 u32 val)
4122 {
4123 	struct fw_ldst_cmd c;
4124 
4125 	memset(&c, 0, sizeof(c));
4126 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4127 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4128 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4129 	c.u.addrval.addr = htonl(addr);
4130 	c.u.addrval.val = htonl(val);
4131 
4132 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4133 }
4134 
4135 /**
4136  *	t4_i2c_rd - read a byte from an i2c addressable device
4137  *	@adap: the adapter
4138  *	@mbox: mailbox to use for the FW command
4139  *	@port_id: the port id
4140  *	@dev_addr: the i2c device address
4141  *	@offset: the byte offset to read from
4142  *	@valp: where to store the value
4143  */
4144 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
4145 	       u8 dev_addr, u8 offset, u8 *valp)
4146 {
4147 	int ret;
4148 	struct fw_ldst_cmd c;
4149 
4150 	memset(&c, 0, sizeof(c));
4151 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4152 		F_FW_CMD_READ |
4153 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
4154 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4155 	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
4156 	c.u.i2c_deprecated.base = dev_addr;
4157 	c.u.i2c_deprecated.boffset = offset;
4158 
4159 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4160 	if (ret == 0)
4161 		*valp = c.u.i2c_deprecated.data;
4162 	return ret;
4163 }
4164 
4165 /**
4166  *	t4_mdio_rd - read a PHY register through MDIO
4167  *	@adap: the adapter
4168  *	@mbox: mailbox to use for the FW command
4169  *	@phy_addr: the PHY address
4170  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4171  *	@reg: the register to read
4172  *	@valp: where to store the value
4173  *
4174  *	Issues a FW command through the given mailbox to read a PHY register.
4175  */
4176 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4177 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4178 {
4179 	int ret;
4180 	struct fw_ldst_cmd c;
4181 
4182 	memset(&c, 0, sizeof(c));
4183 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4184 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4185 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4186 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4187 				   V_FW_LDST_CMD_MMD(mmd));
4188 	c.u.mdio.raddr = htons(reg);
4189 
4190 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4191 	if (ret == 0)
4192 		*valp = ntohs(c.u.mdio.rval);
4193 	return ret;
4194 }
4195 
4196 /**
4197  *	t4_mdio_wr - write a PHY register through MDIO
4198  *	@adap: the adapter
4199  *	@mbox: mailbox to use for the FW command
4200  *	@phy_addr: the PHY address
4201  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4202  *	@reg: the register to write
4203  *	@valp: value to write
4204  *
4205  *	Issues a FW command through the given mailbox to write a PHY register.
4206  */
4207 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4208 	       unsigned int mmd, unsigned int reg, unsigned int val)
4209 {
4210 	struct fw_ldst_cmd c;
4211 
4212 	memset(&c, 0, sizeof(c));
4213 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4214 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4215 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4216 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4217 				   V_FW_LDST_CMD_MMD(mmd));
4218 	c.u.mdio.raddr = htons(reg);
4219 	c.u.mdio.rval = htons(val);
4220 
4221 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4222 }
4223 
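/*
 * Usage sketch (illustrative only): read-modify-write a clause 45 PHY
 * register (MMD 1, register 0) on PHY address 0; adap->mbox is the PF's
 * own mailbox.
 *
 *	unsigned int v;
 *
 *	if (t4_mdio_rd(adap, adap->mbox, 0, 1, 0, &v) == 0)
 *		t4_mdio_wr(adap, adap->mbox, 0, 1, 0, v | 0x8000);
 */
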
4224 /**
4225  *	t4_sge_ctxt_flush - flush the SGE context cache
4226  *	@adap: the adapter
4227  *	@mbox: mailbox to use for the FW command
4228  *
4229  *	Issues a FW command through the given mailbox to flush the
4230  *	SGE context cache.
4231  */
4232 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4233 {
4234 	int ret;
4235 	struct fw_ldst_cmd c;
4236 
4237 	memset(&c, 0, sizeof(c));
4238 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4239 			F_FW_CMD_READ |
4240 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4241 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4242 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4243 
4244 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4245 	return ret;
4246 }
4247 
4248 /**
4249  *	t4_sge_ctxt_rd - read an SGE context through FW
4250  *	@adap: the adapter
4251  *	@mbox: mailbox to use for the FW command
4252  *	@cid: the context id
4253  *	@ctype: the context type
4254  *	@data: where to store the context data
4255  *
4256  *	Issues a FW command through the given mailbox to read an SGE context.
4257  */
4258 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4259 		   enum ctxt_type ctype, u32 *data)
4260 {
4261 	int ret;
4262 	struct fw_ldst_cmd c;
4263 
4264 	if (ctype == CTXT_EGRESS)
4265 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4266 	else if (ctype == CTXT_INGRESS)
4267 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4268 	else if (ctype == CTXT_FLM)
4269 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4270 	else
4271 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4272 
4273 	memset(&c, 0, sizeof(c));
4274 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4275 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4276 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4277 	c.u.idctxt.physid = htonl(cid);
4278 
4279 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4280 	if (ret == 0) {
4281 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4282 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4283 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4284 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4285 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4286 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4287 	}
4288 	return ret;
4289 }
4290 
4291 /**
4292  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4293  *	@adap: the adapter
4294  *	@cid: the context id
4295  *	@ctype: the context type
4296  *	@data: where to store the context data
4297  *
4298  *	Reads an SGE context directly, bypassing FW.  This is only for
4299  *	debugging when FW is unavailable.
4300  */
4301 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4302 		      u32 *data)
4303 {
4304 	int i, ret;
4305 
4306 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4307 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4308 	if (!ret)
4309 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4310 			*data++ = t4_read_reg(adap, i);
4311 	return ret;
4312 }
4313 
4314 /**
4315  *	t4_fw_hello - establish communication with FW
4316  *	@adap: the adapter
4317  *	@mbox: mailbox to use for the FW command
4318  *	@evt_mbox: mailbox to receive async FW events
4319  *	@master: specifies the caller's willingness to be the device master
4320  *	@state: returns the current device state (if non-NULL)
4321  *
4322  *	Issues a command to establish communication with FW.  Returns either
4323  *	an error (negative integer) or the mailbox of the Master PF.
4324  */
4325 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4326 		enum dev_master master, enum dev_state *state)
4327 {
4328 	int ret;
4329 	struct fw_hello_cmd c;
4330 	u32 v;
4331 	unsigned int master_mbox;
4332 	int retries = FW_CMD_HELLO_RETRIES;
4333 
4334 retry:
4335 	memset(&c, 0, sizeof(c));
4336 	INIT_CMD(c, HELLO, WRITE);
4337 	c.err_to_clearinit = htonl(
4338 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4339 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4340 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4341 			M_FW_HELLO_CMD_MBMASTER) |
4342 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4343 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4344 		F_FW_HELLO_CMD_CLEARINIT);
4345 
4346 	/*
4347 	 * Issue the HELLO command to the firmware.  If it's not successful
4348 	 * but indicates that we got a "busy" or "timeout" condition, retry
4349 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4350 	 * retry limit, check to see if the firmware left us any error
4351 	 * information and report that if so ...
4352 	 */
4353 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4354 	if (ret != FW_SUCCESS) {
4355 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4356 			goto retry;
4357 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4358 			t4_report_fw_error(adap);
4359 		return ret;
4360 	}
4361 
4362 	v = ntohl(c.err_to_clearinit);
4363 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4364 	if (state) {
4365 		if (v & F_FW_HELLO_CMD_ERR)
4366 			*state = DEV_STATE_ERR;
4367 		else if (v & F_FW_HELLO_CMD_INIT)
4368 			*state = DEV_STATE_INIT;
4369 		else
4370 			*state = DEV_STATE_UNINIT;
4371 	}
4372 
4373 	/*
4374 	 * If we're not the Master PF then we need to wait around for the
4375 	 * Master PF Driver to finish setting up the adapter.
4376 	 *
4377 	 * Note that we also do this wait if we're a non-Master-capable PF and
4378 	 * there is no current Master PF; a Master PF may show up momentarily
4379 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4380 	 * OS loads lots of different drivers rapidly at the same time).  In
4381 	 * this case, the Master PF returned by the firmware will be
4382 	 * M_PCIE_FW_MASTER so the test below will work ...
4383 	 */
4384 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4385 	    master_mbox != mbox) {
4386 		int waiting = FW_CMD_HELLO_TIMEOUT;
4387 
4388 		/*
4389 		 * Wait for the firmware to either indicate an error or
4390 		 * initialized state.  If we see either of these we bail out
4391 		 * and report the issue to the caller.  If we exhaust the
4392 		 * "hello timeout" and we haven't exhausted our retries, try
4393 		 * again.  Otherwise bail with a timeout error.
4394 		 */
4395 		for (;;) {
4396 			u32 pcie_fw;
4397 
4398 			msleep(50);
4399 			waiting -= 50;
4400 
4401 			/*
			 * If neither Error nor Initialized is indicated
4403 			 * by the firmware keep waiting till we exhaust our
4404 			 * timeout ... and then retry if we haven't exhausted
4405 			 * our retries ...
4406 			 */
4407 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4408 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4409 				if (waiting <= 0) {
4410 					if (retries-- > 0)
4411 						goto retry;
4412 
4413 					return -ETIMEDOUT;
4414 				}
4415 				continue;
4416 			}
4417 
4418 			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
4421 			 */
4422 			if (state) {
4423 				if (pcie_fw & F_PCIE_FW_ERR)
4424 					*state = DEV_STATE_ERR;
4425 				else if (pcie_fw & F_PCIE_FW_INIT)
4426 					*state = DEV_STATE_INIT;
4427 			}
4428 
4429 			/*
4430 			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
4432 			 * for our caller.
4433 			 */
4434 			if (master_mbox == M_PCIE_FW_MASTER &&
4435 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4436 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4437 			break;
4438 		}
4439 	}
4440 
4441 	return master_mbox;
4442 }
4443 
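/*
 * Usage sketch (illustrative only): contact the firmware at attach time,
 * offering to become the master PF, and note the resulting device state.
 *
 *	enum dev_state state;
 *	int rc;
 *
 *	rc = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
 *	if (rc < 0)
 *		... no contact with the firmware ...
 *	else if (rc == adap->mbox)
 *		... we are the master PF ...
 */
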
4444 /**
4445  *	t4_fw_bye - end communication with FW
4446  *	@adap: the adapter
4447  *	@mbox: mailbox to use for the FW command
4448  *
4449  *	Issues a command to terminate communication with FW.
4450  */
4451 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4452 {
4453 	struct fw_bye_cmd c;
4454 
4455 	memset(&c, 0, sizeof(c));
4456 	INIT_CMD(c, BYE, WRITE);
4457 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4458 }
4459 
4460 /**
4461  *	t4_fw_reset - issue a reset to FW
4462  *	@adap: the adapter
4463  *	@mbox: mailbox to use for the FW command
4464  *	@reset: specifies the type of reset to perform
4465  *
4466  *	Issues a reset command of the specified type to FW.
4467  */
4468 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4469 {
4470 	struct fw_reset_cmd c;
4471 
4472 	memset(&c, 0, sizeof(c));
4473 	INIT_CMD(c, RESET, WRITE);
4474 	c.val = htonl(reset);
4475 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4476 }
4477 
4478 /**
4479  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4480  *	@adap: the adapter
4481  *	@mbox: mailbox to use for the FW RESET command (if desired)
4482  *	@force: force uP into RESET even if FW RESET command fails
4483  *
4484  *	Issues a RESET command to firmware (if desired) with a HALT indication
4485  *	and then puts the microprocessor into RESET state.  The RESET command
4486  *	will only be issued if a legitimate mailbox is provided (mbox <=
4487  *	M_PCIE_FW_MASTER).
4488  *
4489  *	This is generally used in order for the host to safely manipulate the
4490  *	adapter without fear of conflicting with whatever the firmware might
4491  *	be doing.  The only way out of this state is to RESTART the firmware
4492  *	...
4493  */
4494 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4495 {
4496 	int ret = 0;
4497 
4498 	/*
4499 	 * If a legitimate mailbox is provided, issue a RESET command
4500 	 * with a HALT indication.
4501 	 */
4502 	if (mbox <= M_PCIE_FW_MASTER) {
4503 		struct fw_reset_cmd c;
4504 
4505 		memset(&c, 0, sizeof(c));
4506 		INIT_CMD(c, RESET, WRITE);
4507 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4508 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4509 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4510 	}
4511 
4512 	/*
4513 	 * Normally we won't complete the operation if the firmware RESET
4514 	 * command fails but if our caller insists we'll go ahead and put the
4515 	 * uP into RESET.  This can be useful if the firmware is hung or even
4516 	 * missing ...  We'll have to take the risk of putting the uP into
4517 	 * RESET without the cooperation of firmware in that case.
4518 	 *
4519 	 * We also force the firmware's HALT flag to be on in case we bypassed
4520 	 * the firmware RESET command above or we're dealing with old firmware
4521 	 * which doesn't have the HALT capability.  This will serve as a flag
4522 	 * for the incoming firmware to know that it's coming out of a HALT
4523 	 * rather than a RESET ... if it's new enough to understand that ...
4524 	 */
4525 	if (ret == 0 || force) {
4526 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4527 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4528 	}
4529 
4530 	/*
4531 	 * And we always return the result of the firmware RESET command
4532 	 * even when we force the uP into RESET ...
4533 	 */
4534 	return ret;
4535 }
4536 
4537 /**
4538  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4539  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
4541  *
4542  *	Restart firmware previously halted by t4_fw_halt().  On successful
4543  *	return the previous PF Master remains as the new PF Master and there
4544  *	is no need to issue a new HELLO command, etc.
4545  *
4546  *	We do this in two ways:
4547  *
4548  *	 1. If we're dealing with newer firmware we'll simply want to take
4549  *	    the chip's microprocessor out of RESET.  This will cause the
4550  *	    firmware to start up from its start vector.  And then we'll loop
4551  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4552  *	    reset to 0) or we timeout.
4553  *
4554  *	 2. If we're dealing with older firmware then we'll need to RESET
4555  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4556  *	    flag and automatically RESET itself on startup.
4557  */
4558 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4559 {
4560 	if (reset) {
4561 		/*
4562 		 * Since we're directing the RESET instead of the firmware
4563 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4564 		 * bit.
4565 		 */
4566 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4567 
4568 		/*
4569 		 * If we've been given a valid mailbox, first try to get the
4570 		 * firmware to do the RESET.  If that works, great and we can
4571 		 * return success.  Otherwise, if we haven't been given a
4572 		 * valid mailbox or the RESET command failed, fall back to
4573 		 * hitting the chip with a hammer.
4574 		 */
4575 		if (mbox <= M_PCIE_FW_MASTER) {
4576 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4577 			msleep(100);
4578 			if (t4_fw_reset(adap, mbox,
4579 					F_PIORST | F_PIORSTMODE) == 0)
4580 				return 0;
4581 		}
4582 
4583 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4584 		msleep(2000);
4585 	} else {
4586 		int ms;
4587 
4588 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4589 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4590 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4591 				return FW_SUCCESS;
4592 			msleep(100);
4593 			ms += 100;
4594 		}
4595 		return -ETIMEDOUT;
4596 	}
4597 	return 0;
4598 }
4599 
4600 /**
4601  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4602  *	@adap: the adapter
4603  *	@mbox: mailbox to use for the FW RESET command (if desired)
4604  *	@fw_data: the firmware image to write
4605  *	@size: image size
4606  *	@force: force upgrade even if firmware doesn't cooperate
4607  *
4608  *	Perform all of the steps necessary for upgrading an adapter's
4609  *	firmware image.  Normally this requires the cooperation of the
4610  *	existing firmware in order to halt all existing activities
4611  *	but if an invalid mailbox token is passed in we skip that step
4612  *	(though we'll still put the adapter microprocessor into RESET in
4613  *	that case).
4614  *
4615  *	On successful return the new firmware will have been loaded and
4616  *	the adapter will have been fully RESET losing all previous setup
4617  *	state.  On unsuccessful return the adapter may be completely hosed ...
4618  *	positive errno indicates that the adapter is ~probably~ intact, a
4619  *	negative errno indicates that things are looking bad ...
4620  */
4621 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4622 		  const u8 *fw_data, unsigned int size, int force)
4623 {
4624 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4625 	int reset, ret;
4626 
4627 	ret = t4_fw_halt(adap, mbox, force);
4628 	if (ret < 0 && !force)
4629 		return ret;
4630 
4631 	ret = t4_load_fw(adap, fw_data, size);
4632 	if (ret < 0)
4633 		return ret;
4634 
4635 	/*
4636 	 * Older versions of the firmware don't understand the new
4637 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4638 	 * restart.  So for newly loaded older firmware we'll have to do the
4639 	 * RESET for it so it starts up on a clean slate.  We can tell if
4640 	 * the newly loaded firmware will handle this right by checking
4641 	 * its header flags to see if it advertises the capability.
4642 	 */
4643 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4644 	return t4_fw_restart(adap, mbox, reset);
4645 }
4646 
4647 /**
4648  *	t4_fw_initialize - ask FW to initialize the device
4649  *	@adap: the adapter
4650  *	@mbox: mailbox to use for the FW command
4651  *
4652  *	Issues a command to FW to partially initialize the device.  This
4653  *	performs initialization that generally doesn't depend on user input.
4654  */
4655 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4656 {
4657 	struct fw_initialize_cmd c;
4658 
4659 	memset(&c, 0, sizeof(c));
4660 	INIT_CMD(c, INITIALIZE, WRITE);
4661 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4662 }
4663 
4664 /**
4665  *	t4_query_params - query FW or device parameters
4666  *	@adap: the adapter
4667  *	@mbox: mailbox to use for the FW command
4668  *	@pf: the PF
4669  *	@vf: the VF
4670  *	@nparams: the number of parameters
4671  *	@params: the parameter names
4672  *	@val: the parameter values
4673  *
4674  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4675  *	queried at once.
4676  */
4677 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4678 		    unsigned int vf, unsigned int nparams, const u32 *params,
4679 		    u32 *val)
4680 {
4681 	int i, ret;
4682 	struct fw_params_cmd c;
4683 	__be32 *p = &c.param[0].mnem;
4684 
4685 	if (nparams > 7)
4686 		return -EINVAL;
4687 
4688 	memset(&c, 0, sizeof(c));
4689 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4690 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4691 			    V_FW_PARAMS_CMD_VFN(vf));
4692 	c.retval_len16 = htonl(FW_LEN16(c));
4693 
4694 	for (i = 0; i < nparams; i++, p += 2, params++)
4695 		*p = htonl(*params);
4696 
4697 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4698 	if (ret == 0)
4699 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4700 			*val++ = ntohl(*p);
4701 	return ret;
4702 }
4703 
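/*
 * Usage sketch (illustrative only): query a single FW parameter, here the
 * firmware revision (mnemonics from t4fw_interface.h).
 *
 *	u32 param, val;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
 *	if (t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
 *	    &val) == 0)
 *		... val now holds the firmware revision ...
 */
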
4704 /**
4705  *	t4_set_params - sets FW or device parameters
4706  *	@adap: the adapter
4707  *	@mbox: mailbox to use for the FW command
4708  *	@pf: the PF
4709  *	@vf: the VF
4710  *	@nparams: the number of parameters
4711  *	@params: the parameter names
4712  *	@val: the parameter values
4713  *
4714  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4715  *	specified at once.
4716  */
4717 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4718 		  unsigned int vf, unsigned int nparams, const u32 *params,
4719 		  const u32 *val)
4720 {
4721 	struct fw_params_cmd c;
4722 	__be32 *p = &c.param[0].mnem;
4723 
4724 	if (nparams > 7)
4725 		return -EINVAL;
4726 
4727 	memset(&c, 0, sizeof(c));
4728 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4729 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4730 			    V_FW_PARAMS_CMD_VFN(vf));
4731 	c.retval_len16 = htonl(FW_LEN16(c));
4732 
4733 	while (nparams--) {
4734 		*p++ = htonl(*params);
4735 		params++;
4736 		*p++ = htonl(*val);
4737 		val++;
4738 	}
4739 
4740 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4741 }
4742 
4743 /**
4744  *	t4_cfg_pfvf - configure PF/VF resource limits
4745  *	@adap: the adapter
4746  *	@mbox: mailbox to use for the FW command
4747  *	@pf: the PF being configured
4748  *	@vf: the VF being configured
4749  *	@txq: the max number of egress queues
4750  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4751  *	@rxqi: the max number of interrupt-capable ingress queues
4752  *	@rxq: the max number of interruptless ingress queues
4753  *	@tc: the PCI traffic class
4754  *	@vi: the max number of virtual interfaces
4755  *	@cmask: the channel access rights mask for the PF/VF
4756  *	@pmask: the port access rights mask for the PF/VF
4757  *	@nexact: the maximum number of exact MPS filters
4758  *	@rcaps: read capabilities
4759  *	@wxcaps: write/execute capabilities
4760  *
4761  *	Configures resource limits and capabilities for a physical or virtual
4762  *	function.
4763  */
4764 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4765 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4766 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4767 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4768 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4769 {
4770 	struct fw_pfvf_cmd c;
4771 
4772 	memset(&c, 0, sizeof(c));
4773 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4774 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4775 			    V_FW_PFVF_CMD_VFN(vf));
4776 	c.retval_len16 = htonl(FW_LEN16(c));
4777 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4778 			       V_FW_PFVF_CMD_NIQ(rxq));
4779 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4780 			      V_FW_PFVF_CMD_PMASK(pmask) |
4781 			      V_FW_PFVF_CMD_NEQ(txq));
4782 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4783 				V_FW_PFVF_CMD_NEXACTF(nexact));
4784 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4785 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4786 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4787 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4788 }
4789 
4790 /**
4791  *	t4_alloc_vi_func - allocate a virtual interface
4792  *	@adap: the adapter
4793  *	@mbox: mailbox to use for the FW command
4794  *	@port: physical port associated with the VI
4795  *	@pf: the PF owning the VI
4796  *	@vf: the VF owning the VI
4797  *	@nmac: number of MAC addresses needed (1 to 5)
4798  *	@mac: the MAC addresses of the VI
4799  *	@rss_size: size of RSS table slice associated with this VI
4800  *	@portfunc: which Port Application Function MAC Address is desired
4801  *	@idstype: Intrusion Detection Type
4802  *
4803  *	Allocates a virtual interface for the given physical port.  If @mac is
4804  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4805  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4806  *	stored consecutively so the space needed is @nmac * 6 bytes.
4807  *	Returns a negative error number or the non-negative VI id.
4808  */
4809 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4810 		     unsigned int port, unsigned int pf, unsigned int vf,
4811 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4812 		     unsigned int portfunc, unsigned int idstype)
4813 {
4814 	int ret;
4815 	struct fw_vi_cmd c;
4816 
4817 	memset(&c, 0, sizeof(c));
4818 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4819 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4820 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4821 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4822 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4823 			       V_FW_VI_CMD_FUNC(portfunc));
4824 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4825 	c.nmac = nmac - 1;
4826 
4827 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4828 	if (ret)
4829 		return ret;
4830 
4831 	if (mac) {
4832 		memcpy(mac, c.mac, sizeof(c.mac));
4833 		switch (nmac) {
4834 		case 5:
4835 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* FALLTHROUGH */
4836 		case 4:
4837 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* FALLTHROUGH */
4838 		case 3:
4839 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* FALLTHROUGH */
4840 		case 2:
4841 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4842 		}
4843 	}
4844 	if (rss_size)
4845 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4846 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4847 }
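
/*
 * Usage sketch: @mac must provide @nmac * 6 bytes, e.g. for two addresses
 * (identifiers below are illustrative, not taken from this driver):
 *
 *	u8 macs[2 * 6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi_func(adap, mbox, port, pf, vf, 2, macs,
 *	    &rss_size, FW_VI_FUNC_ETH, 0);
 *	if (viid < 0)
 *		return (viid);		/* negative error number */
 */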
4848 
4849 /**
4850  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4851  *	@adap: the adapter
4852  *	@mbox: mailbox to use for the FW command
4853  *	@port: physical port associated with the VI
4854  *	@pf: the PF owning the VI
4855  *	@vf: the VF owning the VI
4856  *	@nmac: number of MAC addresses needed (1 to 5)
4857  *	@mac: the MAC addresses of the VI
4858  *	@rss_size: size of RSS table slice associated with this VI
4859  *
4860  *	Backwards-compatible convenience routine to allocate a Virtual
4861  *	Interface with an Ethernet Port Application Function and Intrusion
4862  *	Detection System disabled.
4863  */
4864 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4865 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4866 		unsigned int *rss_size)
4867 {
4868 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4869 				FW_VI_FUNC_ETH, 0);
4870 }
4871 
4872 /**
4873  *	t4_free_vi - free a virtual interface
4874  *	@adap: the adapter
4875  *	@mbox: mailbox to use for the FW command
4876  *	@pf: the PF owning the VI
4877  *	@vf: the VF owning the VI
4878  *	@viid: virtual interface identifier
4879  *
4880  *	Free a previously allocated virtual interface.
4881  */
4882 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4883 	       unsigned int vf, unsigned int viid)
4884 {
4885 	struct fw_vi_cmd c;
4886 
4887 	memset(&c, 0, sizeof(c));
4888 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4889 			    F_FW_CMD_REQUEST |
4890 			    F_FW_CMD_EXEC |
4891 			    V_FW_VI_CMD_PFN(pf) |
4892 			    V_FW_VI_CMD_VFN(vf));
4893 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4894 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4895 
4896 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4897 }
4898 
4899 /**
4900  *	t4_set_rxmode - set Rx properties of a virtual interface
4901  *	@adap: the adapter
4902  *	@mbox: mailbox to use for the FW command
4903  *	@viid: the VI id
4904  *	@mtu: the new MTU or -1
4905  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4906  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4907  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4908  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4909  *	@sleep_ok: if true we may sleep while awaiting command completion
4910  *
4911  *	Sets Rx properties of a virtual interface.
4912  */
4913 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4914 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4915 		  bool sleep_ok)
4916 {
4917 	struct fw_vi_rxmode_cmd c;
4918 
4919 	/* convert to FW values */
4920 	if (mtu < 0)
4921 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4922 	if (promisc < 0)
4923 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4924 	if (all_multi < 0)
4925 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4926 	if (bcast < 0)
4927 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4928 	if (vlanex < 0)
4929 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4930 
4931 	memset(&c, 0, sizeof(c));
4932 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4933 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4934 	c.retval_len16 = htonl(FW_LEN16(c));
4935 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4936 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4937 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4938 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4939 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4940 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4941 }
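
/*
 * Usage sketch: any of @mtu/@promisc/@all_multi/@bcast/@vlanex may be -1
 * to leave that property untouched, e.g. enabling promiscuous mode only:
 *
 *	ret = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */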
4942 
4943 /**
4944  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4945  *	@adap: the adapter
4946  *	@mbox: mailbox to use for the FW command
4947  *	@viid: the VI id
4948  *	@free: if true any existing filters for this VI id are first removed
4949  *	@naddr: the number of MAC addresses to allocate filters for
4950  *	@addr: the MAC address(es)
4951  *	@idx: where to store the index of each allocated filter
4952  *	@hash: pointer to hash address filter bitmap
4953  *	@sleep_ok: call is allowed to sleep
4954  *
4955  *	Allocates an exact-match filter for each of the supplied addresses and
4956  *	sets it to the corresponding address.  If @idx is not %NULL it should
4957  *	have at least @naddr entries, each of which will be set to the index of
4958  *	the filter allocated for the corresponding MAC address.  If a filter
4959  *	could not be allocated for an address its index is set to 0xffff.
4960  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4961  *	are hashed and the hash filter bitmap pointed at by @hash is updated.
4962  *
4963  *	Returns a negative error number or the number of filters allocated.
4964  */
4965 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4966 		      unsigned int viid, bool free, unsigned int naddr,
4967 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4968 {
4969 	int offset, ret = 0;
4970 	struct fw_vi_mac_cmd c;
4971 	unsigned int nfilters = 0;
4972 	unsigned int max_naddr = is_t4(adap) ?
4973 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
4974 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4975 	unsigned int rem = naddr;
4976 
4977 	if (naddr > max_naddr)
4978 		return -EINVAL;
4979 
4980 	for (offset = 0; offset < naddr; /**/) {
4981 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4982 					 ? rem
4983 					 : ARRAY_SIZE(c.u.exact));
4984 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4985 						     u.exact[fw_naddr]), 16);
4986 		struct fw_vi_mac_exact *p;
4987 		int i;
4988 
4989 		memset(&c, 0, sizeof(c));
4990 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4991 				     F_FW_CMD_REQUEST |
4992 				     F_FW_CMD_WRITE |
4993 				     V_FW_CMD_EXEC(free) |
4994 				     V_FW_VI_MAC_CMD_VIID(viid));
4995 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4996 					    V_FW_CMD_LEN16(len16));
4997 
4998 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4999 			p->valid_to_idx = htons(
5000 				F_FW_VI_MAC_CMD_VALID |
5001 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5002 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5003 		}
5004 
5005 		/*
5006 		 * It's okay if we run out of space in our MAC address arena.
5007 		 * Some of the addresses we submit may get stored so we need
5008 		 * to run through the reply to see what the results were ...
5009 		 */
5010 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5011 		if (ret && ret != -FW_ENOMEM)
5012 			break;
5013 
5014 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5015 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5016 
5017 			if (idx)
5018 				idx[offset+i] = (index >= max_naddr
5019 						 ? 0xffff
5020 						 : index);
5021 			if (index < max_naddr)
5022 				nfilters++;
5023 			else if (hash)
5024 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5025 		}
5026 
5027 		free = false;
5028 		offset += fw_naddr;
5029 		rem -= fw_naddr;
5030 	}
5031 
5032 	if (ret == 0 || ret == -FW_ENOMEM)
5033 		ret = nfilters;
5034 	return ret;
5035 }
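
/*
 * Usage sketch (illustrative identifiers): install two unicast addresses,
 * then fall back to the inexact hash filter for any address that did not
 * get an exact-match slot:
 *
 *	const u8 *addrs[2] = { addr0, addr1 };	/* hypothetical */
 *	u16 filt_idx[2];
 *	u64 mhash = 0;
 *	int n;
 *
 *	n = t4_alloc_mac_filt(adap, mbox, viid, true, 2, addrs, filt_idx,
 *	    &mhash, true);
 *	if (n < 0)
 *		return (n);
 *	if (mhash != 0)
 *		(void) t4_set_addr_hash(adap, mbox, viid, true, mhash, true);
 */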
5036 
5037 /**
5038  *	t4_change_mac - modifies the exact-match filter for a MAC address
5039  *	@adap: the adapter
5040  *	@mbox: mailbox to use for the FW command
5041  *	@viid: the VI id
5042  *	@idx: index of existing filter for old value of MAC address, or -1
5043  *	@addr: the new MAC address value
5044  *	@persist: whether a new MAC allocation should be persistent
5045  *	@add_smt: if true also add the address to the HW SMT
5046  *
5047  *	Modifies an exact-match filter and sets it to the new MAC address if
5048  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5049  *	latter case the address is added persistently if @persist is %true.
5050  *
5051  *	Note that in general it is not possible to modify the value of a given
5052  *	filter so the generic way to modify an address filter is to free the one
5053  *	being used by the old address value and allocate a new filter for the
5054  *	new address value.
5055  *
5056  *	Returns a negative error number or the index of the filter with the new
5057  *	MAC value.  Note that this index may differ from @idx.
5058  */
5059 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5060 		  int idx, const u8 *addr, bool persist, bool add_smt)
5061 {
5062 	int ret, mode;
5063 	struct fw_vi_mac_cmd c;
5064 	struct fw_vi_mac_exact *p = c.u.exact;
5065 	unsigned int max_mac_addr = is_t4(adap) ?
5066 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5067 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5068 
5069 	if (idx < 0)                             /* new allocation */
5070 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5071 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5072 
5073 	memset(&c, 0, sizeof(c));
5074 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5075 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5076 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5077 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5078 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5079 				V_FW_VI_MAC_CMD_IDX(idx));
5080 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5081 
5082 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5083 	if (ret == 0) {
5084 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5085 		if (ret >= max_mac_addr)
5086 			ret = -ENOMEM;
5087 	}
5088 	return ret;
5089 }
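
/*
 * Usage sketch: cache the returned index so the same filter can be
 * rewritten in place the next time the address changes (the cached-index
 * variable below is hypothetical):
 *
 *	ret = t4_change_mac(adap, mbox, viid, cached_idx, new_mac, true,
 *	    true);
 *	if (ret >= 0)
 *		cached_idx = ret;	/* may differ from the old index */
 */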
5090 
5091 /**
5092  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5093  *	@adap: the adapter
5094  *	@mbox: mailbox to use for the FW command
5095  *	@viid: the VI id
5096  *	@ucast: whether the hash filter should also match unicast addresses
5097  *	@vec: the value to be written to the hash filter
5098  *	@sleep_ok: call is allowed to sleep
5099  *
5100  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5101  */
5102 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5103 		     bool ucast, u64 vec, bool sleep_ok)
5104 {
5105 	struct fw_vi_mac_cmd c;
5106 
5107 	memset(&c, 0, sizeof(c));
5108 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5109 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5110 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5111 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5112 				    V_FW_CMD_LEN16(1));
5113 	c.u.hash.hashvec = cpu_to_be64(vec);
5114 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5115 }
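
/*
 * Usage sketch: each address contributes one bit to @vec, selected by
 * hash_mac_addr(), e.g. when building the vector for a multicast list:
 *
 *	u64 vec = 0;
 *
 *	vec |= 1ULL << hash_mac_addr(mcaddr);	/* once per address */
 *	ret = t4_set_addr_hash(adap, mbox, viid, false, vec, true);
 */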
5116 
5117 /**
5118  *	t4_enable_vi - enable/disable a virtual interface
5119  *	@adap: the adapter
5120  *	@mbox: mailbox to use for the FW command
5121  *	@viid: the VI id
5122  *	@rx_en: 1=enable Rx, 0=disable Rx
5123  *	@tx_en: 1=enable Tx, 0=disable Tx
5124  *
5125  *	Enables/disables a virtual interface.
5126  */
5127 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5128 		 bool rx_en, bool tx_en)
5129 {
5130 	struct fw_vi_enable_cmd c;
5131 
5132 	memset(&c, 0, sizeof(c));
5133 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5134 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5135 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5136 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5137 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5138 }
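
/*
 * Usage sketch: quiesce a VI without freeing it by disabling both
 * directions, then re-enable them later:
 *
 *	ret = t4_enable_vi(adap, mbox, viid, false, false);
 *	...
 *	ret = t4_enable_vi(adap, mbox, viid, true, true);
 */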
5139 
5140 /**
5141  *	t4_identify_port - identify a VI's port by blinking its LED
5142  *	@adap: the adapter
5143  *	@mbox: mailbox to use for the FW command
5144  *	@viid: the VI id
5145  *	@nblinks: how many times to blink LED at 2.5 Hz
5146  *
5147  *	Identifies a VI's port by blinking its LED.
5148  */
5149 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5150 		     unsigned int nblinks)
5151 {
5152 	struct fw_vi_enable_cmd c;
5153 
5154 	memset(&c, 0, sizeof(c));
5155 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5156 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5157 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5158 	c.blinkdur = htons(nblinks);
5159 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5160 }
5161 
5162 /**
5163  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5164  *	@adap: the adapter
5165  *	@mbox: mailbox to use for the FW command
5166  *	@start: %true to enable the queues, %false to disable them
5167  *	@pf: the PF owning the queues
5168  *	@vf: the VF owning the queues
5169  *	@iqid: ingress queue id
5170  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5171  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5172  *
5173  *	Starts or stops an ingress queue and its associated FLs, if any.
5174  */
5175 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5176 		     unsigned int pf, unsigned int vf, unsigned int iqid,
5177 		     unsigned int fl0id, unsigned int fl1id)
5178 {
5179 	struct fw_iq_cmd c;
5180 
5181 	memset(&c, 0, sizeof(c));
5182 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5183 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5184 			    V_FW_IQ_CMD_VFN(vf));
5185 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5186 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5187 	c.iqid = htons(iqid);
5188 	c.fl0id = htons(fl0id);
5189 	c.fl1id = htons(fl1id);
5190 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5191 }
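
/*
 * Usage sketch: stop an ingress queue that has only FL0 attached, passing
 * 0xffff for the absent FL1:
 *
 *	ret = t4_iq_start_stop(adap, mbox, false, pf, vf, iqid, fl0id,
 *	    0xffff);
 */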
5192 
5193 /**
5194  *	t4_iq_free - free an ingress queue and its FLs
5195  *	@adap: the adapter
5196  *	@mbox: mailbox to use for the FW command
5197  *	@pf: the PF owning the queues
5198  *	@vf: the VF owning the queues
5199  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5200  *	@iqid: ingress queue id
5201  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5202  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5203  *
5204  *	Frees an ingress queue and its associated FLs, if any.
5205  */
5206 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5207 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5208 	       unsigned int fl0id, unsigned int fl1id)
5209 {
5210 	struct fw_iq_cmd c;
5211 
5212 	memset(&c, 0, sizeof(c));
5213 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5214 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5215 			    V_FW_IQ_CMD_VFN(vf));
5216 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5217 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5218 	c.iqid = htons(iqid);
5219 	c.fl0id = htons(fl0id);
5220 	c.fl1id = htons(fl1id);
5221 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5222 }
5223 
5224 /**
5225  *	t4_eth_eq_free - free an Ethernet egress queue
5226  *	@adap: the adapter
5227  *	@mbox: mailbox to use for the FW command
5228  *	@pf: the PF owning the queue
5229  *	@vf: the VF owning the queue
5230  *	@eqid: egress queue id
5231  *
5232  *	Frees an Ethernet egress queue.
5233  */
5234 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5235 		   unsigned int vf, unsigned int eqid)
5236 {
5237 	struct fw_eq_eth_cmd c;
5238 
5239 	memset(&c, 0, sizeof(c));
5240 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5241 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5242 			    V_FW_EQ_ETH_CMD_VFN(vf));
5243 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5244 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5245 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5246 }
5247 
5248 /**
5249  *	t4_ctrl_eq_free - free a control egress queue
5250  *	@adap: the adapter
5251  *	@mbox: mailbox to use for the FW command
5252  *	@pf: the PF owning the queue
5253  *	@vf: the VF owning the queue
5254  *	@eqid: egress queue id
5255  *
5256  *	Frees a control egress queue.
5257  */
5258 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5259 		    unsigned int vf, unsigned int eqid)
5260 {
5261 	struct fw_eq_ctrl_cmd c;
5262 
5263 	memset(&c, 0, sizeof(c));
5264 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5265 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5266 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5267 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5268 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5269 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5270 }
5271 
5272 /**
5273  *	t4_ofld_eq_free - free an offload egress queue
5274  *	@adap: the adapter
5275  *	@mbox: mailbox to use for the FW command
5276  *	@pf: the PF owning the queue
5277  *	@vf: the VF owning the queue
5278  *	@eqid: egress queue id
5279  *
5280  *	Frees an offload egress queue.
5281  */
5282 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5283 		    unsigned int vf, unsigned int eqid)
5284 {
5285 	struct fw_eq_ofld_cmd c;
5286 
5287 	memset(&c, 0, sizeof(c));
5288 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5289 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5290 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5291 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5292 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5293 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5294 }
5295 
5296 /**
5297  *	t4_handle_fw_rpl - process a FW reply message
5298  *	@adap: the adapter
5299  *	@rpl: start of the FW message
5300  *
5301  *	Processes a FW message, such as link state change messages.
5302  */
5303 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5304 {
5305 	u8 opcode = *(const u8 *)rpl;
5306 	const struct fw_port_cmd *p = (const void *)rpl;
5307 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5308 
5309 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5310 		/* link/module state change message */
5311 		int speed = 0, fc = 0, i;
5312 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5313 		struct port_info *pi = NULL;
5314 		struct link_config *lc;
5315 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5316 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5317 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5318 
5319 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5320 			fc |= PAUSE_RX;
5321 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5322 			fc |= PAUSE_TX;
5323 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5324 			speed = SPEED_100;
5325 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5326 			speed = SPEED_1000;
5327 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5328 			speed = SPEED_10000;
5329 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5330 			speed = SPEED_40000;
5331 
5332 		for_each_port(adap, i) {
5333 			pi = adap2pinfo(adap, i);
5334 			if (pi->tx_chan == chan)
5335 				break;
5336 		}
5337 		lc = &pi->link_cfg;
5338 
5339 		if (link_ok != lc->link_ok || speed != lc->speed ||
5340 		    fc != lc->fc) {                    /* something changed */
5341 			lc->link_ok = link_ok;
5342 			lc->speed = speed;
5343 			lc->fc = fc;
5344 			lc->supported = ntohs(p->u.info.pcap);
5345 			t4_os_link_changed(adap, i, link_ok);
5346 		}
5347 		if (mod != pi->mod_type) {
5348 			pi->mod_type = mod;
5349 			t4_os_portmod_changed(adap, i);
5350 		}
5351 	} else {
5352 		CH_WARN_RATELIMIT(adap,
5353 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5354 		return -EINVAL;
5355 	}
5356 	return 0;
5357 }
5358 
5359 /**
5360  *	get_pci_mode - determine a card's PCI mode
5361  *	@adapter: the adapter
5362  *	@p: where to store the PCI settings
5363  *
5364  *	Determines a card's PCI mode and associated parameters, such as speed
5365  *	and width.
5366  */
5367 static void __devinit get_pci_mode(struct adapter *adapter,
5368 				   struct pci_params *p)
5369 {
5370 	u16 val;
5371 	u32 pcie_cap;
5372 
5373 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5374 	if (pcie_cap) {
5375 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5376 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5377 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5378 	}
5379 }
5380 
5381 /**
5382  *	init_link_config - initialize a link's SW state
5383  *	@lc: structure holding the link state
5384  *	@caps: link capabilities
5385  *
5386  *	Initializes the SW state maintained for each link, including the link's
5387  *	capabilities and default speed/flow-control/autonegotiation settings.
5388  */
5389 static void __devinit init_link_config(struct link_config *lc,
5390 				       unsigned int caps)
5391 {
5392 	lc->supported = caps;
5393 	lc->requested_speed = 0;
5394 	lc->speed = 0;
5395 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5396 	if (lc->supported & FW_PORT_CAP_ANEG) {
5397 		lc->advertising = lc->supported & ADVERT_MASK;
5398 		lc->autoneg = AUTONEG_ENABLE;
5399 		lc->requested_fc |= PAUSE_AUTONEG;
5400 	} else {
5401 		lc->advertising = 0;
5402 		lc->autoneg = AUTONEG_DISABLE;
5403 	}
5404 }
5405 
5406 static int __devinit get_flash_params(struct adapter *adapter)
5407 {
5408 	int ret;
5409 	u32 info = 0;
5410 
5411 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5412 	if (!ret)
5413 		ret = sf1_read(adapter, 3, 0, 1, &info);
5414 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5415 	if (ret < 0)
5416 		return ret;
5417 
5418 	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
5419 		return -EINVAL;
5420 	info >>= 16;                           /* log2 of size */
5421 	if (info >= 0x14 && info < 0x18)
5422 		adapter->params.sf_nsec = 1 << (info - 16);
5423 	else if (info == 0x18)
5424 		adapter->params.sf_nsec = 64;
5425 	else
5426 		return -EINVAL;
5427 	adapter->params.sf_size = 1 << info;
5428 	return 0;
5429 }
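
/*
 * Worked example of the decode above: an ID byte of 0x20 with a density
 * field of 0x17 yields sf_nsec = 1 << (0x17 - 16) = 128 sectors and
 * sf_size = 1 << 0x17 = 8 MB (64 KB sectors); density 0x18 is the 16 MB
 * part with 64 sectors of 256 KB.
 */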
5430 
5431 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5432 						  u8 range)
5433 {
5434 	u16 val;
5435 	u32 pcie_cap;
5436 
5437 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5438 	if (pcie_cap) {
5439 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5440 		val &= 0xfff0;
5441 		val |= range;
5442 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5443 	}
5444 }
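
/*
 * The low four bits of PCI_EXP_DEVCTL2 encode the completion timeout
 * range; the 0xd that t4_prep_adapter() passes below selects the
 * 4 s to 13 s range (hence the "4 seconds" comment there).
 */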
5445 
5446 /**
5447  *	t4_prep_adapter - prepare SW and HW for operation
5448  *	@adapter: the adapter
5450  *
5451  *	Initialize adapter SW state for the various HW modules and set
5452  *	initial values for some adapter tunables.
5454  */
5455 int __devinit t4_prep_adapter(struct adapter *adapter)
5456 {
5457 	int ret;
5458 	uint16_t device_id;
5459 	uint32_t pl_rev;
5460 
5461 	get_pci_mode(adapter, &adapter->params.pci);
5462 
5463 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5464 	adapter->params.chipid = G_CHIPID(pl_rev);
5465 	adapter->params.rev = G_REV(pl_rev);
5466 	if (adapter->params.chipid == 0) {
5467 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5468 		adapter->params.chipid = CHELSIO_T4;
5469 
5470 		/* T4A1 chip is not supported */
5471 		if (adapter->params.rev == 1) {
5472 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5473 			return -EINVAL;
5474 		}
5475 	}
5476 	adapter->params.pci.vpd_cap_addr =
5477 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5478 
5479 	ret = get_flash_params(adapter);
5480 	if (ret < 0)
5481 		return ret;
5482 
5483 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5484 	if (ret < 0)
5485 		return ret;
5486 
5487 	/* Cards with real ASICs have the chipid in the PCIe device id */
5488 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5489 	if (device_id >> 12 == adapter->params.chipid)
5490 		adapter->params.cim_la_size = CIMLA_SIZE;
5491 	else {
5492 		/* FPGA */
5493 		adapter->params.fpga = 1;
5494 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5495 	}
5496 
5497 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5498 
5499 	/*
5500 	 * Default port and clock for debugging in case we can't reach FW.
5501 	 */
5502 	adapter->params.nports = 1;
5503 	adapter->params.portvec = 1;
5504 	adapter->params.vpd.cclk = 50000;
5505 
5506 	/* Set PCIe completion timeout value to 4 seconds. */
5507 	set_pcie_completion_timeout(adapter, 0xd);
5508 	return 0;
5509 }
5510 
5511 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5512 {
5513 	u8 addr[6];
5514 	int ret, i, j;
5515 	struct fw_port_cmd c;
5516 	unsigned int rss_size;
5517 	adapter_t *adap = p->adapter;
5518 
5519 	memset(&c, 0, sizeof(c));
5520 
5521 	for (i = 0, j = -1; i <= p->port_id; i++) {
5522 		do {
5523 			j++;
5524 		} while ((adap->params.portvec & (1 << j)) == 0);
5525 	}
5526 
5527 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5528 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5529 			       V_FW_PORT_CMD_PORTID(j));
5530 	c.action_to_len16 = htonl(
5531 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5532 		FW_LEN16(c));
5533 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5534 	if (ret)
5535 		return ret;
5536 
5537 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5538 	if (ret < 0)
5539 		return ret;
5540 
5541 	p->viid = ret;
5542 	p->tx_chan = j;
5543 	p->lport = j;
5544 	p->rss_size = rss_size;
5545 	t4_os_set_hw_addr(adap, p->port_id, addr);
5546 
5547 	ret = ntohl(c.u.info.lstatus_to_modtype);
5548 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5549 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5550 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5551 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5552 
5553 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5554 
5555 	return 0;
5556 }
5557