xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision e1a528369708afb723290916ad8ea9c79399e933)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
34 
35 #include "common.h"
36 #include "t4_regs.h"
37 #include "t4_regs_values.h"
38 #include "firmware/t4fw_interface.h"
39 
40 #undef msleep
41 #define msleep(x) do { \
42 	if (cold) \
43 		DELAY((x) * 1000); \
44 	else \
45 		pause("t4hw", (x) * hz / 1000); \
46 } while (0)
47 
48 /**
49  *	t4_wait_op_done_val - wait until an operation is completed
50  *	@adapter: the adapter performing the operation
51  *	@reg: the register to check for completion
52  *	@mask: a single-bit field within @reg that indicates completion
53  *	@polarity: the value of the field when the operation is completed
54  *	@attempts: number of check iterations
55  *	@delay: delay in usecs between iterations
56  *	@valp: where to store the value of the register at completion time
57  *
58  *	Wait until an operation is completed by checking a bit in a register
59  *	up to @attempts times.  If @valp is not NULL the value of the register
60  *	at the time it indicated completion is stored there.  Returns 0 if the
61  *	operation completes and	-EAGAIN	otherwise.
62  */
63 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64 		        int polarity, int attempts, int delay, u32 *valp)
65 {
66 	while (1) {
67 		u32 val = t4_read_reg(adapter, reg);
68 
69 		if (!!(val & mask) == polarity) {
70 			if (valp)
71 				*valp = val;
72 			return 0;
73 		}
74 		if (--attempts == 0)
75 			return -EAGAIN;
76 		if (delay)
77 			udelay(delay);
78 	}
79 }
80 
81 /**
82  *	t4_set_reg_field - set a register field to a value
83  *	@adapter: the adapter to program
84  *	@addr: the register address
85  *	@mask: specifies the portion of the register to modify
86  *	@val: the new value for the register field
87  *
88  *	Sets a register field specified by the supplied mask to the
89  *	given value.
90  */
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 		      u32 val)
93 {
94 	u32 v = t4_read_reg(adapter, addr) & ~mask;
95 
96 	t4_write_reg(adapter, addr, v | val);
97 	(void) t4_read_reg(adapter, addr);      /* flush */
98 }
99 
100 /**
101  *	t4_read_indirect - read indirectly addressed registers
102  *	@adap: the adapter
103  *	@addr_reg: register holding the indirect address
104  *	@data_reg: register holding the value of the indirect register
105  *	@vals: where the read register values are stored
106  *	@nregs: how many indirect registers to read
107  *	@start_idx: index of first indirect register to read
108  *
109  *	Reads registers that are accessed indirectly through an address/data
110  *	register pair.
111  */
112 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
114 		      unsigned int start_idx)
115 {
116 	while (nregs--) {
117 		t4_write_reg(adap, addr_reg, start_idx);
118 		*vals++ = t4_read_reg(adap, data_reg);
119 		start_idx++;
120 	}
121 }
122 
123 /**
124  *	t4_write_indirect - write indirectly addressed registers
125  *	@adap: the adapter
126  *	@addr_reg: register holding the indirect addresses
127  *	@data_reg: register holding the value for the indirect registers
128  *	@vals: values to write
129  *	@nregs: how many indirect registers to write
130  *	@start_idx: address of first indirect register to write
131  *
132  *	Writes a sequential block of registers that are accessed indirectly
133  *	through an address/data register pair.
134  */
135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 		       unsigned int data_reg, const u32 *vals,
137 		       unsigned int nregs, unsigned int start_idx)
138 {
139 	while (nregs--) {
140 		t4_write_reg(adap, addr_reg, start_idx++);
141 		t4_write_reg(adap, data_reg, *vals++);
142 	}
143 }
144 
145 /*
146  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147  * mechanism.  This guarantees that we get the real value even if we're
148  * operating within a Virtual Machine and the Hypervisor is trapping our
149  * Configuration Space accesses.
150  */
151 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
152 {
153 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
154 		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
155 		     V_REGISTER(reg));
156 	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
157 }
158 
159 /*
160  *	t4_report_fw_error - report firmware error
161  *	@adap: the adapter
162  *
163  *	The adapter firmware can indicate error conditions to the host.
164  *	This routine prints out the reason for the firmware error (as
165  *	reported by the firmware).
166  */
167 static void t4_report_fw_error(struct adapter *adap)
168 {
169 	static const char *reason[] = {
170 		"Crash",			/* PCIE_FW_EVAL_CRASH */
171 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
172 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
173 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
174 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
175 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
176 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
177 		"Reserved",			/* reserved */
178 	};
179 	u32 pcie_fw;
180 
181 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
182 	if (pcie_fw & F_PCIE_FW_ERR)
183 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
184 		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
185 }
186 
187 /*
188  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
189  */
190 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
191 			 u32 mbox_addr)
192 {
193 	for ( ; nflit; nflit--, mbox_addr += 8)
194 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
195 }
196 
197 /*
198  * Handle a FW assertion reported in a mailbox.
199  */
200 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
201 {
202 	struct fw_debug_cmd asrt;
203 
204 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
205 	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
206 		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
207 		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
208 }
209 
210 #define X_CIM_PF_NOACCESS 0xeeeeeeee
211 /**
212  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
213  *	@adap: the adapter
214  *	@mbox: index of the mailbox to use
215  *	@cmd: the command to write
216  *	@size: command length in bytes
217  *	@rpl: where to optionally store the reply
218  *	@sleep_ok: if true we may sleep while awaiting command completion
219  *
220  *	Sends the given command to FW through the selected mailbox and waits
221  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
222  *	store the FW's reply to the command.  The command and its optional
223  *	reply are of the same length.  Some FW commands like RESET and
224  *	INITIALIZE can take a considerable amount of time to execute.
225  *	@sleep_ok determines whether we may sleep while awaiting the response.
226  *	If sleeping is allowed we use progressive backoff otherwise we spin.
227  *
228  *	The return value is 0 on success or a negative errno on failure.  A
229  *	failure can happen either because we are not able to execute the
230  *	command or FW executes it but signals an error.  In the latter case
231  *	the return value is the error code indicated by FW (negated).
232  */
233 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
234 		    void *rpl, bool sleep_ok)
235 {
236 	/*
237 	 * We delay in small increments at first in an effort to maintain
238 	 * responsiveness for simple, fast executing commands but then back
239 	 * off to larger delays to a maximum retry delay.
240 	 */
241 	static const int delay[] = {
242 		1, 1, 3, 5, 10, 10, 20, 50, 100
243 	};
244 
245 	u32 v;
246 	u64 res;
247 	int i, ms, delay_idx;
248 	const __be64 *p = cmd;
249 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
250 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
251 
252 	if ((size & 15) || size > MBOX_LEN)
253 		return -EINVAL;
254 
255 	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
256 	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
257 		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
258 
259 	if (v != X_MBOWNER_PL)
260 		return v ? -EBUSY : -ETIMEDOUT;
261 
262 	for (i = 0; i < size; i += 8, p++)
263 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
264 
265 	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
266 	t4_read_reg(adap, ctl_reg);          /* flush write */
267 
268 	delay_idx = 0;
269 	ms = delay[0];
270 
271 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
272 		if (sleep_ok) {
273 			ms = delay[delay_idx];  /* last element may repeat */
274 			if (delay_idx < ARRAY_SIZE(delay) - 1)
275 				delay_idx++;
276 			msleep(ms);
277 		} else
278 			mdelay(ms);
279 
280 		v = t4_read_reg(adap, ctl_reg);
281 		if (v == X_CIM_PF_NOACCESS)
282 			continue;
283 		if (G_MBOWNER(v) == X_MBOWNER_PL) {
284 			if (!(v & F_MBMSGVALID)) {
285 				t4_write_reg(adap, ctl_reg,
286 					     V_MBOWNER(X_MBOWNER_NONE));
287 				continue;
288 			}
289 
290 			res = t4_read_reg64(adap, data_reg);
291 			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
292 				fw_asrt(adap, data_reg);
293 				res = V_FW_CMD_RETVAL(EIO);
294 			} else if (rpl)
295 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
296 			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
297 			return -G_FW_CMD_RETVAL((int)res);
298 		}
299 	}
300 
301 	/*
302 	 * We timed out waiting for a reply to our mailbox command.  Report
303 	 * the error and also check to see if the firmware reported any
304 	 * errors ...
305 	 */
306 	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
307 	       *(const u8 *)cmd, mbox);
308 	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
309 		t4_report_fw_error(adap);
310 	return -ETIMEDOUT;
311 }
312 
313 /**
314  *	t4_mc_read - read from MC through backdoor accesses
315  *	@adap: the adapter
316  *	@idx: which MC to access
317  *	@addr: address of first byte requested
318  *	@data: 64 bytes of data containing the requested address
319  *	@ecc: where to store the corresponding 64-bit ECC word
320  *
321  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
322  *	that covers the requested address @addr.  If @parity is not %NULL it
323  *	is assigned the 64-bit ECC word for the read data.
324  */
325 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
326 {
327 	int i;
328 	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
329 	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
330 
331 	if (is_t4(adap)) {
332 		mc_bist_cmd_reg = A_MC_BIST_CMD;
333 		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
334 		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
335 		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
336 		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
337 	} else {
338 		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
339 		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
340 		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
341 		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
342 						  idx);
343 		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
344 						  idx);
345 	}
346 
347 	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
348 		return -EBUSY;
349 	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
350 	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
351 	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
352 	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
353 		     F_START_BIST | V_BIST_CMD_GAP(1));
354 	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
355 	if (i)
356 		return i;
357 
358 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
359 
360 	for (i = 15; i >= 0; i--)
361 		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
362 	if (ecc)
363 		*ecc = t4_read_reg64(adap, MC_DATA(16));
364 #undef MC_DATA
365 	return 0;
366 }
367 
368 /**
369  *	t4_edc_read - read from EDC through backdoor accesses
370  *	@adap: the adapter
371  *	@idx: which EDC to access
372  *	@addr: address of first byte requested
373  *	@data: 64 bytes of data containing the requested address
374  *	@ecc: where to store the corresponding 64-bit ECC word
375  *
376  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
377  *	that covers the requested address @addr.  If @parity is not %NULL it
378  *	is assigned the 64-bit ECC word for the read data.
379  */
380 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
381 {
382 	int i;
383 	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
384 	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
385 
386 	if (is_t4(adap)) {
387 		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
388 		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
389 		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
390 		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
391 						    idx);
392 		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
393 						    idx);
394 	} else {
395 /*
396  * These macro are missing in t4_regs.h file.
397  * Added temporarily for testing.
398  */
399 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
400 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
401 		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
402 		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
403 		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
404 		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
405 						    idx);
406 		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
407 						    idx);
408 #undef EDC_REG_T5
409 #undef EDC_STRIDE_T5
410 	}
411 
412 	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
413 		return -EBUSY;
414 	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
415 	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
416 	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
417 	t4_write_reg(adap, edc_bist_cmd_reg,
418 		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
419 	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
420 	if (i)
421 		return i;
422 
423 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
424 
425 	for (i = 15; i >= 0; i--)
426 		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
427 	if (ecc)
428 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
429 #undef EDC_DATA
430 	return 0;
431 }
432 
433 /**
434  *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
435  *	@adap: the adapter
436  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
437  *	@addr: address within indicated memory type
438  *	@len: amount of memory to read
439  *	@buf: host memory buffer
440  *
441  *	Reads an [almost] arbitrary memory region in the firmware: the
442  *	firmware memory address, length and host buffer must be aligned on
443  *	32-bit boudaries.  The memory is returned as a raw byte sequence from
444  *	the firmware's memory.  If this memory contains data structures which
445  *	contain multi-byte integers, it's the callers responsibility to
446  *	perform appropriate byte order conversions.
447  */
448 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
449 		__be32 *buf)
450 {
451 	u32 pos, start, end, offset;
452 	int ret;
453 
454 	/*
455 	 * Argument sanity checks ...
456 	 */
457 	if ((addr & 0x3) || (len & 0x3))
458 		return -EINVAL;
459 
460 	/*
461 	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
462 	 * need to round down the start and round up the end.  We'll start
463 	 * copying out of the first line at (addr - start) a word at a time.
464 	 */
465 	start = addr & ~(64-1);
466 	end = (addr + len + 64-1) & ~(64-1);
467 	offset = (addr - start)/sizeof(__be32);
468 
469 	for (pos = start; pos < end; pos += 64, offset = 0) {
470 		__be32 data[16];
471 
472 		/*
473 		 * Read the chip's memory block and bail if there's an error.
474 		 */
475 		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
476 			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
477 		else
478 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
479 		if (ret)
480 			return ret;
481 
482 		/*
483 		 * Copy the data into the caller's memory buffer.
484 		 */
485 		while (offset < 16 && len > 0) {
486 			*buf++ = data[offset++];
487 			len -= sizeof(__be32);
488 		}
489 	}
490 
491 	return 0;
492 }
493 
494 /*
495  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
496  * VPD-R header.
497  */
498 struct t4_vpd_hdr {
499 	u8  id_tag;
500 	u8  id_len[2];
501 	u8  id_data[ID_LEN];
502 	u8  vpdr_tag;
503 	u8  vpdr_len[2];
504 };
505 
506 /*
507  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
508  */
509 #define EEPROM_MAX_RD_POLL 40
510 #define EEPROM_MAX_WR_POLL 6
511 #define EEPROM_STAT_ADDR   0x7bfc
512 #define VPD_BASE           0x400
513 #define VPD_BASE_OLD       0
514 #define VPD_LEN            1024
515 #define VPD_INFO_FLD_HDR_SIZE	3
516 #define CHELSIO_VPD_UNIQUE_ID 0x82
517 
518 /**
519  *	t4_seeprom_read - read a serial EEPROM location
520  *	@adapter: adapter to read
521  *	@addr: EEPROM virtual address
522  *	@data: where to store the read data
523  *
524  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
525  *	VPD capability.  Note that this function must be called with a virtual
526  *	address.
527  */
528 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
529 {
530 	u16 val;
531 	int attempts = EEPROM_MAX_RD_POLL;
532 	unsigned int base = adapter->params.pci.vpd_cap_addr;
533 
534 	if (addr >= EEPROMVSIZE || (addr & 3))
535 		return -EINVAL;
536 
537 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
538 	do {
539 		udelay(10);
540 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
541 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
542 
543 	if (!(val & PCI_VPD_ADDR_F)) {
544 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
545 		return -EIO;
546 	}
547 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
548 	*data = le32_to_cpu(*data);
549 	return 0;
550 }
551 
552 /**
553  *	t4_seeprom_write - write a serial EEPROM location
554  *	@adapter: adapter to write
555  *	@addr: virtual EEPROM address
556  *	@data: value to write
557  *
558  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
559  *	VPD capability.  Note that this function must be called with a virtual
560  *	address.
561  */
562 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
563 {
564 	u16 val;
565 	int attempts = EEPROM_MAX_WR_POLL;
566 	unsigned int base = adapter->params.pci.vpd_cap_addr;
567 
568 	if (addr >= EEPROMVSIZE || (addr & 3))
569 		return -EINVAL;
570 
571 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
572 				 cpu_to_le32(data));
573 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
574 				 (u16)addr | PCI_VPD_ADDR_F);
575 	do {
576 		msleep(1);
577 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
578 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
579 
580 	if (val & PCI_VPD_ADDR_F) {
581 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
582 		return -EIO;
583 	}
584 	return 0;
585 }
586 
587 /**
588  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
589  *	@phys_addr: the physical EEPROM address
590  *	@fn: the PCI function number
591  *	@sz: size of function-specific area
592  *
593  *	Translate a physical EEPROM address to virtual.  The first 1K is
594  *	accessed through virtual addresses starting at 31K, the rest is
595  *	accessed through virtual addresses starting at 0.
596  *
597  *	The mapping is as follows:
598  *	[0..1K) -> [31K..32K)
599  *	[1K..1K+A) -> [ES-A..ES)
600  *	[1K+A..ES) -> [0..ES-A-1K)
601  *
602  *	where A = @fn * @sz, and ES = EEPROM size.
603  */
604 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
605 {
606 	fn *= sz;
607 	if (phys_addr < 1024)
608 		return phys_addr + (31 << 10);
609 	if (phys_addr < 1024 + fn)
610 		return EEPROMSIZE - fn + phys_addr - 1024;
611 	if (phys_addr < EEPROMSIZE)
612 		return phys_addr - 1024 - fn;
613 	return -EINVAL;
614 }
615 
616 /**
617  *	t4_seeprom_wp - enable/disable EEPROM write protection
618  *	@adapter: the adapter
619  *	@enable: whether to enable or disable write protection
620  *
621  *	Enables or disables write protection on the serial EEPROM.
622  */
623 int t4_seeprom_wp(struct adapter *adapter, int enable)
624 {
625 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
626 }
627 
628 /**
629  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
630  *	@v: Pointer to buffered vpd data structure
631  *	@kw: The keyword to search for
632  *
633  *	Returns the value of the information field keyword or
634  *	-ENOENT otherwise.
635  */
636 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
637 {
638          int i;
639 	 unsigned int offset , len;
640 	 const u8 *buf = &v->id_tag;
641 	 const u8 *vpdr_len = &v->vpdr_tag;
642 	 offset = sizeof(struct t4_vpd_hdr);
643 	 len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
644 
645 	 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
646 		 return -ENOENT;
647 	 }
648 
649          for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
650 		 if(memcmp(buf + i , kw , 2) == 0){
651 			 i += VPD_INFO_FLD_HDR_SIZE;
652                          return i;
653 		  }
654 
655                  i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
656          }
657 
658          return -ENOENT;
659 }
660 
661 
662 /**
663  *	get_vpd_params - read VPD parameters from VPD EEPROM
664  *	@adapter: adapter to read
665  *	@p: where to store the parameters
666  *
667  *	Reads card parameters stored in VPD EEPROM.
668  */
669 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
670 {
671 	int i, ret, addr;
672 	int ec, sn, pn, na;
673 	u8 vpd[VPD_LEN], csum;
674 	const struct t4_vpd_hdr *v;
675 
676 	/*
677 	 * Card information normally starts at VPD_BASE but early cards had
678 	 * it at 0.
679 	 */
680 	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
681 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
682 
683 	for (i = 0; i < sizeof(vpd); i += 4) {
684 		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
685 		if (ret)
686 			return ret;
687 	}
688  	v = (const struct t4_vpd_hdr *)vpd;
689 
690 #define FIND_VPD_KW(var,name) do { \
691 	var = get_vpd_keyword_val(v , name); \
692 	if (var < 0) { \
693 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
694 		return -EINVAL; \
695 	} \
696 } while (0)
697 
698 	FIND_VPD_KW(i, "RV");
699 	for (csum = 0; i >= 0; i--)
700 		csum += vpd[i];
701 
702 	if (csum) {
703 		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
704 		return -EINVAL;
705 	}
706 	FIND_VPD_KW(ec, "EC");
707 	FIND_VPD_KW(sn, "SN");
708 	FIND_VPD_KW(pn, "PN");
709 	FIND_VPD_KW(na, "NA");
710 #undef FIND_VPD_KW
711 
712 	memcpy(p->id, v->id_data, ID_LEN);
713 	strstrip(p->id);
714 	memcpy(p->ec, vpd + ec, EC_LEN);
715 	strstrip(p->ec);
716 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
717 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
718 	strstrip(p->sn);
719 	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
720 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
721 	strstrip((char *)p->pn);
722 	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
723 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
724 	strstrip((char *)p->na);
725 
726 	return 0;
727 }
728 
729 /* serial flash and firmware constants and flash config file constants */
730 enum {
731 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
732 
733 	/* flash command opcodes */
734 	SF_PROG_PAGE    = 2,          /* program page */
735 	SF_WR_DISABLE   = 4,          /* disable writes */
736 	SF_RD_STATUS    = 5,          /* read status register */
737 	SF_WR_ENABLE    = 6,          /* enable writes */
738 	SF_RD_DATA_FAST = 0xb,        /* read flash */
739 	SF_RD_ID        = 0x9f,       /* read ID */
740 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
741 };
742 
743 /**
744  *	sf1_read - read data from the serial flash
745  *	@adapter: the adapter
746  *	@byte_cnt: number of bytes to read
747  *	@cont: whether another operation will be chained
748  *	@lock: whether to lock SF for PL access only
749  *	@valp: where to store the read data
750  *
751  *	Reads up to 4 bytes of data from the serial flash.  The location of
752  *	the read needs to be specified prior to calling this by issuing the
753  *	appropriate commands to the serial flash.
754  */
755 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
756 		    int lock, u32 *valp)
757 {
758 	int ret;
759 
760 	if (!byte_cnt || byte_cnt > 4)
761 		return -EINVAL;
762 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
763 		return -EBUSY;
764 	t4_write_reg(adapter, A_SF_OP,
765 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
766 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
767 	if (!ret)
768 		*valp = t4_read_reg(adapter, A_SF_DATA);
769 	return ret;
770 }
771 
772 /**
773  *	sf1_write - write data to the serial flash
774  *	@adapter: the adapter
775  *	@byte_cnt: number of bytes to write
776  *	@cont: whether another operation will be chained
777  *	@lock: whether to lock SF for PL access only
778  *	@val: value to write
779  *
780  *	Writes up to 4 bytes of data to the serial flash.  The location of
781  *	the write needs to be specified prior to calling this by issuing the
782  *	appropriate commands to the serial flash.
783  */
784 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
785 		     int lock, u32 val)
786 {
787 	if (!byte_cnt || byte_cnt > 4)
788 		return -EINVAL;
789 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
790 		return -EBUSY;
791 	t4_write_reg(adapter, A_SF_DATA, val);
792 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
793 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
794 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
795 }
796 
797 /**
798  *	flash_wait_op - wait for a flash operation to complete
799  *	@adapter: the adapter
800  *	@attempts: max number of polls of the status register
801  *	@delay: delay between polls in ms
802  *
803  *	Wait for a flash operation to complete by polling the status register.
804  */
805 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
806 {
807 	int ret;
808 	u32 status;
809 
810 	while (1) {
811 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
812 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
813 			return ret;
814 		if (!(status & 1))
815 			return 0;
816 		if (--attempts == 0)
817 			return -EAGAIN;
818 		if (delay)
819 			msleep(delay);
820 	}
821 }
822 
823 /**
824  *	t4_read_flash - read words from serial flash
825  *	@adapter: the adapter
826  *	@addr: the start address for the read
827  *	@nwords: how many 32-bit words to read
828  *	@data: where to store the read data
829  *	@byte_oriented: whether to store data as bytes or as words
830  *
831  *	Read the specified number of 32-bit words from the serial flash.
832  *	If @byte_oriented is set the read data is stored as a byte array
833  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
834  *	natural endianess.
835  */
836 int t4_read_flash(struct adapter *adapter, unsigned int addr,
837 		  unsigned int nwords, u32 *data, int byte_oriented)
838 {
839 	int ret;
840 
841 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
842 		return -EINVAL;
843 
844 	addr = swab32(addr) | SF_RD_DATA_FAST;
845 
846 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
847 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
848 		return ret;
849 
850 	for ( ; nwords; nwords--, data++) {
851 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
852 		if (nwords == 1)
853 			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
854 		if (ret)
855 			return ret;
856 		if (byte_oriented)
857 			*data = htonl(*data);
858 	}
859 	return 0;
860 }
861 
862 /**
863  *	t4_write_flash - write up to a page of data to the serial flash
864  *	@adapter: the adapter
865  *	@addr: the start address to write
866  *	@n: length of data to write in bytes
867  *	@data: the data to write
868  *	@byte_oriented: whether to store data as bytes or as words
869  *
870  *	Writes up to a page of data (256 bytes) to the serial flash starting
871  *	at the given address.  All the data must be written to the same page.
872  *	If @byte_oriented is set the write data is stored as byte stream
873  *	(i.e. matches what on disk), otherwise in big-endian.
874  */
875 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
876 			  unsigned int n, const u8 *data, int byte_oriented)
877 {
878 	int ret;
879 	u32 buf[SF_PAGE_SIZE / 4];
880 	unsigned int i, c, left, val, offset = addr & 0xff;
881 
882 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
883 		return -EINVAL;
884 
885 	val = swab32(addr) | SF_PROG_PAGE;
886 
887 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
888 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
889 		goto unlock;
890 
891 	for (left = n; left; left -= c) {
892 		c = min(left, 4U);
893 		for (val = 0, i = 0; i < c; ++i)
894 			val = (val << 8) + *data++;
895 
896 		if (!byte_oriented)
897 			val = htonl(val);
898 
899 		ret = sf1_write(adapter, c, c != left, 1, val);
900 		if (ret)
901 			goto unlock;
902 	}
903 	ret = flash_wait_op(adapter, 8, 1);
904 	if (ret)
905 		goto unlock;
906 
907 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
908 
909 	/* Read the page to verify the write succeeded */
910 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
911 			    byte_oriented);
912 	if (ret)
913 		return ret;
914 
915 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
916 		CH_ERR(adapter, "failed to correctly write the flash page "
917 		       "at %#x\n", addr);
918 		return -EIO;
919 	}
920 	return 0;
921 
922 unlock:
923 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
924 	return ret;
925 }
926 
927 /**
928  *	t4_get_fw_version - read the firmware version
929  *	@adapter: the adapter
930  *	@vers: where to place the version
931  *
932  *	Reads the FW version from flash.
933  */
934 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
935 {
936 	return t4_read_flash(adapter,
937 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
938 			     vers, 0);
939 }
940 
941 /**
942  *	t4_get_tp_version - read the TP microcode version
943  *	@adapter: the adapter
944  *	@vers: where to place the version
945  *
946  *	Reads the TP microcode version from flash.
947  */
948 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
949 {
950 	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
951 							      tp_microcode_ver),
952 			     1, vers, 0);
953 }
954 
955 /**
956  *	t4_check_fw_version - check if the FW is compatible with this driver
957  *	@adapter: the adapter
958  *
959  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
960  *	if there's exact match, a negative error if the version could not be
961  *	read or there's a major version mismatch, and a positive value if the
962  *	expected major version is found but there's a minor version mismatch.
963  */
964 int t4_check_fw_version(struct adapter *adapter)
965 {
966 	int ret, major, minor, micro;
967 	int exp_major, exp_minor, exp_micro;
968 
969 	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
970 	if (!ret)
971 		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
972 	if (ret)
973 		return ret;
974 
975 	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
976 	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
977 	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
978 
979 	switch (chip_id(adapter)) {
980 	case CHELSIO_T4:
981 		exp_major = T4FW_VERSION_MAJOR;
982 		exp_minor = T4FW_VERSION_MINOR;
983 		exp_micro = T4FW_VERSION_MICRO;
984 		break;
985 	case CHELSIO_T5:
986 		exp_major = T5FW_VERSION_MAJOR;
987 		exp_minor = T5FW_VERSION_MINOR;
988 		exp_micro = T5FW_VERSION_MICRO;
989 		break;
990 	default:
991 		CH_ERR(adapter, "Unsupported chip type, %x\n",
992 		    chip_id(adapter));
993 		return -EINVAL;
994 	}
995 
996 	if (major != exp_major) {            /* major mismatch - fail */
997 		CH_ERR(adapter, "card FW has major version %u, driver wants "
998 		       "%u\n", major, exp_major);
999 		return -EINVAL;
1000 	}
1001 
1002 	if (minor == exp_minor && micro == exp_micro)
1003 		return 0;                                   /* perfect match */
1004 
1005 	/* Minor/micro version mismatch.  Report it but often it's OK. */
1006 	return 1;
1007 }
1008 
1009 /**
1010  *	t4_flash_erase_sectors - erase a range of flash sectors
1011  *	@adapter: the adapter
1012  *	@start: the first sector to erase
1013  *	@end: the last sector to erase
1014  *
1015  *	Erases the sectors in the given inclusive range.
1016  */
1017 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1018 {
1019 	int ret = 0;
1020 
1021 	while (start <= end) {
1022 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1023 		    (ret = sf1_write(adapter, 4, 0, 1,
1024 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1025 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1026 			CH_ERR(adapter, "erase of flash sector %d failed, "
1027 			       "error %d\n", start, ret);
1028 			break;
1029 		}
1030 		start++;
1031 	}
1032 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
1033 	return ret;
1034 }
1035 
1036 /**
1037  *	t4_flash_cfg_addr - return the address of the flash configuration file
1038  *	@adapter: the adapter
1039  *
1040  *	Return the address within the flash where the Firmware Configuration
1041  *	File is stored, or an error if the device FLASH is too small to contain
1042  *	a Firmware Configuration File.
1043  */
1044 int t4_flash_cfg_addr(struct adapter *adapter)
1045 {
1046 	/*
1047 	 * If the device FLASH isn't large enough to hold a Firmware
1048 	 * Configuration File, return an error.
1049 	 */
1050 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
1051 		return -ENOSPC;
1052 
1053 	return FLASH_CFG_START;
1054 }
1055 
1056 /**
1057  *	t4_load_cfg - download config file
1058  *	@adap: the adapter
1059  *	@cfg_data: the cfg text file to write
1060  *	@size: text file size
1061  *
1062  *	Write the supplied config text file to the card's serial flash.
1063  */
1064 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1065 {
1066 	int ret, i, n, cfg_addr;
1067 	unsigned int addr;
1068 	unsigned int flash_cfg_start_sec;
1069 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1070 
1071 	cfg_addr = t4_flash_cfg_addr(adap);
1072 	if (cfg_addr < 0)
1073 		return cfg_addr;
1074 
1075 	addr = cfg_addr;
1076 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
1077 
1078 	if (size > FLASH_CFG_MAX_SIZE) {
1079 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
1080 		       FLASH_CFG_MAX_SIZE);
1081 		return -EFBIG;
1082 	}
1083 
1084 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
1085 			 sf_sec_size);
1086 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
1087 				     flash_cfg_start_sec + i - 1);
1088 	/*
1089 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1090 	 * with the on-adapter Firmware Configuration File.
1091 	 */
1092 	if (ret || size == 0)
1093 		goto out;
1094 
1095 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
1096 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
1097 		if ( (size - i) <  SF_PAGE_SIZE)
1098 			n = size - i;
1099 		else
1100 			n = SF_PAGE_SIZE;
1101 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
1102 		if (ret)
1103 			goto out;
1104 
1105 		addr += SF_PAGE_SIZE;
1106 		cfg_data += SF_PAGE_SIZE;
1107 	}
1108 
1109 out:
1110 	if (ret)
1111 		CH_ERR(adap, "config file %s failed %d\n",
1112 		       (size == 0 ? "clear" : "download"), ret);
1113 	return ret;
1114 }
1115 
1116 
1117 /**
1118  *	t4_load_fw - download firmware
1119  *	@adap: the adapter
1120  *	@fw_data: the firmware image to write
1121  *	@size: image size
1122  *
1123  *	Write the supplied firmware image to the card's serial flash.
1124  */
1125 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1126 {
1127 	u32 csum;
1128 	int ret, addr;
1129 	unsigned int i;
1130 	u8 first_page[SF_PAGE_SIZE];
1131 	const u32 *p = (const u32 *)fw_data;
1132 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1133 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1134 	unsigned int fw_start_sec;
1135 	unsigned int fw_start;
1136 	unsigned int fw_size;
1137 
1138 	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
1139 		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
1140 		fw_start = FLASH_FWBOOTSTRAP_START;
1141 		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
1142 	} else {
1143 		fw_start_sec = FLASH_FW_START_SEC;
1144  		fw_start = FLASH_FW_START;
1145 		fw_size = FLASH_FW_MAX_SIZE;
1146 	}
1147 	if (!size) {
1148 		CH_ERR(adap, "FW image has no data\n");
1149 		return -EINVAL;
1150 	}
1151 	if (size & 511) {
1152 		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1153 		return -EINVAL;
1154 	}
1155 	if (ntohs(hdr->len512) * 512 != size) {
1156 		CH_ERR(adap, "FW image size differs from size in FW header\n");
1157 		return -EINVAL;
1158 	}
1159 	if (size > fw_size) {
1160 		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
1161 		return -EFBIG;
1162 	}
1163 	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
1164 	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
1165 		CH_ERR(adap,
1166 		    "FW image (%d) is not suitable for this adapter (%d)\n",
1167 		    hdr->chip, chip_id(adap));
1168 		return -EINVAL;
1169 	}
1170 
1171 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1172 		csum += ntohl(p[i]);
1173 
1174 	if (csum != 0xffffffff) {
1175 		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1176 		       csum);
1177 		return -EINVAL;
1178 	}
1179 
1180 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1181 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1182 	if (ret)
1183 		goto out;
1184 
1185 	/*
1186 	 * We write the correct version at the end so the driver can see a bad
1187 	 * version if the FW write fails.  Start by writing a copy of the
1188 	 * first page with a bad version.
1189 	 */
1190 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1191 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1192 	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
1193 	if (ret)
1194 		goto out;
1195 
1196 	addr = fw_start;
1197 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1198 		addr += SF_PAGE_SIZE;
1199 		fw_data += SF_PAGE_SIZE;
1200 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1201 		if (ret)
1202 			goto out;
1203 	}
1204 
1205 	ret = t4_write_flash(adap,
1206 			     fw_start + offsetof(struct fw_hdr, fw_ver),
1207 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1208 out:
1209 	if (ret)
1210 		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1211 	return ret;
1212 }
1213 
1214 /* BIOS boot headers */
1215 typedef struct pci_expansion_rom_header {
1216 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1217 	u8	reserved[22]; /* Reserved per processor Architecture data */
1218 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1219 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1220 
1221 /* Legacy PCI Expansion ROM Header */
1222 typedef struct legacy_pci_expansion_rom_header {
1223 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1224 	u8	size512; /* Current Image Size in units of 512 bytes */
1225 	u8	initentry_point[4];
1226 	u8	cksum; /* Checksum computed on the entire Image */
1227 	u8	reserved[16]; /* Reserved */
1228 	u8	pcir_offset[2]; /* Offset to PCI Data Struture */
1229 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1230 
1231 /* EFI PCI Expansion ROM Header */
1232 typedef struct efi_pci_expansion_rom_header {
1233 	u8	signature[2]; // ROM signature. The value 0xaa55
1234 	u8	initialization_size[2]; /* Units 512. Includes this header */
1235 	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1236 	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
1237 	u8	efi_machine_type[2]; /* Machine type from EFI image header */
1238 	u8	compression_type[2]; /* Compression type. */
1239 		/*
1240 		 * Compression type definition
1241 		 * 0x0: uncompressed
1242 		 * 0x1: Compressed
1243 		 * 0x2-0xFFFF: Reserved
1244 		 */
1245 	u8	reserved[8]; /* Reserved */
1246 	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
1247 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1248 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1249 
1250 /* PCI Data Structure Format */
1251 typedef struct pcir_data_structure { /* PCI Data Structure */
1252 	u8	signature[4]; /* Signature. The string "PCIR" */
1253 	u8	vendor_id[2]; /* Vendor Identification */
1254 	u8	device_id[2]; /* Device Identification */
1255 	u8	vital_product[2]; /* Pointer to Vital Product Data */
1256 	u8	length[2]; /* PCIR Data Structure Length */
1257 	u8	revision; /* PCIR Data Structure Revision */
1258 	u8	class_code[3]; /* Class Code */
1259 	u8	image_length[2]; /* Image Length. Multiple of 512B */
1260 	u8	code_revision[2]; /* Revision Level of Code/Data */
1261 	u8	code_type; /* Code Type. */
1262 		/*
1263 		 * PCI Expansion ROM Code Types
1264 		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1265 		 * 0x01: Open Firmware standard for PCI. FCODE
1266 		 * 0x02: Hewlett-Packard PA RISC. HP reserved
1267 		 * 0x03: EFI Image. EFI
1268 		 * 0x04-0xFF: Reserved.
1269 		 */
1270 	u8	indicator; /* Indicator. Identifies the last image in the ROM */
1271 	u8	reserved[2]; /* Reserved */
1272 } pcir_data_t; /* PCI__DATA_STRUCTURE */
1273 
1274 /* BOOT constants */
1275 enum {
1276 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1277 	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
1278 	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
1279 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1280 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment  */
1281 	VENDOR_ID = 0x1425, /* Vendor ID */
1282 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1283 };
1284 
1285 /*
1286  *	modify_device_id - Modifies the device ID of the Boot BIOS image
1287  *	@adatper: the device ID to write.
1288  *	@boot_data: the boot image to modify.
1289  *
1290  *	Write the supplied device ID to the boot BIOS image.
1291  */
1292 static void modify_device_id(int device_id, u8 *boot_data)
1293 {
1294 	legacy_pci_exp_rom_header_t *header;
1295 	pcir_data_t *pcir_header;
1296 	u32 cur_header = 0;
1297 
1298 	/*
1299 	 * Loop through all chained images and change the device ID's
1300 	 */
1301 	while (1) {
1302 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1303 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
1304 		    le16_to_cpu(*(u16*)header->pcir_offset)];
1305 
1306 		/*
1307 		 * Only modify the Device ID if code type is Legacy or HP.
1308 		 * 0x00: Okay to modify
1309 		 * 0x01: FCODE. Do not be modify
1310 		 * 0x03: Okay to modify
1311 		 * 0x04-0xFF: Do not modify
1312 		 */
1313 		if (pcir_header->code_type == 0x00) {
1314 			u8 csum = 0;
1315 			int i;
1316 
1317 			/*
1318 			 * Modify Device ID to match current adatper
1319 			 */
1320 			*(u16*) pcir_header->device_id = device_id;
1321 
1322 			/*
1323 			 * Set checksum temporarily to 0.
1324 			 * We will recalculate it later.
1325 			 */
1326 			header->cksum = 0x0;
1327 
1328 			/*
1329 			 * Calculate and update checksum
1330 			 */
1331 			for (i = 0; i < (header->size512 * 512); i++)
1332 				csum += (u8)boot_data[cur_header + i];
1333 
1334 			/*
1335 			 * Invert summed value to create the checksum
1336 			 * Writing new checksum value directly to the boot data
1337 			 */
1338 			boot_data[cur_header + 7] = -csum;
1339 
1340 		} else if (pcir_header->code_type == 0x03) {
1341 
1342 			/*
1343 			 * Modify Device ID to match current adatper
1344 			 */
1345 			*(u16*) pcir_header->device_id = device_id;
1346 
1347 		}
1348 
1349 
1350 		/*
1351 		 * Check indicator element to identify if this is the last
1352 		 * image in the ROM.
1353 		 */
1354 		if (pcir_header->indicator & 0x80)
1355 			break;
1356 
1357 		/*
1358 		 * Move header pointer up to the next image in the ROM.
1359 		 */
1360 		cur_header += header->size512 * 512;
1361 	}
1362 }
1363 
1364 /*
1365  *	t4_load_boot - download boot flash
1366  *	@adapter: the adapter
1367  *	@boot_data: the boot image to write
1368  *	@boot_addr: offset in flash to write boot_data
1369  *	@size: image size
1370  *
1371  *	Write the supplied boot image to the card's serial flash.
1372  *	The boot image has the following sections: a 28-byte header and the
1373  *	boot image.
1374  */
1375 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1376 		 unsigned int boot_addr, unsigned int size)
1377 {
1378 	pci_exp_rom_header_t *header;
1379 	int pcir_offset ;
1380 	pcir_data_t *pcir_header;
1381 	int ret, addr;
1382 	uint16_t device_id;
1383 	unsigned int i;
1384 	unsigned int boot_sector = boot_addr * 1024;
1385 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1386 
1387 	/*
1388 	 * Make sure the boot image does not encroach on the firmware region
1389 	 */
1390 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1391 		CH_ERR(adap, "boot image encroaching on firmware region\n");
1392 		return -EFBIG;
1393 	}
1394 
1395 	/*
1396 	 * Number of sectors spanned
1397 	 */
1398 	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1399 			sf_sec_size);
1400 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1401 				     (boot_sector >> 16) + i - 1);
1402 
1403 	/*
1404 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1405 	 * with the on-adapter option ROM file
1406 	 */
1407 	if (ret || (size == 0))
1408 		goto out;
1409 
1410 	/* Get boot header */
1411 	header = (pci_exp_rom_header_t *)boot_data;
1412 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1413 	/* PCIR Data Structure */
1414 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1415 
1416 	/*
1417 	 * Perform some primitive sanity testing to avoid accidentally
1418 	 * writing garbage over the boot sectors.  We ought to check for
1419 	 * more but it's not worth it for now ...
1420 	 */
1421 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1422 		CH_ERR(adap, "boot image too small/large\n");
1423 		return -EFBIG;
1424 	}
1425 
1426 	/*
1427 	 * Check BOOT ROM header signature
1428 	 */
1429 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1430 		CH_ERR(adap, "Boot image missing signature\n");
1431 		return -EINVAL;
1432 	}
1433 
1434 	/*
1435 	 * Check PCI header signature
1436 	 */
1437 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1438 		CH_ERR(adap, "PCI header missing signature\n");
1439 		return -EINVAL;
1440 	}
1441 
1442 	/*
1443 	 * Check Vendor ID matches Chelsio ID
1444 	 */
1445 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1446 		CH_ERR(adap, "Vendor ID missing signature\n");
1447 		return -EINVAL;
1448 	}
1449 
1450 	/*
1451 	 * Retrieve adapter's device ID
1452 	 */
1453 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1454 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
1455 	device_id = (device_id & 0xff) | 0x4000;
1456 
1457 	/*
1458 	 * Check PCIE Device ID
1459 	 */
1460 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1461 		/*
1462 		 * Change the device ID in the Boot BIOS image to match
1463 		 * the Device ID of the current adapter.
1464 		 */
1465 		modify_device_id(device_id, boot_data);
1466 	}
1467 
1468 	/*
1469 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1470 	 * we finish copying the rest of the boot image. This will ensure
1471 	 * that the BIOS boot header will only be written if the boot image
1472 	 * was written in full.
1473 	 */
1474 	addr = boot_sector;
1475 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1476 		addr += SF_PAGE_SIZE;
1477 		boot_data += SF_PAGE_SIZE;
1478 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1479 		if (ret)
1480 			goto out;
1481 	}
1482 
1483 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1484 
1485 out:
1486 	if (ret)
1487 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1488 	return ret;
1489 }
1490 
1491 /**
1492  *	t4_read_cimq_cfg - read CIM queue configuration
1493  *	@adap: the adapter
1494  *	@base: holds the queue base addresses in bytes
1495  *	@size: holds the queue sizes in bytes
1496  *	@thres: holds the queue full thresholds in bytes
1497  *
1498  *	Returns the current configuration of the CIM queues, starting with
1499  *	the IBQs, then the OBQs.
1500  */
1501 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1502 {
1503 	unsigned int i, v;
1504 	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1505 
1506 	for (i = 0; i < CIM_NUM_IBQ; i++) {
1507 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1508 			     V_QUENUMSELECT(i));
1509 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1510 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1511 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1512 		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1513 	}
1514 	for (i = 0; i < cim_num_obq; i++) {
1515 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1516 			     V_QUENUMSELECT(i));
1517 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1518 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1519 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1520 	}
1521 }
1522 
1523 /**
1524  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1525  *	@adap: the adapter
1526  *	@qid: the queue index
1527  *	@data: where to store the queue contents
1528  *	@n: capacity of @data in 32-bit words
1529  *
1530  *	Reads the contents of the selected CIM queue starting at address 0 up
1531  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1532  *	error and the number of 32-bit words actually read on success.
1533  */
1534 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1535 {
1536 	int i, err;
1537 	unsigned int addr;
1538 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1539 
1540 	if (qid > 5 || (n & 3))
1541 		return -EINVAL;
1542 
1543 	addr = qid * nwords;
1544 	if (n > nwords)
1545 		n = nwords;
1546 
1547 	for (i = 0; i < n; i++, addr++) {
1548 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1549 			     F_IBQDBGEN);
1550 		/*
1551 		 * It might take 3-10ms before the IBQ debug read access is
1552 		 * allowed.  Wait for 1 Sec with a delay of 1 usec.
1553 		 */
1554 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1555 				      1000000, 1);
1556 		if (err)
1557 			return err;
1558 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1559 	}
1560 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1561 	return i;
1562 }
1563 
1564 /**
1565  *	t4_read_cim_obq - read the contents of a CIM outbound queue
1566  *	@adap: the adapter
1567  *	@qid: the queue index
1568  *	@data: where to store the queue contents
1569  *	@n: capacity of @data in 32-bit words
1570  *
1571  *	Reads the contents of the selected CIM queue starting at address 0 up
1572  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1573  *	error and the number of 32-bit words actually read on success.
1574  */
1575 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1576 {
1577 	int i, err;
1578 	unsigned int addr, v, nwords;
1579 	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1580 
1581 	if (qid >= cim_num_obq || (n & 3))
1582 		return -EINVAL;
1583 
1584 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1585 		     V_QUENUMSELECT(qid));
1586 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1587 
1588 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
1589 	nwords = G_CIMQSIZE(v) * 64;  /* same */
1590 	if (n > nwords)
1591 		n = nwords;
1592 
1593 	for (i = 0; i < n; i++, addr++) {
1594 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1595 			     F_OBQDBGEN);
1596 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1597 				      2, 1);
1598 		if (err)
1599 			return err;
1600 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1601 	}
1602 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1603 	return i;
1604 }
1605 
1606 enum {
1607 	CIM_QCTL_BASE     = 0,
1608 	CIM_CTL_BASE      = 0x2000,
1609 	CIM_PBT_ADDR_BASE = 0x2800,
1610 	CIM_PBT_LRF_BASE  = 0x3000,
1611 	CIM_PBT_DATA_BASE = 0x3800
1612 };
1613 
1614 /**
1615  *	t4_cim_read - read a block from CIM internal address space
1616  *	@adap: the adapter
1617  *	@addr: the start address within the CIM address space
1618  *	@n: number of words to read
1619  *	@valp: where to store the result
1620  *
1621  *	Reads a block of 4-byte words from the CIM intenal address space.
1622  */
1623 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1624 		unsigned int *valp)
1625 {
1626 	int ret = 0;
1627 
1628 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1629 		return -EBUSY;
1630 
1631 	for ( ; !ret && n--; addr += 4) {
1632 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1633 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1634 				      0, 5, 2);
1635 		if (!ret)
1636 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1637 	}
1638 	return ret;
1639 }
1640 
1641 /**
1642  *	t4_cim_write - write a block into CIM internal address space
1643  *	@adap: the adapter
1644  *	@addr: the start address within the CIM address space
1645  *	@n: number of words to write
1646  *	@valp: set of values to write
1647  *
1648  *	Writes a block of 4-byte words into the CIM intenal address space.
1649  */
1650 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1651 		 const unsigned int *valp)
1652 {
1653 	int ret = 0;
1654 
1655 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1656 		return -EBUSY;
1657 
1658 	for ( ; !ret && n--; addr += 4) {
1659 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1660 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1661 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1662 				      0, 5, 2);
1663 	}
1664 	return ret;
1665 }
1666 
1667 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1668 {
1669 	return t4_cim_write(adap, addr, 1, &val);
1670 }
1671 
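/*
 * Illustrative sketch: a read-modify-write of a single word in the CIM
 * address space built from the two helpers above.  The example_* helper
 * name is hypothetical.
 */
static __unused int example_cim_set_bits(struct adapter *adap,
					 unsigned int addr, unsigned int bits)
{
	unsigned int v;
	int ret = t4_cim_read(adap, addr, 1, &v);

	return ret ? ret : t4_cim_write1(adap, addr, v | bits);
}
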
1672 /**
1673  *	t4_cim_ctl_read - read a block from CIM control region
1674  *	@adap: the adapter
1675  *	@addr: the start address within the CIM control region
1676  *	@n: number of words to read
1677  *	@valp: where to store the result
1678  *
1679  *	Reads a block of 4-byte words from the CIM control region.
1680  */
1681 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1682 		    unsigned int *valp)
1683 {
1684 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1685 }
1686 
1687 /**
1688  *	t4_cim_read_la - read CIM LA capture buffer
1689  *	@adap: the adapter
1690  *	@la_buf: where to store the LA data
1691  *	@wrptr: the HW write pointer within the capture buffer
1692  *
1693  *	Reads the contents of the CIM LA buffer with the most recent entry at
1694  *	the end	of the returned data and with the entry at @wrptr first.
1695  *	We try to leave the LA in the running state we find it in.
1696  */
1697 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1698 {
1699 	int i, ret;
1700 	unsigned int cfg, val, idx;
1701 
1702 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1703 	if (ret)
1704 		return ret;
1705 
1706 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1707 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1708 		if (ret)
1709 			return ret;
1710 	}
1711 
1712 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1713 	if (ret)
1714 		goto restart;
1715 
1716 	idx = G_UPDBGLAWRPTR(val);
1717 	if (wrptr)
1718 		*wrptr = idx;
1719 
1720 	for (i = 0; i < adap->params.cim_la_size; i++) {
1721 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1722 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1723 		if (ret)
1724 			break;
1725 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1726 		if (ret)
1727 			break;
1728 		if (val & F_UPDBGLARDEN) {
1729 			ret = -ETIMEDOUT;
1730 			break;
1731 		}
1732 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1733 		if (ret)
1734 			break;
1735 		idx = (idx + 1) & M_UPDBGLARDPTR;
1736 	}
1737 restart:
1738 	if (cfg & F_UPDBGLAEN) {
1739 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1740 				      cfg & ~F_UPDBGLARDEN);
1741 		if (!ret)
1742 			ret = r;
1743 	}
1744 	return ret;
1745 }
1746 
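/*
 * Illustrative sketch: dumping the CIM LA.  A real caller would allocate a
 * buffer of adap->params.cim_la_size 32-bit words; the fixed 256-word array
 * and the use of CH_WARN for output are assumptions of this sketch.
 */
static __unused void example_dump_cim_la(struct adapter *adap)
{
	u32 buf[256];
	unsigned int i, wrptr;

	if (adap->params.cim_la_size > ARRAY_SIZE(buf))
		return;		/* sketch only handles small LAs */
	if (t4_cim_read_la(adap, buf, &wrptr) == 0)
		for (i = 0; i < adap->params.cim_la_size; i++)
			CH_WARN(adap, "CIM LA[%u] = %08x\n", i, buf[i]);
}
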
1747 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1748 			unsigned int *pif_req_wrptr,
1749 			unsigned int *pif_rsp_wrptr)
1750 {
1751 	int i, j;
1752 	u32 cfg, val, req, rsp;
1753 
1754 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1755 	if (cfg & F_LADBGEN)
1756 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1757 
1758 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1759 	req = G_POLADBGWRPTR(val);
1760 	rsp = G_PILADBGWRPTR(val);
1761 	if (pif_req_wrptr)
1762 		*pif_req_wrptr = req;
1763 	if (pif_rsp_wrptr)
1764 		*pif_rsp_wrptr = rsp;
1765 
1766 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1767 		for (j = 0; j < 6; j++) {
1768 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1769 				     V_PILADBGRDPTR(rsp));
1770 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1771 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1772 			req++;
1773 			rsp++;
1774 		}
1775 		req = (req + 2) & M_POLADBGRDPTR;
1776 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1777 	}
1778 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1779 }
1780 
1781 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1782 {
1783 	u32 cfg;
1784 	int i, j, idx;
1785 
1786 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1787 	if (cfg & F_LADBGEN)
1788 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1789 
1790 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1791 		for (j = 0; j < 5; j++) {
1792 			idx = 8 * i + j;
1793 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1794 				     V_PILADBGRDPTR(idx));
1795 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1796 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1797 		}
1798 	}
1799 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1800 }
1801 
1802 /**
1803  *	t4_tp_read_la - read TP LA capture buffer
1804  *	@adap: the adapter
1805  *	@la_buf: where to store the LA data
1806  *	@wrptr: the HW write pointer within the capture buffer
1807  *
1808  *	Reads the contents of the TP LA buffer with the most recent entry at
1809  *	the end	of the returned data and with the entry at @wrptr first.
1810  *	We leave the LA in the running state we find it in.
1811  */
1812 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1813 {
1814 	bool last_incomplete;
1815 	unsigned int i, cfg, val, idx;
1816 
1817 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1818 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1819 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1820 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1821 
1822 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1823 	idx = G_DBGLAWPTR(val);
1824 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1825 	if (last_incomplete)
1826 		idx = (idx + 1) & M_DBGLARPTR;
1827 	if (wrptr)
1828 		*wrptr = idx;
1829 
1830 	val &= 0xffff;
1831 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1832 	val |= adap->params.tp.la_mask;
1833 
1834 	for (i = 0; i < TPLA_SIZE; i++) {
1835 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1836 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1837 		idx = (idx + 1) & M_DBGLARPTR;
1838 	}
1839 
1840 	/* Wipe out last entry if it isn't valid */
1841 	if (last_incomplete)
1842 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1843 
1844 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1845 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1846 			     cfg | adap->params.tp.la_mask);
1847 }
1848 
1849 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1850 {
1851 	unsigned int i, j;
1852 
1853 	for (i = 0; i < 8; i++) {
1854 		u32 *p = la_buf + i;
1855 
1856 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1857 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1858 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1859 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1860 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1861 	}
1862 }
1863 
1864 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1865 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1866 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1867 
1868 /**
1869  *	t4_link_start - apply link configuration to MAC/PHY
1870  *	@adap: the adapter
1871  *	@mbox: mbox to use for the FW command
1872  *	@port: the port id; @lc: the requested link configuration
1873  *
1874  *	Set up a port's MAC and PHY according to a desired link configuration.
1875  *	- If the PHY can auto-negotiate, first decide what to advertise, then
1876  *	  enable/disable auto-negotiation as desired, and reset.
1877  *	- If the PHY does not auto-negotiate, just reset it.
1878  *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
1879  *	  otherwise do it later based on the outcome of auto-negotiation.
1880  */
1881 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1882 		  struct link_config *lc)
1883 {
1884 	struct fw_port_cmd c;
1885 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1886 
1887 	lc->link_ok = 0;
1888 	if (lc->requested_fc & PAUSE_RX)
1889 		fc |= FW_PORT_CAP_FC_RX;
1890 	if (lc->requested_fc & PAUSE_TX)
1891 		fc |= FW_PORT_CAP_FC_TX;
1892 
1893 	memset(&c, 0, sizeof(c));
1894 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1895 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1896 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1897 				  FW_LEN16(c));
1898 
1899 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1900 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1901 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1902 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1903 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1904 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1905 	} else
1906 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1907 
1908 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1909 }
1910 
1911 /**
1912  *	t4_restart_aneg - restart autonegotiation
1913  *	@adap: the adapter
1914  *	@mbox: mbox to use for the FW command
1915  *	@port: the port id
1916  *
1917  *	Restarts autonegotiation for the selected port.
1918  */
1919 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1920 {
1921 	struct fw_port_cmd c;
1922 
1923 	memset(&c, 0, sizeof(c));
1924 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1925 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1926 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1927 				  FW_LEN16(c));
1928 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1929 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1930 }
1931 
1932 struct intr_info {
1933 	unsigned int mask;       /* bits to check in interrupt status */
1934 	const char *msg;         /* message to print or NULL */
1935 	short stat_idx;          /* stat counter to increment or -1 */
1936 	unsigned short fatal;    /* whether the condition reported is fatal */
1937 };
1938 
1939 /**
1940  *	t4_handle_intr_status - table driven interrupt handler
1941  *	@adapter: the adapter that generated the interrupt
1942  *	@reg: the interrupt status register to process
1943  *	@acts: table of interrupt actions
1944  *
1945  *	A table driven interrupt handler that applies a set of masks to an
1946  *	interrupt status word and performs the corresponding actions if the
1947  *	interrupts described by the mask have occurred.  The actions include
1948  *	optionally emitting a warning or alert message.  The table is terminated
1949  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1950  *	conditions.
1951  */
1952 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1953 				 const struct intr_info *acts)
1954 {
1955 	int fatal = 0;
1956 	unsigned int mask = 0;
1957 	unsigned int status = t4_read_reg(adapter, reg);
1958 
1959 	for ( ; acts->mask; ++acts) {
1960 		if (!(status & acts->mask))
1961 			continue;
1962 		if (acts->fatal) {
1963 			fatal++;
1964 			CH_ALERT(adapter, "%s (0x%x)\n",
1965 				 acts->msg, status & acts->mask);
1966 		} else if (acts->msg)
1967 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1968 					  acts->msg, status & acts->mask);
1969 		mask |= acts->mask;
1970 	}
1971 	status &= mask;
1972 	if (status)                           /* clear processed interrupts */
1973 		t4_write_reg(adapter, reg, status);
1974 	return fatal;
1975 }
1976 
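/*
 * Illustrative sketch of the table-driven pattern every module handler
 * below follows: declare a { mask, message, stat_idx, fatal } table
 * terminated by a zero mask and feed it to t4_handle_intr_status().  The
 * register and field names are placeholders, so the example is compiled
 * out.
 */
#if 0
static void example_intr_handler(struct adapter *adapter)
{
	static struct intr_info example_intr_info[] = {
		{ F_EXAMPLE_PAR_ERR, "example parity error", -1, 1 },
		{ F_EXAMPLE_WARNING, "example warning", -1, 0 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_EXAMPLE_INT_CAUSE,
				  example_intr_info))
		t4_fatal_err(adapter);
}
#endif
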
1977 /*
1978  * Interrupt handler for the PCIE module.
1979  */
1980 static void pcie_intr_handler(struct adapter *adapter)
1981 {
1982 	static struct intr_info sysbus_intr_info[] = {
1983 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1984 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1985 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1986 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1987 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1988 		{ 0 }
1989 	};
1990 	static struct intr_info pcie_port_intr_info[] = {
1991 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1992 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1993 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1994 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1995 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1996 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1997 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1998 		{ F_RDPE, "Rx data parity error", -1, 1 },
1999 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
2000 		{ 0 }
2001 	};
2002 	static struct intr_info pcie_intr_info[] = {
2003 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2004 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2005 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2006 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2007 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2008 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2009 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2010 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2011 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2012 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2013 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2014 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2015 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2016 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2017 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2018 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2019 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2020 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2021 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2022 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2023 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2024 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2025 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2026 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2027 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2028 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2029 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2030 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2031 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2032 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2033 		  0 },
2034 		{ 0 }
2035 	};
2036 
2037 	static struct intr_info t5_pcie_intr_info[] = {
2038 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2039 		  -1, 1 },
2040 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2041 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2042 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2043 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2044 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2045 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2046 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2047 		  -1, 1 },
2048 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2049 		  -1, 1 },
2050 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2051 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2052 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2053 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2054 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2055 		  -1, 1 },
2056 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2057 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2058 		{ F_HREQWRPERR, "PCI HMA channel write request parity error",
2058 		  -1, 1 },
2059 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2060 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2061 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2062 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2063 		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2064 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2065 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2066 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2067 		  -1, 1 },
2068 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2069 		  -1, 1 },
2070 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2071 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2072 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2073 		{ F_READRSPERR, "Outbound read error", -1,
2074 		  0 },
2075 		{ 0 }
2076 	};
2077 
2078 	int fat;
2079 
2080 	fat = t4_handle_intr_status(adapter,
2081 				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2082 				    sysbus_intr_info) +
2083 	      t4_handle_intr_status(adapter,
2084 				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2085 				    pcie_port_intr_info) +
2086 	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2087 				    is_t4(adapter) ?
2088 				    pcie_intr_info : t5_pcie_intr_info);
2089 	if (fat)
2090 		t4_fatal_err(adapter);
2091 }
2092 
2093 /*
2094  * TP interrupt handler.
2095  */
2096 static void tp_intr_handler(struct adapter *adapter)
2097 {
2098 	static struct intr_info tp_intr_info[] = {
2099 		{ 0x3fffffff, "TP parity error", -1, 1 },
2100 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2101 		{ 0 }
2102 	};
2103 
2104 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2105 		t4_fatal_err(adapter);
2106 }
2107 
2108 /*
2109  * SGE interrupt handler.
2110  */
2111 static void sge_intr_handler(struct adapter *adapter)
2112 {
2113 	u64 v;
2114 	u32 err;
2115 
2116 	static struct intr_info sge_intr_info[] = {
2117 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2118 		  "SGE received CPL exceeding IQE size", -1, 1 },
2119 		{ F_ERR_INVALID_CIDX_INC,
2120 		  "SGE GTS CIDX increment too large", -1, 0 },
2121 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2122 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2123 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2124 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2125 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2126 		  0 },
2127 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2128 		  0 },
2129 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2130 		  0 },
2131 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2132 		  0 },
2133 		{ F_ERR_ING_CTXT_PRIO,
2134 		  "SGE too many priority ingress contexts", -1, 0 },
2135 		{ F_ERR_EGR_CTXT_PRIO,
2136 		  "SGE too many priority egress contexts", -1, 0 },
2137 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2138 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2139 		{ 0 }
2140 	};
2141 
2142 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2143 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2144 	if (v) {
2145 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2146 			 (unsigned long long)v);
2147 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2148 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2149 	}
2150 
2151 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2152 
2153 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2154 	if (err & F_ERROR_QID_VALID) {
2155 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2156 		if (err & F_UNCAPTURED_ERROR)
2157 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2158 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2159 			     F_UNCAPTURED_ERROR);
2160 	}
2161 
2162 	if (v != 0)
2163 		t4_fatal_err(adapter);
2164 }
2165 
2166 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2167 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2168 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2169 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2170 
2171 /*
2172  * CIM interrupt handler.
2173  */
2174 static void cim_intr_handler(struct adapter *adapter)
2175 {
2176 	static struct intr_info cim_intr_info[] = {
2177 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2178 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2179 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2180 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2181 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2182 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2183 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2184 		{ 0 }
2185 	};
2186 	static struct intr_info cim_upintr_info[] = {
2187 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2188 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2189 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2190 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2191 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2192 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2193 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2194 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2195 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2196 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2197 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2198 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2199 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2200 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2201 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2202 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2203 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
2204 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
2205 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
2206 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
2207 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
2208 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
2209 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
2210 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
2211 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
2212 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
2213 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
2214 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
2215 		{ 0 }
2216 	};
2217 	int fat;
2218 
2219 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2220 		t4_report_fw_error(adapter);
2221 
2222 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2223 				    cim_intr_info) +
2224 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2225 				    cim_upintr_info);
2226 	if (fat)
2227 		t4_fatal_err(adapter);
2228 }
2229 
2230 /*
2231  * ULP RX interrupt handler.
2232  */
2233 static void ulprx_intr_handler(struct adapter *adapter)
2234 {
2235 	static struct intr_info ulprx_intr_info[] = {
2236 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2237 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2238 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2239 		{ 0 }
2240 	};
2241 
2242 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2243 		t4_fatal_err(adapter);
2244 }
2245 
2246 /*
2247  * ULP TX interrupt handler.
2248  */
2249 static void ulptx_intr_handler(struct adapter *adapter)
2250 {
2251 	static struct intr_info ulptx_intr_info[] = {
2252 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2253 		  0 },
2254 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2255 		  0 },
2256 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2257 		  0 },
2258 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2259 		  0 },
2260 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2261 		{ 0 }
2262 	};
2263 
2264 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2265 		t4_fatal_err(adapter);
2266 }
2267 
2268 /*
2269  * PM TX interrupt handler.
2270  */
2271 static void pmtx_intr_handler(struct adapter *adapter)
2272 {
2273 	static struct intr_info pmtx_intr_info[] = {
2274 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2275 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2276 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2277 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2278 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2279 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2280 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2281 		  1 },
2282 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2283 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2284 		{ 0 }
2285 	};
2286 
2287 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2288 		t4_fatal_err(adapter);
2289 }
2290 
2291 /*
2292  * PM RX interrupt handler.
2293  */
2294 static void pmrx_intr_handler(struct adapter *adapter)
2295 {
2296 	static struct intr_info pmrx_intr_info[] = {
2297 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2298 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2299 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2300 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2301 		  1 },
2302 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2303 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2304 		{ 0 }
2305 	};
2306 
2307 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2308 		t4_fatal_err(adapter);
2309 }
2310 
2311 /*
2312  * CPL switch interrupt handler.
2313  */
2314 static void cplsw_intr_handler(struct adapter *adapter)
2315 {
2316 	static struct intr_info cplsw_intr_info[] = {
2317 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2318 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2319 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2320 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2321 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2322 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2323 		{ 0 }
2324 	};
2325 
2326 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2327 		t4_fatal_err(adapter);
2328 }
2329 
2330 /*
2331  * LE interrupt handler.
2332  */
2333 static void le_intr_handler(struct adapter *adap)
2334 {
2335 	static struct intr_info le_intr_info[] = {
2336 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2337 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2338 		{ F_PARITYERR, "LE parity error", -1, 1 },
2339 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2340 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2341 		{ 0 }
2342 	};
2343 
2344 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2345 		t4_fatal_err(adap);
2346 }
2347 
2348 /*
2349  * MPS interrupt handler.
2350  */
2351 static void mps_intr_handler(struct adapter *adapter)
2352 {
2353 	static struct intr_info mps_rx_intr_info[] = {
2354 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2355 		{ 0 }
2356 	};
2357 	static struct intr_info mps_tx_intr_info[] = {
2358 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2359 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2360 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2361 		  -1, 1 },
2362 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2363 		  -1, 1 },
2364 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2365 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2366 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2367 		{ 0 }
2368 	};
2369 	static struct intr_info mps_trc_intr_info[] = {
2370 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2371 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2372 		  1 },
2373 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2374 		{ 0 }
2375 	};
2376 	static struct intr_info mps_stat_sram_intr_info[] = {
2377 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2378 		{ 0 }
2379 	};
2380 	static struct intr_info mps_stat_tx_intr_info[] = {
2381 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2382 		{ 0 }
2383 	};
2384 	static struct intr_info mps_stat_rx_intr_info[] = {
2385 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2386 		{ 0 }
2387 	};
2388 	static struct intr_info mps_cls_intr_info[] = {
2389 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2390 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2391 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2392 		{ 0 }
2393 	};
2394 
2395 	int fat;
2396 
2397 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2398 				    mps_rx_intr_info) +
2399 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2400 				    mps_tx_intr_info) +
2401 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2402 				    mps_trc_intr_info) +
2403 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2404 				    mps_stat_sram_intr_info) +
2405 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2406 				    mps_stat_tx_intr_info) +
2407 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2408 				    mps_stat_rx_intr_info) +
2409 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2410 				    mps_cls_intr_info);
2411 
2412 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2413 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2414 	if (fat)
2415 		t4_fatal_err(adapter);
2416 }
2417 
2418 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2419 
2420 /*
2421  * EDC/MC interrupt handler.
2422  */
2423 static void mem_intr_handler(struct adapter *adapter, int idx)
2424 {
2425 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2426 
2427 	unsigned int addr, cnt_addr, v;
2428 
2429 	if (idx <= MEM_EDC1) {
2430 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2431 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2432 	} else {
2433 		if (is_t4(adapter)) {
2434 			addr = A_MC_INT_CAUSE;
2435 			cnt_addr = A_MC_ECC_STATUS;
2436 		} else {
2437 			addr = A_MC_P_INT_CAUSE;
2438 			cnt_addr = A_MC_P_ECC_STATUS;
2439 		}
2440 	}
2441 
2442 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2443 	if (v & F_PERR_INT_CAUSE)
2444 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2445 	if (v & F_ECC_CE_INT_CAUSE) {
2446 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2447 
2448 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2449 		CH_WARN_RATELIMIT(adapter,
2450 				  "%u %s correctable ECC data error%s\n",
2451 				  cnt, name[idx], cnt > 1 ? "s" : "");
2452 	}
2453 	if (v & F_ECC_UE_INT_CAUSE)
2454 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2455 			 name[idx]);
2456 
2457 	t4_write_reg(adapter, addr, v);
2458 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2459 		t4_fatal_err(adapter);
2460 }
2461 
2462 /*
2463  * MA interrupt handler.
2464  */
2465 static void ma_intr_handler(struct adapter *adapter)
2466 {
2467 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2468 
2469 	if (status & F_MEM_PERR_INT_CAUSE)
2470 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2471 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2472 	if (status & F_MEM_WRAP_INT_CAUSE) {
2473 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2474 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2475 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2476 			 G_MEM_WRAP_ADDRESS(v) << 4);
2477 	}
2478 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2479 	t4_fatal_err(adapter);
2480 }
2481 
2482 /*
2483  * SMB interrupt handler.
2484  */
2485 static void smb_intr_handler(struct adapter *adap)
2486 {
2487 	static struct intr_info smb_intr_info[] = {
2488 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2489 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2490 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2491 		{ 0 }
2492 	};
2493 
2494 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2495 		t4_fatal_err(adap);
2496 }
2497 
2498 /*
2499  * NC-SI interrupt handler.
2500  */
2501 static void ncsi_intr_handler(struct adapter *adap)
2502 {
2503 	static struct intr_info ncsi_intr_info[] = {
2504 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2505 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2506 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2507 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2508 		{ 0 }
2509 	};
2510 
2511 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2512 		t4_fatal_err(adap);
2513 }
2514 
2515 /*
2516  * XGMAC interrupt handler.
2517  */
2518 static void xgmac_intr_handler(struct adapter *adap, int port)
2519 {
2520 	u32 v, int_cause_reg;
2521 
2522 	if (is_t4(adap))
2523 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2524 	else
2525 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2526 
2527 	v = t4_read_reg(adap, int_cause_reg);
2528 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2529 	if (!v)
2530 		return;
2531 
2532 	if (v & F_TXFIFO_PRTY_ERR)
2533 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2534 	if (v & F_RXFIFO_PRTY_ERR)
2535 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2536 	t4_write_reg(adap, int_cause_reg, v);
2537 	t4_fatal_err(adap);
2538 }
2539 
2540 /*
2541  * PL interrupt handler.
2542  */
2543 static void pl_intr_handler(struct adapter *adap)
2544 {
2545 	static struct intr_info pl_intr_info[] = {
2546 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2547 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2548 		{ 0 }
2549 	};
2550 
2551 	static struct intr_info t5_pl_intr_info[] = {
2552 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2553 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2554 		{ 0 }
2555 	};
2556 
2557 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2558 	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2559 		t4_fatal_err(adap);
2560 }
2561 
2562 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2563 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2564 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2565 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2566 
2567 /**
2568  *	t4_slow_intr_handler - control path interrupt handler
2569  *	@adapter: the adapter
2570  *
2571  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2572  *	The designation 'slow' is because it involves register reads, while
2573  *	data interrupts typically don't involve any MMIOs.
2574  */
2575 int t4_slow_intr_handler(struct adapter *adapter)
2576 {
2577 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2578 
2579 	if (!(cause & GLBL_INTR_MASK))
2580 		return 0;
2581 	if (cause & F_CIM)
2582 		cim_intr_handler(adapter);
2583 	if (cause & F_MPS)
2584 		mps_intr_handler(adapter);
2585 	if (cause & F_NCSI)
2586 		ncsi_intr_handler(adapter);
2587 	if (cause & F_PL)
2588 		pl_intr_handler(adapter);
2589 	if (cause & F_SMB)
2590 		smb_intr_handler(adapter);
2591 	if (cause & F_XGMAC0)
2592 		xgmac_intr_handler(adapter, 0);
2593 	if (cause & F_XGMAC1)
2594 		xgmac_intr_handler(adapter, 1);
2595 	if (cause & F_XGMAC_KR0)
2596 		xgmac_intr_handler(adapter, 2);
2597 	if (cause & F_XGMAC_KR1)
2598 		xgmac_intr_handler(adapter, 3);
2599 	if (cause & F_PCIE)
2600 		pcie_intr_handler(adapter);
2601 	if (cause & F_MC)
2602 		mem_intr_handler(adapter, MEM_MC);
2603 	if (cause & F_EDC0)
2604 		mem_intr_handler(adapter, MEM_EDC0);
2605 	if (cause & F_EDC1)
2606 		mem_intr_handler(adapter, MEM_EDC1);
2607 	if (cause & F_LE)
2608 		le_intr_handler(adapter);
2609 	if (cause & F_TP)
2610 		tp_intr_handler(adapter);
2611 	if (cause & F_MA)
2612 		ma_intr_handler(adapter);
2613 	if (cause & F_PM_TX)
2614 		pmtx_intr_handler(adapter);
2615 	if (cause & F_PM_RX)
2616 		pmrx_intr_handler(adapter);
2617 	if (cause & F_ULP_RX)
2618 		ulprx_intr_handler(adapter);
2619 	if (cause & F_CPL_SWITCH)
2620 		cplsw_intr_handler(adapter);
2621 	if (cause & F_SGE)
2622 		sge_intr_handler(adapter);
2623 	if (cause & F_ULP_TX)
2624 		ulptx_intr_handler(adapter);
2625 
2626 	/* Clear the interrupts just processed for which we are the master. */
2627 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2628 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2629 	return 1;
2630 }
2631 
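/*
 * Illustrative sketch: how an OS-specific interrupt routine might use
 * t4_slow_intr_handler().  The data-path fallback shown is an assumption;
 * the real dispatch lives outside this file.
 */
static __unused void example_isr(struct adapter *adap)
{
	if (t4_slow_intr_handler(adap))
		return;		/* serviced a global (error) interrupt */
	/* otherwise process data-path (queue) interrupts here */
}
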
2632 /**
2633  *	t4_intr_enable - enable interrupts
2634  *	@adapter: the adapter whose interrupts should be enabled
2635  *
2636  *	Enable PF-specific interrupts for the calling function and the top-level
2637  *	interrupt concentrator for global interrupts.  Interrupts are already
2638  *	enabled at each module; here we just enable the roots of the interrupt
2639  *	hierarchies.
2640  *
2641  *	Note: this function should be called only when the driver manages
2642  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2643  *	function at a time should be doing this.
2644  */
2645 void t4_intr_enable(struct adapter *adapter)
2646 {
2647 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2648 
2649 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2650 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2651 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2652 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2653 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2654 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2655 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2656 		     F_EGRESS_SIZE_ERR);
2657 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2658 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2659 }
2660 
2661 /**
2662  *	t4_intr_disable - disable interrupts
2663  *	@adapter: the adapter whose interrupts should be disabled
2664  *
2665  *	Disable interrupts.  We only disable the top-level interrupt
2666  *	concentrators.  The caller must be a PCI function managing global
2667  *	interrupts.
2668  */
2669 void t4_intr_disable(struct adapter *adapter)
2670 {
2671 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2672 
2673 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2674 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2675 }
2676 
2677 /**
2678  *	t4_intr_clear - clear all interrupts
2679  *	@adapter: the adapter whose interrupts should be cleared
2680  *
2681  *	Clears all interrupts.  The caller must be a PCI function managing
2682  *	global interrupts.
2683  */
2684 void t4_intr_clear(struct adapter *adapter)
2685 {
2686 	static const unsigned int cause_reg[] = {
2687 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2688 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2689 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2690 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2691 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2692 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2693 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2694 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2695 		A_TP_INT_CAUSE,
2696 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2697 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2698 		A_MPS_RX_PERR_INT_CAUSE,
2699 		A_CPL_INTR_CAUSE,
2700 		MYPF_REG(A_PL_PF_INT_CAUSE),
2701 		A_PL_PL_INT_CAUSE,
2702 		A_LE_DB_INT_CAUSE,
2703 	};
2704 
2705 	unsigned int i;
2706 
2707 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2708 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2709 
2710 	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2711 				A_MC_P_INT_CAUSE, 0xffffffff);
2712 
2713 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2714 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2715 }
2716 
2717 /**
2718  *	hash_mac_addr - return the hash value of a MAC address
2719  *	@addr: the 48-bit Ethernet MAC address
2720  *
2721  *	Hashes a MAC address according to the hash function used by HW inexact
2722  *	(hash) address matching.
2723  */
2724 static int hash_mac_addr(const u8 *addr)
2725 {
2726 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2727 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2728 	a ^= b;
2729 	a ^= (a >> 12);
2730 	a ^= (a >> 6);
2731 	return a & 0x3f;
2732 }
2733 
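/*
 * Illustrative sketch: the 6-bit hash picks one bit of the 64-bit
 * inexact-match hash filter.  The MAC address below is an arbitrary
 * example and the example_* name is hypothetical.
 */
static __unused u64 example_hash_filter_bit(void)
{
	static const u8 mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	return 1ULL << hash_mac_addr(mac);	/* hash_mac_addr returns 0..63 */
}
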
2734 /**
2735  *	t4_config_rss_range - configure a portion of the RSS mapping table
2736  *	@adapter: the adapter
2737  *	@mbox: mbox to use for the FW command
2738  *	@viid: virtual interface whose RSS subtable is to be written
2739  *	@start: start entry in the table to write
2740  *	@n: how many table entries to write
2741  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2742  *	@nrspq: number of values in @rspq
2743  *
2744  *	Programs the selected part of the VI's RSS mapping table with the
2745  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2746  *	until the full table range is populated.
2747  *
2748  *	The caller must ensure the values in @rspq are in the range allowed for
2749  *	@viid.
2750  */
2751 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2752 			int start, int n, const u16 *rspq, unsigned int nrspq)
2753 {
2754 	int ret;
2755 	const u16 *rsp = rspq;
2756 	const u16 *rsp_end = rspq + nrspq;
2757 	struct fw_rss_ind_tbl_cmd cmd;
2758 
2759 	memset(&cmd, 0, sizeof(cmd));
2760 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2761 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2762 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2763 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2764 
2766 	/*
2767 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2768 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2769 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2770 	 * reserved.
2771 	 */
2772 	while (n > 0) {
2773 		int nq = min(n, 32);
2774 		int nq_packed = 0;
2775 		__be32 *qp = &cmd.iq0_to_iq2;
2776 
2777 		/*
2778 		 * Set up the firmware RSS command header to send the next
2779 		 * "nq" Ingress Queue IDs to the firmware.
2780 		 */
2781 		cmd.niqid = htons(nq);
2782 		cmd.startidx = htons(start);
2783 
2784 		/*
2785 		 * Advance past the "nq" entries being sent before the next loop.
2786 		 */
2787 		start += nq;
2788 		n -= nq;
2789 
2790 		/*
2791 		 * While there are still Ingress Queue IDs to stuff into the
2792 		 * current firmware RSS command, retrieve them from the
2793 		 * Ingress Queue ID array and insert them into the command.
2794 		 */
2795 		while (nq > 0) {
2796 			/*
2797 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2798 			 * around the Ingress Queue ID array if necessary) and
2799 			 * insert them into the firmware RSS command at the
2800 			 * current 3-tuple position within the command.
2801 			 */
2802 			u16 qbuf[3];
2803 			u16 *qbp = qbuf;
2804 			int nqbuf = min(3, nq);
2805 
2806 			nq -= nqbuf;
2807 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2808 			while (nqbuf && nq_packed < 32) {
2809 				nqbuf--;
2810 				nq_packed++;
2811 				*qbp++ = *rsp++;
2812 				if (rsp >= rsp_end)
2813 					rsp = rspq;
2814 			}
2815 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2816 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2817 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2818 		}
2819 
2820 		/*
2821 		 * Send this portion of the RSS table update to the firmware;
2822 		 * bail out on any errors.
2823 		 */
2824 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2825 		if (ret)
2826 			return ret;
2827 	}
2828 
2829 	return 0;
2830 }
2831 
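/*
 * Illustrative sketch: spreading eight ingress queue IDs across a
 * 128-entry RSS slice for a VI.  The queue IDs and slice size are
 * assumptions; t4_config_rss_range() repeats the eight values until all
 * 128 entries are written.
 */
static __unused int example_setup_rss(struct adapter *adap, int mbox,
				      unsigned int viid)
{
	static const u16 rspq[] = { 64, 65, 66, 67, 68, 69, 70, 71 };

	return t4_config_rss_range(adap, mbox, viid, 0, 128, rspq,
				   ARRAY_SIZE(rspq));
}
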
2832 /**
2833  *	t4_config_glbl_rss - configure the global RSS mode
2834  *	@adapter: the adapter
2835  *	@mbox: mbox to use for the FW command
2836  *	@mode: global RSS mode
2837  *	@flags: mode-specific flags
2838  *
2839  *	Sets the global RSS mode.
2840  */
2841 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2842 		       unsigned int flags)
2843 {
2844 	struct fw_rss_glb_config_cmd c;
2845 
2846 	memset(&c, 0, sizeof(c));
2847 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2848 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2849 	c.retval_len16 = htonl(FW_LEN16(c));
2850 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2851 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2852 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2853 		c.u.basicvirtual.mode_pkd =
2854 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2855 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2856 	} else
2857 		return -EINVAL;
2858 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2859 }
2860 
2861 /**
2862  *	t4_config_vi_rss - configure per VI RSS settings
2863  *	@adapter: the adapter
2864  *	@mbox: mbox to use for the FW command
2865  *	@viid: the VI id
2866  *	@flags: RSS flags
2867  *	@defq: id of the default RSS queue for the VI.
2868  *
2869  *	Configures VI-specific RSS properties.
2870  */
2871 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2872 		     unsigned int flags, unsigned int defq)
2873 {
2874 	struct fw_rss_vi_config_cmd c;
2875 
2876 	memset(&c, 0, sizeof(c));
2877 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2878 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2879 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2880 	c.retval_len16 = htonl(FW_LEN16(c));
2881 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2882 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2883 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2884 }
2885 
2886 /* Read an RSS table row */
2887 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2888 {
2889 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2890 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2891 				   5, 0, val);
2892 }
2893 
2894 /**
2895  *	t4_read_rss - read the contents of the RSS mapping table
2896  *	@adapter: the adapter
2897  *	@map: holds the contents of the RSS mapping table
2898  *
2899  *	Reads the contents of the RSS hash->queue mapping table.
2900  */
2901 int t4_read_rss(struct adapter *adapter, u16 *map)
2902 {
2903 	u32 val;
2904 	int i, ret;
2905 
2906 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2907 		ret = rd_rss_row(adapter, i, &val);
2908 		if (ret)
2909 			return ret;
2910 		*map++ = G_LKPTBLQUEUE0(val);
2911 		*map++ = G_LKPTBLQUEUE1(val);
2912 	}
2913 	return 0;
2914 }
2915 
2916 /**
2917  *	t4_read_rss_key - read the global RSS key
2918  *	@adap: the adapter
2919  *	@key: 10-entry array holding the 320-bit RSS key
2920  *
2921  *	Reads the global 320-bit RSS key.
2922  */
2923 void t4_read_rss_key(struct adapter *adap, u32 *key)
2924 {
2925 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2926 			 A_TP_RSS_SECRET_KEY0);
2927 }
2928 
2929 /**
2930  *	t4_write_rss_key - program one of the RSS keys
2931  *	@adap: the adapter
2932  *	@key: 10-entry array holding the 320-bit RSS key
2933  *	@idx: which RSS key to write
2934  *
2935  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2936  *	0..15 the corresponding entry in the RSS key table is written,
2937  *	otherwise the global RSS key is written.
2938  */
2939 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2940 {
2941 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2942 			  A_TP_RSS_SECRET_KEY0);
2943 	if (idx >= 0 && idx < 16)
2944 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2945 			     V_KEYWRADDR(idx) | F_KEYWREN);
2946 }
2947 
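/*
 * Illustrative sketch: programming the global 320-bit RSS secret key.  The
 * key words are placeholders (unlisted initializers default to zero); a
 * negative idx selects the global key rather than one of the 16 table
 * entries.
 */
static __unused void example_set_rss_key(struct adapter *adap)
{
	static const u32 key[10] = { 0x12345678, 0x9abcdef0 };

	t4_write_rss_key(adap, key, -1);
}
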
2948 /**
2949  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2950  *	@adapter: the adapter
2951  *	@index: the entry in the PF RSS table to read
2952  *	@valp: where to store the returned value
2953  *
2954  *	Reads the PF RSS Configuration Table at the specified index and returns
2955  *	the value found there.
2956  */
2957 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2958 {
2959 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2960 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2961 }
2962 
2963 /**
2964  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2965  *	@adapter: the adapter
2966  *	@index: the entry in the PF RSS table to write
2967  *	@val: the value to store
2968  *
2969  *	Writes the PF RSS Configuration Table at the specified index with the
2970  *	specified value.
2971  */
2972 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2973 {
2974 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2975 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2976 }
2977 
2978 /**
2979  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2980  *	@adapter: the adapter
2981  *	@index: the entry in the VF RSS table to read
2982  *	@vfl: where to store the returned VFL
2983  *	@vfh: where to store the returned VFH
2984  *
2985  *	Reads the VF RSS Configuration Table at the specified index and returns
2986  *	the (VFL, VFH) values found there.
2987  */
2988 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2989 			   u32 *vfl, u32 *vfh)
2990 {
2991 	u32 vrt;
2992 
2993 	/*
2994 	 * Request that the index'th VF Table values be read into VFL/VFH.
2995 	 */
2996 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2997 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2998 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2999 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3000 
3001 	/*
3002 	 * Grab the VFL/VFH values ...
3003 	 */
3004 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3005 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3006 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3007 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3008 }
3009 
3010 /**
3011  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3013  *	@adapter: the adapter
3014  *	@index: the entry in the VF RSS table to write
3015  *	@vfl: the VFL to store
3016  *	@vfh: the VFH to store
3017  *
3018  *	Writes the VF RSS Configuration Table at the specified index with the
3019  *	specified (VFL, VFH) values.
3020  */
3021 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3022 			    u32 vfl, u32 vfh)
3023 {
3024 	u32 vrt;
3025 
3026 	/*
3027 	 * Load up VFL/VFH with the values to be written ...
3028 	 */
3029 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3030 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3031 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3032 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3033 
3034 	/*
3035 	 * Write the VFL/VFH into the VF Table at the index'th location.
3036 	 */
3037 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3038 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3039 	vrt |= V_VFWRADDR(index) | F_VFWREN;
3040 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3041 }
3042 
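/*
 * Illustrative sketch: a read-modify-write of one VF RSS Configuration
 * Table entry built from the two helpers above.  Modifying only VFL is an
 * arbitrary choice for the example.
 */
static __unused void example_vf_rss_rmw(struct adapter *adap,
					unsigned int index, u32 vfl_set)
{
	u32 vfl, vfh;

	t4_read_rss_vf_config(adap, index, &vfl, &vfh);
	t4_write_rss_vf_config(adap, index, vfl | vfl_set, vfh);
}
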
3043 /**
3044  *	t4_read_rss_pf_map - read PF RSS Map
3045  *	@adapter: the adapter
3046  *
3047  *	Reads the PF RSS Map register and returns its value.
3048  */
3049 u32 t4_read_rss_pf_map(struct adapter *adapter)
3050 {
3051 	u32 pfmap;
3052 
3053 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3054 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3055 	return pfmap;
3056 }
3057 
3058 /**
3059  *	t4_write_rss_pf_map - write PF RSS Map
3060  *	@adapter: the adapter
3061  *	@pfmap: PF RSS Map value
3062  *
3063  *	Writes the specified value to the PF RSS Map register.
3064  */
3065 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3066 {
3067 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3068 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3069 }
3070 
3071 /**
3072  *	t4_read_rss_pf_mask - read PF RSS Mask
3073  *	@adapter: the adapter
3074  *
3075  *	Reads the PF RSS Mask register and returns its value.
3076  */
3077 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3078 {
3079 	u32 pfmask;
3080 
3081 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3082 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3083 	return pfmask;
3084 }
3085 
3086 /**
3087  *	t4_write_rss_pf_mask - write PF RSS Mask
3088  *	@adapter: the adapter
3089  *	@pfmask: PF RSS Mask value
3090  *
3091  *	Writes the specified value to the PF RSS Mask register.
3092  */
3093 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3094 {
3095 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3096 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3097 }
3098 
3099 /**
3100  *	t4_set_filter_mode - configure the optional components of filter tuples
3101  *	@adap: the adapter
3102  *	@mode_map: a bitmap selecting which optional filter components to enable
3103  *
3104  *	Sets the filter mode by selecting the optional components to enable
3105  *	in filter tuples.  Returns 0 on success and a negative error if the
3106  *	requested mode needs more bits than are available for optional
3107  *	components.
3108  */
3109 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3110 {
3111 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3112 
3113 	int i, nbits = 0;
3114 
3115 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3116 		if (mode_map & (1 << i))
3117 			nbits += width[i];
3118 	if (nbits > FILTER_OPT_LEN)
3119 		return -EINVAL;
3120 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3121 			  A_TP_VLAN_PRI_MAP);
3122 	return 0;
3123 }
3124 
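/*
 * Illustrative sketch: enabling two optional filter tuple components.
 * S_VLAN and S_PROTOCOL are assumed to lie in the S_FCOE..S_FRAGMENTATION
 * shift range the function iterates over, so the example is compiled out.
 * The call fails with -EINVAL if the selected widths exceed FILTER_OPT_LEN
 * bits.
 */
#if 0
static int example_set_filter_mode(struct adapter *adap)
{
	return t4_set_filter_mode(adap, (1 << S_VLAN) | (1 << S_PROTOCOL));
}
#endif
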
3125 /**
3126  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3127  *	@adap: the adapter
3128  *	@v4: holds the TCP/IP counter values
3129  *	@v6: holds the TCP/IPv6 counter values
3130  *
3131  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3132  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3133  */
3134 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3135 			 struct tp_tcp_stats *v6)
3136 {
3137 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3138 
3139 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3140 #define STAT(x)     val[STAT_IDX(x)]
3141 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3142 
3143 	if (v4) {
3144 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3145 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3146 		v4->tcpOutRsts = STAT(OUT_RST);
3147 		v4->tcpInSegs  = STAT64(IN_SEG);
3148 		v4->tcpOutSegs = STAT64(OUT_SEG);
3149 		v4->tcpRetransSegs = STAT64(RXT_SEG);
3150 	}
3151 	if (v6) {
3152 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3153 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3154 		v6->tcpOutRsts = STAT(OUT_RST);
3155 		v6->tcpInSegs  = STAT64(IN_SEG);
3156 		v6->tcpOutSegs = STAT64(OUT_SEG);
3157 		v6->tcpRetransSegs = STAT64(RXT_SEG);
3158 	}
3159 #undef STAT64
3160 #undef STAT
3161 #undef STAT_IDX
3162 }
3163 
3164 /**
3165  *	t4_tp_get_err_stats - read TP's error MIB counters
3166  *	@adap: the adapter
3167  *	@st: holds the counter values
3168  *
3169  *	Returns the values of TP's error counters.
3170  */
3171 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3172 {
3173 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3174 			 12, A_TP_MIB_MAC_IN_ERR_0);
3175 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3176 			 8, A_TP_MIB_TNL_CNG_DROP_0);
3177 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3178 			 4, A_TP_MIB_TNL_DROP_0);
3179 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3180 			 4, A_TP_MIB_OFD_VLN_DROP_0);
3181 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3182 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3183 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3184 			 2, A_TP_MIB_OFD_ARP_DROP);
3185 }
3186 
3187 /**
3188  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3189  *	@adap: the adapter
3190  *	@st: holds the counter values
3191  *
3192  *	Returns the values of TP's proxy counters.
3193  */
3194 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3195 {
3196 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3197 			 4, A_TP_MIB_TNL_LPBK_0);
3198 }
3199 
3200 /**
3201  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3202  *	@adap: the adapter
3203  *	@st: holds the counter values
3204  *
3205  *	Returns the values of TP's CPL counters.
3206  */
3207 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3208 {
3209 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3210 			 8, A_TP_MIB_CPL_IN_REQ_0);
3211 }
3212 
3213 /**
3214  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3215  *	@adap: the adapter
3216  *	@st: holds the counter values
3217  *
3218  *	Returns the values of TP's RDMA counters.
3219  */
3220 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3221 {
3222 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3223 			 2, A_TP_MIB_RQE_DFR_MOD);
3224 }
3225 
3226 /**
3227  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3228  *	@adap: the adapter
3229  *	@idx: the port index
3230  *	@st: holds the counter values
3231  *
3232  *	Returns the values of TP's FCoE counters for the selected port.
3233  */
3234 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3235 		       struct tp_fcoe_stats *st)
3236 {
3237 	u32 val[2];
3238 
3239 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3240 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3241 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3242 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3243 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3244 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3245 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3246 }
3247 
3248 /**
3249  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3250  *	@adap: the adapter
3251  *	@st: holds the counter values
3252  *
3253  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3254  */
3255 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3256 {
3257 	u32 val[4];
3258 
3259 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3260 			 A_TP_MIB_USM_PKTS);
3261 	st->frames = val[0];
3262 	st->drops = val[1];
3263 	st->octets = ((u64)val[2] << 32) | val[3];
3264 }
3265 
3266 /**
3267  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3268  *	@adap: the adapter
3269  *	@mtus: where to store the MTU values
3270  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3271  *
3272  *	Reads the HW path MTU table.
3273  */
3274 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3275 {
3276 	u32 v;
3277 	int i;
3278 
3279 	for (i = 0; i < NMTUS; ++i) {
3280 		t4_write_reg(adap, A_TP_MTU_TABLE,
3281 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3282 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3283 		mtus[i] = G_MTUVALUE(v);
3284 		if (mtu_log)
3285 			mtu_log[i] = G_MTUWIDTH(v);
3286 	}
3287 }
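
/*
 * Illustrative usage sketch (not part of the driver): dump the current
 * path MTU table.
 *
 *	u16 mtus[NMTUS];
 *	int i;
 *
 *	t4_read_mtu_tbl(adap, mtus, NULL);
 *	for (i = 0; i < NMTUS; i++)
 *		printf("MTU[%d] = %u\n", i, mtus[i]);
 */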
3288 
3289 /**
3290  *	t4_read_cong_tbl - reads the congestion control table
3291  *	@adap: the adapter
3292  *	@incr: where to store the alpha values
3293  *
3294  *	Reads the additive increments programmed into the HW congestion
3295  *	control table.
3296  */
3297 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3298 {
3299 	unsigned int mtu, w;
3300 
3301 	for (mtu = 0; mtu < NMTUS; ++mtu)
3302 		for (w = 0; w < NCCTRL_WIN; ++w) {
3303 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3304 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3305 			incr[mtu][w] = (u16)t4_read_reg(adap,
3306 						A_TP_CCTRL_TABLE) & 0x1fff;
3307 		}
3308 }
3309 
3310 /**
3311  *	t4_read_pace_tbl - read the pace table
3312  *	@adap: the adapter
3313  *	@pace_vals: holds the returned values
3314  *
3315  *	Returns the values of TP's pace table in microseconds.
3316  */
3317 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3318 {
3319 	unsigned int i, v;
3320 
3321 	for (i = 0; i < NTX_SCHED; i++) {
3322 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3323 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3324 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3325 	}
3326 }
3327 
3328 /**
3329  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3330  *	@adap: the adapter
3331  *	@addr: the indirect TP register address
3332  *	@mask: specifies the field within the register to modify
3333  *	@val: new value for the field
3334  *
3335  *	Sets a field of an indirect TP register to the given value.
3336  */
3337 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3338 			    unsigned int mask, unsigned int val)
3339 {
3340 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3341 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3342 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3343 }
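
/*
 * Example of the read-modify-write semantics above (illustrative values):
 * the call below clears only the bits selected by 0x000000f0, sets that
 * field to 0x30, and leaves every other bit of the register untouched:
 *
 *	t4_tp_wr_bits_indirect(adap, addr, 0x000000f0, 0x00000030);
 */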
3344 
3345 /**
3346  *	init_cong_ctrl - initialize congestion control parameters
3347  *	@a: the alpha values for congestion control
3348  *	@b: the beta values for congestion control
3349  *
3350  *	Initialize the congestion control parameters.
3351  */
3352 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3353 {
3354 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3355 	a[9] = 2;
3356 	a[10] = 3;
3357 	a[11] = 4;
3358 	a[12] = 5;
3359 	a[13] = 6;
3360 	a[14] = 7;
3361 	a[15] = 8;
3362 	a[16] = 9;
3363 	a[17] = 10;
3364 	a[18] = 14;
3365 	a[19] = 17;
3366 	a[20] = 21;
3367 	a[21] = 25;
3368 	a[22] = 30;
3369 	a[23] = 35;
3370 	a[24] = 45;
3371 	a[25] = 60;
3372 	a[26] = 80;
3373 	a[27] = 100;
3374 	a[28] = 200;
3375 	a[29] = 300;
3376 	a[30] = 400;
3377 	a[31] = 500;
3378 
3379 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3380 	b[9] = b[10] = 1;
3381 	b[11] = b[12] = 2;
3382 	b[13] = b[14] = b[15] = b[16] = 3;
3383 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3384 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3385 	b[28] = b[29] = 6;
3386 	b[30] = b[31] = 7;
3387 }
3388 
3389 /* The minimum additive increment value for the congestion control table */
3390 #define CC_MIN_INCR 2U
3391 
3392 /**
3393  *	t4_load_mtus - write the MTU and congestion control HW tables
3394  *	@adap: the adapter
3395  *	@mtus: the values for the MTU table
3396  *	@alpha: the values for the congestion control alpha parameter
3397  *	@beta: the values for the congestion control beta parameter
3398  *
3399  *	Write the HW MTU table with the supplied MTUs and the high-speed
3400  *	congestion control table with the supplied alpha, beta, and MTUs.
3401  *	We write the two tables together because the additive increments
3402  *	depend on the MTUs.
3403  */
3404 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3405 		  const unsigned short *alpha, const unsigned short *beta)
3406 {
3407 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3408 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3409 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3410 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3411 	};
3412 
3413 	unsigned int i, w;
3414 
3415 	for (i = 0; i < NMTUS; ++i) {
3416 		unsigned int mtu = mtus[i];
3417 		unsigned int log2 = fls(mtu);
3418 
3419 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3420 			log2--;
3421 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3422 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3423 
3424 		for (w = 0; w < NCCTRL_WIN; ++w) {
3425 			unsigned int inc;
3426 
3427 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3428 				  CC_MIN_INCR);
3429 
3430 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3431 				     (w << 16) | (beta[w] << 13) | inc);
3432 		}
3433 	}
3434 }
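
/*
 * Worked example of the rounding and increment logic above (values are
 * illustrative): for mtu = 1500, fls(1500) = 11 but bit 9 (512) of 1500
 * is clear, so log2 is rounded down to 10 and MTUWIDTH is written as 10.
 * With alpha[0] = 1 and avg_pkts[0] = 2, the additive increment for
 * window 0 is max((1500 - 40) * 1 / 2, CC_MIN_INCR) = 730.
 */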
3435 
3436 /**
3437  *	t4_set_pace_tbl - set the pace table
3438  *	@adap: the adapter
3439  *	@pace_vals: the pace values in microseconds
3440  *	@start: index of the first entry in the HW pace table to set
3441  *	@n: how many entries to set
3442  *
3443  *	Sets (a subset of the) HW pace table.
3444  */
3445 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3446 		     unsigned int start, unsigned int n)
3447 {
3448 	unsigned int vals[NTX_SCHED], i;
3449 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3450 
3451 	if (n > NTX_SCHED)
3452 		return -ERANGE;
3453 
3454 	/* convert values from us to dack ticks, rounding to closest value */
3455 	for (i = 0; i < n; i++, pace_vals++) {
3456 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3457 		if (vals[i] > 0x7ff)
3458 			return -ERANGE;
3459 		if (*pace_vals && vals[i] == 0)
3460 			return -ERANGE;
3461 	}
3462 	for (i = 0; i < n; i++, start++)
3463 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3464 	return 0;
3465 }
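
/*
 * Worked conversion example (illustrative): if one DACK tick is 100 ns
 * (tick_ns = 100), a requested pace value of 25 us becomes
 * (1000 * 25 + 50) / 100 = 250 ticks, which fits in the 11-bit field
 * (<= 0x7ff) and is accepted.
 */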
3466 
3467 /**
3468  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3469  *	@adap: the adapter
3470  *	@kbps: target rate in Kbps
3471  *	@sched: the scheduler index
3472  *
3473  *	Configure a Tx HW scheduler for the target rate.
3474  */
3475 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3476 {
3477 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3478 	unsigned int clk = adap->params.vpd.cclk * 1000;
3479 	unsigned int selected_cpt = 0, selected_bpt = 0;
3480 
3481 	if (kbps > 0) {
3482 		kbps *= 125;     /* -> bytes */
3483 		for (cpt = 1; cpt <= 255; cpt++) {
3484 			tps = clk / cpt;
3485 			bpt = (kbps + tps / 2) / tps;
3486 			if (bpt > 0 && bpt <= 255) {
3487 				v = bpt * tps;
3488 				delta = v >= kbps ? v - kbps : kbps - v;
3489 				if (delta < mindelta) {
3490 					mindelta = delta;
3491 					selected_cpt = cpt;
3492 					selected_bpt = bpt;
3493 				}
3494 			} else if (selected_cpt)
3495 				break;
3496 		}
3497 		if (!selected_cpt)
3498 			return -EINVAL;
3499 	}
3500 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3501 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3502 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3503 	if (sched & 1)
3504 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3505 	else
3506 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3507 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3508 	return 0;
3509 }
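
/*
 * Worked example of the (cpt, bpt) search above (illustrative numbers):
 * with cclk = 250000 (250 MHz) and kbps = 10000 (10 Mb/s), the target is
 * 10000 * 125 = 1250000 bytes/s.  cpt = 200 gives tps = 250000000 / 200 =
 * 1250000 ticks/s and bpt = 1 byte per tick, which hits the target
 * exactly (delta = 0), so that pair is selected.
 */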
3510 
3511 /**
3512  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3513  *	@adap: the adapter
3514  *	@sched: the scheduler index
3515  *	@ipg: the interpacket delay in tenths of nanoseconds
3516  *
3517  *	Set the interpacket delay for a HW packet rate scheduler.
3518  */
3519 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3520 {
3521 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3522 
3523 	/* convert ipg to nearest number of core clocks */
3524 	ipg *= core_ticks_per_usec(adap);
3525 	ipg = (ipg + 5000) / 10000;
3526 	if (ipg > M_TXTIMERSEPQ0)
3527 		return -EINVAL;
3528 
3529 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3530 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3531 	if (sched & 1)
3532 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3533 	else
3534 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3535 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3536 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3537 	return 0;
3538 }
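
/*
 * Worked conversion example (illustrative): with a 250 MHz core clock
 * (core_ticks_per_usec() = 250), an ipg of 1000 (i.e. 100.0 ns) becomes
 * (1000 * 250 + 5000) / 10000 = 25 core clocks, exactly 100 ns.
 */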
3539 
3540 /**
3541  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3542  *	@adap: the adapter
3543  *	@sched: the scheduler index
3544  *	@kbps: the rate in Kbps
3545  *	@ipg: the interpacket delay in tenths of nanoseconds
3546  *
3547  *	Return the current configuration of a HW Tx scheduler.
3548  */
3549 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3550 		     unsigned int *ipg)
3551 {
3552 	unsigned int v, addr, bpt, cpt;
3553 
3554 	if (kbps) {
3555 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3556 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3557 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3558 		if (sched & 1)
3559 			v >>= 16;
3560 		bpt = (v >> 8) & 0xff;
3561 		cpt = v & 0xff;
3562 		if (!cpt)
3563 			*kbps = 0;        /* scheduler disabled */
3564 		else {
3565 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3566 			*kbps = (v * bpt) / 125;
3567 		}
3568 	}
3569 	if (ipg) {
3570 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3571 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3572 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3573 		if (sched & 1)
3574 			v >>= 16;
3575 		v &= 0xffff;
3576 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3577 	}
3578 }
3579 
3580 /*
3581  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3582  * clocks.  The formula is
3583  *
3584  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3585  *
3586  * which is equivalent to
3587  *
3588  * bytes/s = 62.5 * bytes256 * ClkFreq_kHz
3589  */
3590 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3591 {
3592 	u64 v = bytes256 * adap->params.vpd.cclk;
3593 
3594 	return v * 62 + v / 2;
3595 }
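
/*
 * Note that "v * 62 + v / 2" is an integer-only evaluation of 62.5 * v.
 * Worked example (illustrative): bytes256 = 4 and cclk = 250000 (kHz)
 * give v = 1000000, so the rate is 62000000 + 500000 = 62500000 bytes/s,
 * the same as 4 * 256 * 250000000 / 4096.
 */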
3596 
3597 /**
3598  *	t4_get_chan_txrate - get the current per channel Tx rates
3599  *	@adap: the adapter
3600  *	@nic_rate: rates for NIC traffic
3601  *	@ofld_rate: rates for offloaded traffic
3602  *
3603  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3604  *	for each channel.
3605  */
3606 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3607 {
3608 	u32 v;
3609 
3610 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3611 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3612 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3613 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3614 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3615 
3616 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3617 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3618 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3619 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3620 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3621 }
3622 
3623 /**
3624  *	t4_set_trace_filter - configure one of the tracing filters
3625  *	@adap: the adapter
3626  *	@tp: the desired trace filter parameters
3627  *	@idx: which filter to configure
3628  *	@enable: whether to enable or disable the filter
3629  *
3630  *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3631  *	it indicates that the filter is already written in the register and it
3632  *	just needs to be enabled or disabled.
3633  */
3634 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3635     int idx, int enable)
3636 {
3637 	int i, ofst = idx * 4;
3638 	u32 data_reg, mask_reg, cfg;
3639 	u32 multitrc = F_TRCMULTIFILTER;
3640 	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3641 
3642 	if (idx < 0 || idx >= NTRACE)
3643 		return -EINVAL;
3644 
3645 	if (tp == NULL || !enable) {
3646 		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3647 		    enable ? en : 0);
3648 		return 0;
3649 	}
3650 
3651 	/*
3652 	 * TODO - After T4 data book is updated, specify the exact
3653 	 * section below.
3654 	 *
3655 	 * See T4 data book - MPS section for a complete description
3656 	 * of the below if..else handling of A_MPS_TRC_CFG register
3657 	 * value.
3658 	 */
3659 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3660 	if (cfg & F_TRCMULTIFILTER) {
3661 		/*
3662 		 * If multiple tracers are enabled, then maximum
3663 		 * capture size is 2.5KB (FIFO size of a single channel)
3664 		 * minus 2 flits for CPL_TRACE_PKT header.
3665 		 */
3666 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3667 			return -EINVAL;
3668 	} else {
3669 		/*
3670 		 * If multiple tracers are disabled then, to avoid deadlocks,
3671 		 * a maximum packet capture size of 9600 bytes is recommended.
3672 		 * Also, in this mode only trace0 can be enabled and running.
3673 		 */
3674 		multitrc = 0;
3675 		if (tp->snap_len > 9600 || idx)
3676 			return -EINVAL;
3677 	}
3678 
3679 	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3680 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3681 	    tp->min_len > M_TFMINPKTSIZE)
3682 		return -EINVAL;
3683 
3684 	/* stop the tracer we'll be changing */
3685 	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3686 
3687 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3688 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3689 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3690 
3691 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3692 		t4_write_reg(adap, data_reg, tp->data[i]);
3693 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3694 	}
3695 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3696 		     V_TFCAPTUREMAX(tp->snap_len) |
3697 		     V_TFMINPKTSIZE(tp->min_len));
3698 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3699 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3700 		     (is_t4(adap) ?
3701 		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3702 		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3703 
3704 	return 0;
3705 }
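
/*
 * Illustrative usage sketch (not part of the driver): program tracer 0
 * to capture the first 128 bytes of every frame seen on port 0.  A
 * zeroed data/mask pair matches all packets, because the complemented
 * mask written above marks every bit as don't-care.
 *
 *	struct trace_params tp;
 *
 *	memset(&tp, 0, sizeof(tp));
 *	tp.snap_len = 128;
 *	tp.port = 0;
 *	error = t4_set_trace_filter(adap, &tp, 0, 1);
 */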
3706 
3707 /**
3708  *	t4_get_trace_filter - query one of the tracing filters
3709  *	@adap: the adapter
3710  *	@tp: the current trace filter parameters
3711  *	@idx: which trace filter to query
3712  *	@enabled: non-zero if the filter is enabled
3713  *
3714  *	Returns the current settings of one of the HW tracing filters.
3715  */
3716 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3717 			 int *enabled)
3718 {
3719 	u32 ctla, ctlb;
3720 	int i, ofst = idx * 4;
3721 	u32 data_reg, mask_reg;
3722 
3723 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3724 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3725 
3726 	if (is_t4(adap)) {
3727 		*enabled = !!(ctla & F_TFEN);
3728 		tp->port =  G_TFPORT(ctla);
3729 		tp->invert = !!(ctla & F_TFINVERTMATCH);
3730 	} else {
3731 		*enabled = !!(ctla & F_T5_TFEN);
3732 		tp->port = G_T5_TFPORT(ctla);
3733 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3734 	}
3735 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3736 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3737 	tp->skip_ofst = G_TFOFFSET(ctla);
3738 	tp->skip_len = G_TFLENGTH(ctla);
3739 
3740 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3741 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3742 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3743 
3744 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3745 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3746 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3747 	}
3748 }
3749 
3750 /**
3751  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3752  *	@adap: the adapter
3753  *	@cnt: where to store the count statistics
3754  *	@cycles: where to store the cycle statistics
3755  *
3756  *	Returns performance statistics from PMTX.
3757  */
3758 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3759 {
3760 	int i;
3761 	u32 data[2];
3762 
3763 	for (i = 0; i < PM_NSTATS; i++) {
3764 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3765 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3766 		if (is_t4(adap))
3767 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3768 		else {
3769 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3770 					 A_PM_TX_DBG_DATA, data, 2,
3771 					 A_PM_TX_DBG_STAT_MSB);
3772 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3773 		}
3774 	}
3775 }
3776 
3777 /**
3778  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3779  *	@adap: the adapter
3780  *	@cnt: where to store the count statistics
3781  *	@cycles: where to store the cycle statistics
3782  *
3783  *	Returns performance statistics from PMRX.
3784  */
3785 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3786 {
3787 	int i;
3788 	u32 data[2];
3789 
3790 	for (i = 0; i < PM_NSTATS; i++) {
3791 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3792 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3793 		if (is_t4(adap))
3794 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3795 		else {
3796 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3797 					 A_PM_RX_DBG_DATA, data, 2,
3798 					 A_PM_RX_DBG_STAT_MSB);
3799 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3800 		}
3801 	}
3802 }
3803 
3804 /**
3805  *	get_mps_bg_map - return the buffer groups associated with a port
3806  *	@adap: the adapter
3807  *	@idx: the port index
3808  *
3809  *	Returns a bitmap indicating which MPS buffer groups are associated
3810  *	with the given port.  Bit i is set if buffer group i is used by the
3811  *	port.
3812  */
3813 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3814 {
3815 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3816 
3817 	if (n == 0)
3818 		return idx == 0 ? 0xf : 0;
3819 	if (n == 1)
3820 		return idx < 2 ? (3 << (2 * idx)) : 0;
3821 	return 1 << idx;
3822 }
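
/*
 * Reading of the mapping above: a NUMPORTS field of 0 means a single-port
 * configuration, so port 0 owns all four buffer groups (0xf); a value of
 * 1 means two ports owning two groups each (0x3 and 0xc); otherwise each
 * of four ports owns exactly one group (1 << idx).
 */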
3823 
3824 /**
3825  *      t4_get_port_stats_offset - collect port stats relative to a previous
3826  *                                 snapshot
3827  *      @adap: The adapter
3828  *      @idx: The port
3829  *      @stats: Current stats to fill
3830  *      @offset: Previous stats snapshot
3831  */
3832 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3833 		struct port_stats *stats,
3834 		struct port_stats *offset)
3835 {
3836 	u64 *s, *o;
3837 	int i;
3838 
3839 	t4_get_port_stats(adap, idx, stats);
3840 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3841 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
3842 			i++, s++, o++)
3843 		*s -= *o;
3844 }
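
/*
 * Illustrative usage sketch (not part of the driver): take a baseline
 * snapshot once, then read counters relative to it later.
 *
 *	struct port_stats base, delta;
 *
 *	t4_get_port_stats(adap, idx, &base);
 *	...
 *	t4_get_port_stats_offset(adap, idx, &delta, &base);
 *
 * delta now holds only what accumulated since the baseline was taken.
 */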
3845 
3846 /**
3847  *	t4_get_port_stats - collect port statistics
3848  *	@adap: the adapter
3849  *	@idx: the port index
3850  *	@p: the stats structure to fill
3851  *
3852  *	Collect statistics related to the given port from HW.
3853  */
3854 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3855 {
3856 	u32 bgmap = get_mps_bg_map(adap, idx);
3857 
3858 #define GET_STAT(name) \
3859 	t4_read_reg64(adap, \
3860 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3861 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3862 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3863 
3864 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3865 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3866 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3867 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3868 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3869 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3870 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3871 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3872 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3873 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3874 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3875 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3876 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3877 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3878 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3879 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3880 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3881 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3882 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3883 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3884 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3885 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3886 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3887 
3888 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3889 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3890 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3891 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3892 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3893 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3894 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3895 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3896 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3897 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3898 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3899 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3900 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3901 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3902 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3903 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3904 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3905 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3906 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3907 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3908 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3909 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3910 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3911 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3912 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3913 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3914 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3915 
3916 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3917 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3918 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3919 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3920 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3921 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3922 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3923 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3924 
3925 #undef GET_STAT
3926 #undef GET_STAT_COM
3927 }
3928 
3929 /**
3930  *	t4_clr_port_stats - clear port statistics
3931  *	@adap: the adapter
3932  *	@idx: the port index
3933  *
3934  *	Clear HW statistics for the given port.
3935  */
3936 void t4_clr_port_stats(struct adapter *adap, int idx)
3937 {
3938 	unsigned int i;
3939 	u32 bgmap = get_mps_bg_map(adap, idx);
3940 	u32 port_base_addr;
3941 
3942 	if (is_t4(adap))
3943 		port_base_addr = PORT_BASE(idx);
3944 	else
3945 		port_base_addr = T5_PORT_BASE(idx);
3946 
3947 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3948 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3949 		t4_write_reg(adap, port_base_addr + i, 0);
3950 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3951 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3952 		t4_write_reg(adap, port_base_addr + i, 0);
3953 	for (i = 0; i < 4; i++)
3954 		if (bgmap & (1 << i)) {
3955 			t4_write_reg(adap,
3956 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3957 			t4_write_reg(adap,
3958 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3959 		}
3960 }
3961 
3962 /**
3963  *	t4_get_lb_stats - collect loopback port statistics
3964  *	@adap: the adapter
3965  *	@idx: the loopback port index
3966  *	@p: the stats structure to fill
3967  *
3968  *	Return HW statistics for the given loopback port.
3969  */
3970 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3971 {
3972 	u32 bgmap = get_mps_bg_map(adap, idx);
3973 
3974 #define GET_STAT(name) \
3975 	t4_read_reg64(adap, \
3976 	(is_t4(adap) ? \
3977 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3978 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3979 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3980 
3981 	p->octets           = GET_STAT(BYTES);
3982 	p->frames           = GET_STAT(FRAMES);
3983 	p->bcast_frames     = GET_STAT(BCAST);
3984 	p->mcast_frames     = GET_STAT(MCAST);
3985 	p->ucast_frames     = GET_STAT(UCAST);
3986 	p->error_frames     = GET_STAT(ERROR);
3987 
3988 	p->frames_64        = GET_STAT(64B);
3989 	p->frames_65_127    = GET_STAT(65B_127B);
3990 	p->frames_128_255   = GET_STAT(128B_255B);
3991 	p->frames_256_511   = GET_STAT(256B_511B);
3992 	p->frames_512_1023  = GET_STAT(512B_1023B);
3993 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3994 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3995 	p->drop             = GET_STAT(DROP_FRAMES);
3996 
3997 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3998 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3999 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4000 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4001 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4002 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4003 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4004 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4005 
4006 #undef GET_STAT
4007 #undef GET_STAT_COM
4008 }
4009 
4010 /**
4011  *	t4_wol_magic_enable - enable/disable magic packet WoL
4012  *	@adap: the adapter
4013  *	@port: the physical port index
4014  *	@addr: MAC address expected in magic packets, %NULL to disable
4015  *
4016  *	Enables/disables magic packet wake-on-LAN for the selected port.
4017  */
4018 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4019 			 const u8 *addr)
4020 {
4021 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4022 
4023 	if (is_t4(adap)) {
4024 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4025 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4026 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4027 	} else {
4028 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4029 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4030 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4031 	}
4032 
4033 	if (addr) {
4034 		t4_write_reg(adap, mag_id_reg_l,
4035 			     (addr[2] << 24) | (addr[3] << 16) |
4036 			     (addr[4] << 8) | addr[5]);
4037 		t4_write_reg(adap, mag_id_reg_h,
4038 			     (addr[0] << 8) | addr[1]);
4039 	}
4040 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4041 			 V_MAGICEN(addr != NULL));
4042 }
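
/*
 * Illustrative usage sketch (not part of the driver): arm magic-packet
 * WoL with a unicast MAC (mac is a u8[6]), and later disarm it.
 *
 *	t4_wol_magic_enable(adap, port, mac);
 *	...
 *	t4_wol_magic_enable(adap, port, NULL);
 */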
4043 
4044 /**
4045  *	t4_wol_pat_enable - enable/disable pattern-based WoL
4046  *	@adap: the adapter
4047  *	@port: the physical port index
4048  *	@map: bitmap of which HW pattern filters to set
4049  *	@mask0: byte mask for bytes 0-63 of a packet
4050  *	@mask1: byte mask for bytes 64-127 of a packet
4051  *	@crc: Ethernet CRC for selected bytes
4052  *	@enable: enable/disable switch
4053  *
4054  *	Sets the pattern filters indicated in @map to mask out the bytes
4055  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4056  *	the resulting packet against @crc.  If @enable is %true pattern-based
4057  *	WoL is enabled, otherwise disabled.
4058  */
4059 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4060 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4061 {
4062 	int i;
4063 	u32 port_cfg_reg;
4064 
4065 	if (is_t4(adap))
4066 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4067 	else
4068 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4069 
4070 	if (!enable) {
4071 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4072 		return 0;
4073 	}
4074 	if (map > 0xff)
4075 		return -EINVAL;
4076 
4077 #define EPIO_REG(name) \
4078 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4079 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4080 
4081 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4082 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4083 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4084 
4085 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4086 		if (!(map & 1))
4087 			continue;
4088 
4089 		/* write byte masks */
4090 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4091 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4092 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4093 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4094 			return -ETIMEDOUT;
4095 
4096 		/* write CRC */
4097 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4098 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4099 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4100 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4101 			return -ETIMEDOUT;
4102 	}
4103 #undef EPIO_REG
4104 
4105 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4106 	return 0;
4107 }
4108 
4109 /**
4110  *	t4_mk_filtdelwr - create a delete filter WR
4111  *	@ftid: the filter ID
4112  *	@wr: the filter work request to populate
4113  *	@qid: ingress queue to receive the delete notification
4114  *
4115  *	Creates a filter work request to delete the supplied filter.  If @qid is
4116  *	negative the delete notification is suppressed.
4117  */
4118 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4119 {
4120 	memset(wr, 0, sizeof(*wr));
4121 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4122 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4123 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4124 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4125 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4126 	if (qid >= 0)
4127 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4128 }
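
/*
 * Illustrative usage sketch (not part of the driver): build a delete
 * filter work request whose completion is reported to ingress queue
 * qid; pass a negative qid to suppress the notification.
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(ftid, &wr, qid);
 *
 * wr is now ready to be handed to the firmware.
 */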
4129 
4130 #define INIT_CMD(var, cmd, rd_wr) do { \
4131 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4132 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4133 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4134 } while (0)
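
/*
 * For reference, INIT_CMD(c, BYE, WRITE) expands to:
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */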
4135 
4136 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4137 {
4138 	struct fw_ldst_cmd c;
4139 
4140 	memset(&c, 0, sizeof(c));
4141 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4142 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4143 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4144 	c.u.addrval.addr = htonl(addr);
4145 	c.u.addrval.val = htonl(val);
4146 
4147 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4148 }
4149 
4150 /**
4151  *	t4_i2c_rd - read a byte from an i2c addressable device
4152  *	@adap: the adapter
4153  *	@mbox: mailbox to use for the FW command
4154  *	@port_id: the port id
4155  *	@dev_addr: the i2c device address
4156  *	@offset: the byte offset to read from
4157  *	@valp: where to store the value
4158  */
4159 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
4160 	       u8 dev_addr, u8 offset, u8 *valp)
4161 {
4162 	int ret;
4163 	struct fw_ldst_cmd c;
4164 
4165 	memset(&c, 0, sizeof(c));
4166 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4167 		F_FW_CMD_READ |
4168 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
4169 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4170 	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
4171 	c.u.i2c_deprecated.base = dev_addr;
4172 	c.u.i2c_deprecated.boffset = offset;
4173 
4174 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4175 	if (ret == 0)
4176 		*valp = c.u.i2c_deprecated.data;
4177 	return ret;
4178 }
4179 
4180 /**
4181  *	t4_mdio_rd - read a PHY register through MDIO
4182  *	@adap: the adapter
4183  *	@mbox: mailbox to use for the FW command
4184  *	@phy_addr: the PHY address
4185  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4186  *	@reg: the register to read
4187  *	@valp: where to store the value
4188  *
4189  *	Issues a FW command through the given mailbox to read a PHY register.
4190  */
4191 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4192 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4193 {
4194 	int ret;
4195 	struct fw_ldst_cmd c;
4196 
4197 	memset(&c, 0, sizeof(c));
4198 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4199 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4200 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4201 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4202 				   V_FW_LDST_CMD_MMD(mmd));
4203 	c.u.mdio.raddr = htons(reg);
4204 
4205 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4206 	if (ret == 0)
4207 		*valp = ntohs(c.u.mdio.rval);
4208 	return ret;
4209 }
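
/*
 * Illustrative usage sketch (the MMD/register numbers are examples only,
 * not taken from this file): read clause-45 register 1 of MMD 1
 * (PMA/PMD status) from the PHY at address phy_addr.
 *
 *	unsigned int stat;
 *
 *	error = t4_mdio_rd(adap, mbox, phy_addr, 1, 1, &stat);
 */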
4210 
4211 /**
4212  *	t4_mdio_wr - write a PHY register through MDIO
4213  *	@adap: the adapter
4214  *	@mbox: mailbox to use for the FW command
4215  *	@phy_addr: the PHY address
4216  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4217  *	@reg: the register to write
4218  *	@valp: value to write
4219  *
4220  *	Issues a FW command through the given mailbox to write a PHY register.
4221  */
4222 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4223 	       unsigned int mmd, unsigned int reg, unsigned int val)
4224 {
4225 	struct fw_ldst_cmd c;
4226 
4227 	memset(&c, 0, sizeof(c));
4228 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4229 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4230 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4231 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4232 				   V_FW_LDST_CMD_MMD(mmd));
4233 	c.u.mdio.raddr = htons(reg);
4234 	c.u.mdio.rval = htons(val);
4235 
4236 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4237 }
4238 
4239 /**
4240  *	t4_sge_ctxt_flush - flush the SGE context cache
4241  *	@adap: the adapter
4242  *	@mbox: mailbox to use for the FW command
4243  *
4244  *	Issues a FW command through the given mailbox to flush the
4245  *	SGE context cache.
4246  */
4247 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4248 {
4249 	int ret;
4250 	struct fw_ldst_cmd c;
4251 
4252 	memset(&c, 0, sizeof(c));
4253 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4254 			F_FW_CMD_READ |
4255 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4256 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4257 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4258 
4259 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4260 	return ret;
4261 }
4262 
4263 /**
4264  *	t4_sge_ctxt_rd - read an SGE context through FW
4265  *	@adap: the adapter
4266  *	@mbox: mailbox to use for the FW command
4267  *	@cid: the context id
4268  *	@ctype: the context type
4269  *	@data: where to store the context data
4270  *
4271  *	Issues a FW command through the given mailbox to read an SGE context.
4272  */
4273 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4274 		   enum ctxt_type ctype, u32 *data)
4275 {
4276 	int ret;
4277 	struct fw_ldst_cmd c;
4278 
4279 	if (ctype == CTXT_EGRESS)
4280 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4281 	else if (ctype == CTXT_INGRESS)
4282 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4283 	else if (ctype == CTXT_FLM)
4284 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4285 	else
4286 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4287 
4288 	memset(&c, 0, sizeof(c));
4289 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4290 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4291 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4292 	c.u.idctxt.physid = htonl(cid);
4293 
4294 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4295 	if (ret == 0) {
4296 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4297 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4298 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4299 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4300 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4301 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4302 	}
4303 	return ret;
4304 }
4305 
4306 /**
4307  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4308  *	@adap: the adapter
4309  *	@cid: the context id
4310  *	@ctype: the context type
4311  *	@data: where to store the context data
4312  *
4313  *	Reads an SGE context directly, bypassing FW.  This is only for
4314  *	debugging when FW is unavailable.
4315  */
4316 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4317 		      u32 *data)
4318 {
4319 	int i, ret;
4320 
4321 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4322 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4323 	if (!ret)
4324 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4325 			*data++ = t4_read_reg(adap, i);
4326 	return ret;
4327 }
4328 
4329 /**
4330  *	t4_fw_hello - establish communication with FW
4331  *	@adap: the adapter
4332  *	@mbox: mailbox to use for the FW command
4333  *	@evt_mbox: mailbox to receive async FW events
4334  *	@master: specifies the caller's willingness to be the device master
4335  *	@state: returns the current device state (if non-NULL)
4336  *
4337  *	Issues a command to establish communication with FW.  Returns either
4338  *	an error (negative integer) or the mailbox of the Master PF.
4339  */
4340 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4341 		enum dev_master master, enum dev_state *state)
4342 {
4343 	int ret;
4344 	struct fw_hello_cmd c;
4345 	u32 v;
4346 	unsigned int master_mbox;
4347 	int retries = FW_CMD_HELLO_RETRIES;
4348 
4349 retry:
4350 	memset(&c, 0, sizeof(c));
4351 	INIT_CMD(c, HELLO, WRITE);
4352 	c.err_to_clearinit = htonl(
4353 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4354 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4355 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4356 			M_FW_HELLO_CMD_MBMASTER) |
4357 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4358 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4359 		F_FW_HELLO_CMD_CLEARINIT);
4360 
4361 	/*
4362 	 * Issue the HELLO command to the firmware.  If it's not successful
4363 	 * but indicates that we got a "busy" or "timeout" condition, retry
4364 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4365 	 * retry limit, check to see if the firmware left us any error
4366 	 * information and report that if so ...
4367 	 */
4368 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4369 	if (ret != FW_SUCCESS) {
4370 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4371 			goto retry;
4372 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4373 			t4_report_fw_error(adap);
4374 		return ret;
4375 	}
4376 
4377 	v = ntohl(c.err_to_clearinit);
4378 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4379 	if (state) {
4380 		if (v & F_FW_HELLO_CMD_ERR)
4381 			*state = DEV_STATE_ERR;
4382 		else if (v & F_FW_HELLO_CMD_INIT)
4383 			*state = DEV_STATE_INIT;
4384 		else
4385 			*state = DEV_STATE_UNINIT;
4386 	}
4387 
4388 	/*
4389 	 * If we're not the Master PF then we need to wait around for the
4390 	 * Master PF Driver to finish setting up the adapter.
4391 	 *
4392 	 * Note that we also do this wait if we're a non-Master-capable PF and
4393 	 * there is no current Master PF; a Master PF may show up momentarily
4394 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4395 	 * OS loads lots of different drivers rapidly at the same time).  In
4396 	 * this case, the Master PF returned by the firmware will be
4397 	 * M_PCIE_FW_MASTER so the test below will work ...
4398 	 */
4399 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4400 	    master_mbox != mbox) {
4401 		int waiting = FW_CMD_HELLO_TIMEOUT;
4402 
4403 		/*
4404 		 * Wait for the firmware to either indicate an error or
4405 		 * initialized state.  If we see either of these we bail out
4406 		 * and report the issue to the caller.  If we exhaust the
4407 		 * "hello timeout" and we haven't exhausted our retries, try
4408 		 * again.  Otherwise bail with a timeout error.
4409 		 */
4410 		for (;;) {
4411 			u32 pcie_fw;
4412 
4413 			msleep(50);
4414 			waiting -= 50;
4415 
4416 			/*
4417 			 * If neither Error nor Initialized is indicated
4418 			 * by the firmware, keep waiting till we exhaust our
4419 			 * timeout ... and then retry if we haven't exhausted
4420 			 * our retries ...
4421 			 */
4422 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4423 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4424 				if (waiting <= 0) {
4425 					if (retries-- > 0)
4426 						goto retry;
4427 
4428 					return -ETIMEDOUT;
4429 				}
4430 				continue;
4431 			}
4432 
4433 			/*
4434 			 * We have either an Error or an Initialized condition;
4435 			 * report errors preferentially.
4436 			 */
4437 			if (state) {
4438 				if (pcie_fw & F_PCIE_FW_ERR)
4439 					*state = DEV_STATE_ERR;
4440 				else if (pcie_fw & F_PCIE_FW_INIT)
4441 					*state = DEV_STATE_INIT;
4442 			}
4443 
4444 			/*
4445 			 * If we arrived before a Master PF was selected and
4446 			 * one has since become valid, grab its identity for
4447 			 * our caller.
4448 			 */
4449 			if (master_mbox == M_PCIE_FW_MASTER &&
4450 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4451 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4452 			break;
4453 		}
4454 	}
4455 
4456 	return master_mbox;
4457 }
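
/*
 * Illustrative call sequence (not part of the driver; MASTER_MAY is
 * assumed to be the "willing but not insistent" value of enum
 * dev_master): contact the firmware at attach time.
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (master < 0)
 *		return master;
 *
 * A non-negative return is the mailbox of the Master PF; if it equals
 * our own mbox, we were selected as the Master PF.
 */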
4458 
4459 /**
4460  *	t4_fw_bye - end communication with FW
4461  *	@adap: the adapter
4462  *	@mbox: mailbox to use for the FW command
4463  *
4464  *	Issues a command to terminate communication with FW.
4465  */
4466 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4467 {
4468 	struct fw_bye_cmd c;
4469 
4470 	memset(&c, 0, sizeof(c));
4471 	INIT_CMD(c, BYE, WRITE);
4472 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4473 }
4474 
4475 /**
4476  *	t4_fw_reset - issue a reset to FW
4477  *	@adap: the adapter
4478  *	@mbox: mailbox to use for the FW command
4479  *	@reset: specifies the type of reset to perform
4480  *
4481  *	Issues a reset command of the specified type to FW.
4482  */
4483 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4484 {
4485 	struct fw_reset_cmd c;
4486 
4487 	memset(&c, 0, sizeof(c));
4488 	INIT_CMD(c, RESET, WRITE);
4489 	c.val = htonl(reset);
4490 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4491 }
4492 
4493 /**
4494  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4495  *	@adap: the adapter
4496  *	@mbox: mailbox to use for the FW RESET command (if desired)
4497  *	@force: force uP into RESET even if FW RESET command fails
4498  *
4499  *	Issues a RESET command to firmware (if desired) with a HALT indication
4500  *	and then puts the microprocessor into RESET state.  The RESET command
4501  *	will only be issued if a legitimate mailbox is provided (mbox <=
4502  *	M_PCIE_FW_MASTER).
4503  *
4504  *	This is generally used in order for the host to safely manipulate the
4505  *	adapter without fear of conflicting with whatever the firmware might
4506  *	be doing.  The only way out of this state is to RESTART the firmware
4507  *	...
4508  */
4509 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4510 {
4511 	int ret = 0;
4512 
4513 	/*
4514 	 * If a legitimate mailbox is provided, issue a RESET command
4515 	 * with a HALT indication.
4516 	 */
4517 	if (mbox <= M_PCIE_FW_MASTER) {
4518 		struct fw_reset_cmd c;
4519 
4520 		memset(&c, 0, sizeof(c));
4521 		INIT_CMD(c, RESET, WRITE);
4522 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4523 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4524 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4525 	}
4526 
4527 	/*
4528 	 * Normally we won't complete the operation if the firmware RESET
4529 	 * command fails but if our caller insists we'll go ahead and put the
4530 	 * uP into RESET.  This can be useful if the firmware is hung or even
4531 	 * missing ...  We'll have to take the risk of putting the uP into
4532 	 * RESET without the cooperation of firmware in that case.
4533 	 *
4534 	 * We also force the firmware's HALT flag to be on in case we bypassed
4535 	 * the firmware RESET command above or we're dealing with old firmware
4536 	 * which doesn't have the HALT capability.  This will serve as a flag
4537 	 * for the incoming firmware to know that it's coming out of a HALT
4538 	 * rather than a RESET ... if it's new enough to understand that ...
4539 	 */
4540 	if (ret == 0 || force) {
4541 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4542 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4543 	}
4544 
4545 	/*
4546 	 * And we always return the result of the firmware RESET command
4547 	 * even when we force the uP into RESET ...
4548 	 */
4549 	return ret;
4550 }
4551 
4552 /**
4553  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4554  *	@adap: the adapter
4555  *	@reset: if we want to do a RESET to restart things
4556  *
4557  *	Restart firmware previously halted by t4_fw_halt().  On successful
4558  *	return the previous PF Master remains as the new PF Master and there
4559  *	is no need to issue a new HELLO command, etc.
4560  *
4561  *	We do this in two ways:
4562  *
4563  *	 1. If we're dealing with newer firmware we'll simply want to take
4564  *	    the chip's microprocessor out of RESET.  This will cause the
4565  *	    firmware to start up from its start vector.  And then we'll loop
4566  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4567  *	    reset to 0) or we timeout.
4568  *
4569  *	 2. If we're dealing with older firmware then we'll need to RESET
4570  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4571  *	    flag and automatically RESET itself on startup.
4572  */
4573 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4574 {
4575 	if (reset) {
4576 		/*
4577 		 * Since we're directing the RESET instead of the firmware
4578 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4579 		 * bit.
4580 		 */
4581 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4582 
4583 		/*
4584 		 * If we've been given a valid mailbox, first try to get the
4585 		 * firmware to do the RESET.  If that works, great and we can
4586 		 * return success.  Otherwise, if we haven't been given a
4587 		 * valid mailbox or the RESET command failed, fall back to
4588 		 * hitting the chip with a hammer.
4589 		 */
4590 		if (mbox <= M_PCIE_FW_MASTER) {
4591 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4592 			msleep(100);
4593 			if (t4_fw_reset(adap, mbox,
4594 					F_PIORST | F_PIORSTMODE) == 0)
4595 				return 0;
4596 		}
4597 
4598 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4599 		msleep(2000);
4600 	} else {
4601 		int ms;
4602 
4603 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4604 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4605 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4606 				return FW_SUCCESS;
4607 			msleep(100);
4608 			ms += 100;
4609 		}
4610 		return -ETIMEDOUT;
4611 	}
4612 	return 0;
4613 }
4614 
4615 /**
4616  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4617  *	@adap: the adapter
4618  *	@mbox: mailbox to use for the FW RESET command (if desired)
4619  *	@fw_data: the firmware image to write
4620  *	@size: image size
4621  *	@force: force upgrade even if firmware doesn't cooperate
4622  *
4623  *	Perform all of the steps necessary for upgrading an adapter's
4624  *	firmware image.  Normally this requires the cooperation of the
4625  *	existing firmware in order to halt all existing activities
4626  *	but if an invalid mailbox token is passed in we skip that step
4627  *	(though we'll still put the adapter microprocessor into RESET in
4628  *	that case).
4629  *
4630  *	On successful return the new firmware will have been loaded and
4631  *	the adapter will have been fully RESET losing all previous setup
4632  *	state.  On unsuccessful return the adapter may be completely hosed ...
4633  *	positive errno indicates that the adapter is ~probably~ intact; a
4634  *	negative errno indicates that things are looking bad ...
4635  */
4636 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4637 		  const u8 *fw_data, unsigned int size, int force)
4638 {
4639 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4640 	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4641 	int reset, ret;
4642 
4643 	if (!bootstrap) {
4644 		ret = t4_fw_halt(adap, mbox, force);
4645 		if (ret < 0 && !force)
4646 			return ret;
4647 	}
4648 
4649 	ret = t4_load_fw(adap, fw_data, size);
4650 	if (ret < 0 || bootstrap)
4651 		return ret;
4652 
4653 	/*
4654 	 * Older versions of the firmware don't understand the new
4655 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4656 	 * restart.  So for newly loaded older firmware we'll have to do the
4657 	 * RESET for it so it starts up on a clean slate.  We can tell if
4658 	 * the newly loaded firmware will handle this right by checking
4659 	 * its header flags to see if it advertises the capability.
4660 	 */
4661 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4662 	return t4_fw_restart(adap, mbox, reset);
4663 }
4664 
4665 /**
4666  *	t4_fw_initialize - ask FW to initialize the device
4667  *	@adap: the adapter
4668  *	@mbox: mailbox to use for the FW command
4669  *
4670  *	Issues a command to FW to partially initialize the device.  This
4671  *	performs initialization that generally doesn't depend on user input.
4672  */
4673 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4674 {
4675 	struct fw_initialize_cmd c;
4676 
4677 	memset(&c, 0, sizeof(c));
4678 	INIT_CMD(c, INITIALIZE, WRITE);
4679 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4680 }
4681 
4682 /**
4683  *	t4_query_params - query FW or device parameters
4684  *	@adap: the adapter
4685  *	@mbox: mailbox to use for the FW command
4686  *	@pf: the PF
4687  *	@vf: the VF
4688  *	@nparams: the number of parameters
4689  *	@params: the parameter names
4690  *	@val: the parameter values
4691  *
4692  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4693  *	queried at once.
4694  */
4695 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4696 		    unsigned int vf, unsigned int nparams, const u32 *params,
4697 		    u32 *val)
4698 {
4699 	int i, ret;
4700 	struct fw_params_cmd c;
4701 	__be32 *p = &c.param[0].mnem;
4702 
4703 	if (nparams > 7)
4704 		return -EINVAL;
4705 
4706 	memset(&c, 0, sizeof(c));
4707 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4708 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4709 			    V_FW_PARAMS_CMD_VFN(vf));
4710 	c.retval_len16 = htonl(FW_LEN16(c));
4711 
4712 	for (i = 0; i < nparams; i++, p += 2, params++)
4713 		*p = htonl(*params);
4714 
4715 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4716 	if (ret == 0)
4717 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4718 			*val++ = ntohl(*p);
4719 	return ret;
4720 }
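
/*
 * Illustrative usage sketch (the parameter mnemonic macros are assumed
 * to come from t4fw_interface.h): query the device's port vector as a
 * single parameter.
 *
 *	u32 param, val;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	error = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 */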
4721 
4722 /**
4723  *	t4_set_params - sets FW or device parameters
4724  *	@adap: the adapter
4725  *	@mbox: mailbox to use for the FW command
4726  *	@pf: the PF
4727  *	@vf: the VF
4728  *	@nparams: the number of parameters
4729  *	@params: the parameter names
4730  *	@val: the parameter values
4731  *
4732  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4733  *	specified at once.
4734  */
4735 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4736 		  unsigned int vf, unsigned int nparams, const u32 *params,
4737 		  const u32 *val)
4738 {
4739 	struct fw_params_cmd c;
4740 	__be32 *p = &c.param[0].mnem;
4741 
4742 	if (nparams > 7)
4743 		return -EINVAL;
4744 
4745 	memset(&c, 0, sizeof(c));
4746 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4747 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4748 			    V_FW_PARAMS_CMD_VFN(vf));
4749 	c.retval_len16 = htonl(FW_LEN16(c));
4750 
4751 	while (nparams--) {
4752 		*p++ = htonl(*params);
4753 		params++;
4754 		*p++ = htonl(*val);
4755 		val++;
4756 	}
4757 
4758 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4759 }
4760 
4761 /**
4762  *	t4_cfg_pfvf - configure PF/VF resource limits
4763  *	@adap: the adapter
4764  *	@mbox: mailbox to use for the FW command
4765  *	@pf: the PF being configured
4766  *	@vf: the VF being configured
4767  *	@txq: the max number of egress queues
4768  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4769  *	@rxqi: the max number of interrupt-capable ingress queues
4770  *	@rxq: the max number of interruptless ingress queues
4771  *	@tc: the PCI traffic class
4772  *	@vi: the max number of virtual interfaces
4773  *	@cmask: the channel access rights mask for the PF/VF
4774  *	@pmask: the port access rights mask for the PF/VF
4775  *	@nexact: the maximum number of exact MPS filters
4776  *	@rcaps: read capabilities
4777  *	@wxcaps: write/execute capabilities
4778  *
4779  *	Configures resource limits and capabilities for a physical or virtual
4780  *	function.
4781  */
4782 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4783 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4784 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4785 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4786 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4787 {
4788 	struct fw_pfvf_cmd c;
4789 
4790 	memset(&c, 0, sizeof(c));
4791 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4792 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4793 			    V_FW_PFVF_CMD_VFN(vf));
4794 	c.retval_len16 = htonl(FW_LEN16(c));
4795 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4796 			       V_FW_PFVF_CMD_NIQ(rxq));
4797 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4798 			      V_FW_PFVF_CMD_PMASK(pmask) |
4799 			      V_FW_PFVF_CMD_NEQ(txq));
4800 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4801 				V_FW_PFVF_CMD_NEXACTF(nexact));
4802 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4803 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4804 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4805 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4806 }
4807 
4808 /**
4809  *	t4_alloc_vi_func - allocate a virtual interface
4810  *	@adap: the adapter
4811  *	@mbox: mailbox to use for the FW command
4812  *	@port: physical port associated with the VI
4813  *	@pf: the PF owning the VI
4814  *	@vf: the VF owning the VI
4815  *	@nmac: number of MAC addresses needed (1 to 5)
4816  *	@mac: the MAC addresses of the VI
4817  *	@rss_size: size of RSS table slice associated with this VI
4818  *	@portfunc: which Port Application Function MAC Address is desired
4819  *	@idstype: Intrusion Detection Type
4820  *
4821  *	Allocates a virtual interface for the given physical port.  If @mac is
4822  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4823  *	@mac should be large enough to hold @nmac Ethernet addresses; they are
4824  *	stored consecutively, so the space needed is @nmac * 6 bytes.
4825  *	Returns a negative error number or the non-negative VI id.
4826  */
4827 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4828 		     unsigned int port, unsigned int pf, unsigned int vf,
4829 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4830 		     unsigned int portfunc, unsigned int idstype)
4831 {
4832 	int ret;
4833 	struct fw_vi_cmd c;
4834 
4835 	memset(&c, 0, sizeof(c));
4836 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4837 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4838 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4839 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4840 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4841 			       V_FW_VI_CMD_FUNC(portfunc));
4842 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4843 	c.nmac = nmac - 1;
4844 
4845 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4846 	if (ret)
4847 		return ret;
4848 
4849 	if (mac) {
4850 		memcpy(mac, c.mac, sizeof(c.mac));
4851 		switch (nmac) {
4852 		case 5:
4853 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* FALLTHROUGH */
4854 		case 4:
4855 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* FALLTHROUGH */
4856 		case 3:
4857 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* FALLTHROUGH */
4858 		case 2:
4859 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4860 		}
4861 	}
4862 	if (rss_size)
4863 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4864 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4865 }
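
/*
 * Usage sketch (hypothetical caller): allocate a VI with two MAC addresses.
 * Per the note above, @mac must hold @nmac * 6 bytes.
 *
 *	u8 macs[2 * 6];			/* nmac == 2 */
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi_func(adap, adap->mbox, port, adap->pf, 0, 2,
 *	    macs, &rss_size, FW_VI_FUNC_ETH, 0);
 *	if (viid < 0)
 *		return (viid);		/* negative errno */
 */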
4866 
4867 /**
4868  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4869  *	@adap: the adapter
4870  *	@mbox: mailbox to use for the FW command
4871  *	@port: physical port associated with the VI
4872  *	@pf: the PF owning the VI
4873  *	@vf: the VF owning the VI
4874  *	@nmac: number of MAC addresses needed (1 to 5)
4875  *	@mac: the MAC addresses of the VI
4876  *	@rss_size: size of RSS table slice associated with this VI
4877  *
4878  *	Backwards-compatible convenience routine to allocate a Virtual
4879  *	Interface with an Ethernet Port Application Function and Intrusion
4880  *	Detection System disabled.
4881  */
4882 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4883 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4884 		unsigned int *rss_size)
4885 {
4886 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4887 				FW_VI_FUNC_ETH, 0);
4888 }
4889 
4890 /**
4891  *	t4_free_vi - free a virtual interface
4892  *	@adap: the adapter
4893  *	@mbox: mailbox to use for the FW command
4894  *	@pf: the PF owning the VI
4895  *	@vf: the VF owning the VI
4896  *	@viid: virtual interface identifier
4897  *
4898  *	Free a previously allocated virtual interface.
4899  */
4900 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4901 	       unsigned int vf, unsigned int viid)
4902 {
4903 	struct fw_vi_cmd c;
4904 
4905 	memset(&c, 0, sizeof(c));
4906 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4907 			    F_FW_CMD_REQUEST |
4908 			    F_FW_CMD_EXEC |
4909 			    V_FW_VI_CMD_PFN(pf) |
4910 			    V_FW_VI_CMD_VFN(vf));
4911 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4912 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4913 
4914 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4915 }
4916 
4917 /**
4918  *	t4_set_rxmode - set Rx properties of a virtual interface
4919  *	@adap: the adapter
4920  *	@mbox: mailbox to use for the FW command
4921  *	@viid: the VI id
4922  *	@mtu: the new MTU or -1
4923  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4924  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4925  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4926  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4927  *	@sleep_ok: if true we may sleep while awaiting command completion
4928  *
4929  *	Sets Rx properties of a virtual interface.
4930  */
4931 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4932 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4933 		  bool sleep_ok)
4934 {
4935 	struct fw_vi_rxmode_cmd c;
4936 
4937 	/* convert to FW values */
4938 	if (mtu < 0)
4939 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4940 	if (promisc < 0)
4941 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4942 	if (all_multi < 0)
4943 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4944 	if (bcast < 0)
4945 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4946 	if (vlanex < 0)
4947 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4948 
4949 	memset(&c, 0, sizeof(c));
4950 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4951 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4952 	c.retval_len16 = htonl(FW_LEN16(c));
4953 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4954 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4955 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4956 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4957 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4958 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4959 }
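
/*
 * Usage sketch: the -1 sentinels let a caller change one Rx property without
 * disturbing the others.  For example, to enable promiscuous mode only (VI
 * id assumed valid):
 *
 *	t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1, true);
 */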
4960 
4961 /**
4962  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4963  *	@adap: the adapter
4964  *	@mbox: mailbox to use for the FW command
4965  *	@viid: the VI id
4966  *	@free: if true any existing filters for this VI id are first removed
4967  *	@naddr: the number of MAC addresses to allocate filters for
4968  *	@addr: the MAC address(es)
4969  *	@idx: where to store the index of each allocated filter
4970  *	@hash: pointer to hash address filter bitmap
4971  *	@sleep_ok: call is allowed to sleep
4972  *
4973  *	Allocates an exact-match filter for each of the supplied addresses and
4974  *	sets it to the corresponding address.  If @idx is not %NULL it should
4975  *	have at least @naddr entries, each of which will be set to the index of
4976  *	the filter allocated for the corresponding MAC address.  If a filter
4977  *	could not be allocated for an address its index is set to 0xffff.
4978  *	If @hash is not %NULL addresses that fail to allocate an exact filter
4979  *	are hashed and update the hash filter bitmap pointed at by @hash.
4980  *
4981  *	Returns a negative error number or the number of filters allocated.
4982  */
4983 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4984 		      unsigned int viid, bool free, unsigned int naddr,
4985 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4986 {
4987 	int offset, ret = 0;
4988 	struct fw_vi_mac_cmd c;
4989 	unsigned int nfilters = 0;
4990 	unsigned int max_naddr = is_t4(adap) ?
4991 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
4992 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4993 	unsigned int rem = naddr;
4994 
4995 	if (naddr > max_naddr)
4996 		return -EINVAL;
4997 
4998 	for (offset = 0; offset < naddr; /**/) {
4999 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5000 					 ? rem
5001 					 : ARRAY_SIZE(c.u.exact));
5002 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5003 						     u.exact[fw_naddr]), 16);
5004 		struct fw_vi_mac_exact *p;
5005 		int i;
5006 
5007 		memset(&c, 0, sizeof(c));
5008 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5009 				     F_FW_CMD_REQUEST |
5010 				     F_FW_CMD_WRITE |
5011 				     V_FW_CMD_EXEC(free) |
5012 				     V_FW_VI_MAC_CMD_VIID(viid));
5013 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5014 					    V_FW_CMD_LEN16(len16));
5015 
5016 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5017 			p->valid_to_idx = htons(
5018 				F_FW_VI_MAC_CMD_VALID |
5019 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5020 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5021 		}
5022 
5023 		/*
5024 		 * It's okay if we run out of space in our MAC address arena.
5025 		 * Some of the addresses we submit may get stored so we need
5026 		 * to run through the reply to see what the results were ...
5027 		 */
5028 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5029 		if (ret && ret != -FW_ENOMEM)
5030 			break;
5031 
5032 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5033 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5034 
5035 			if (idx)
5036 				idx[offset+i] = (index >= max_naddr
5037 						 ? 0xffff
5038 						 : index);
5039 			if (index < max_naddr)
5040 				nfilters++;
5041 			else if (hash)
5042 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5043 		}
5044 
5045 		free = false;
5046 		offset += fw_naddr;
5047 		rem -= fw_naddr;
5048 	}
5049 
5050 	if (ret == 0 || ret == -FW_ENOMEM)
5051 		ret = nfilters;
5052 	return ret;
5053 }
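
/*
 * Usage sketch (maclist is a hypothetical array of pointers to 6-byte
 * addresses): request exact-match filters for two addresses, falling back to
 * the hash filter for any that don't fit:
 *
 *	u16 filt_idx[2];
 *	u64 hash = 0;
 *	int n = t4_alloc_mac_filt(adap, adap->mbox, viid, false, 2, maclist,
 *	    filt_idx, &hash, true);
 *	if (n >= 0 && hash != 0)
 *		t4_set_addr_hash(adap, adap->mbox, viid, false, hash, true);
 */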
5054 
5055 /**
5056  *	t4_change_mac - modifies the exact-match filter for a MAC address
5057  *	@adap: the adapter
5058  *	@mbox: mailbox to use for the FW command
5059  *	@viid: the VI id
5060  *	@idx: index of existing filter for old value of MAC address, or -1
5061  *	@addr: the new MAC address value
5062  *	@persist: whether a new MAC allocation should be persistent
5063  *	@add_smt: if true also add the address to the HW SMT
5064  *
5065  *	Modifies an exact-match filter and sets it to the new MAC address if
5066  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5067  *	latter case the address is added persistently if @persist is %true.
5068  *
5069  *	Note that in general it is not possible to modify the value of a given
5070  *	filter so the generic way to modify an address filter is to free the one
5071  *	being used by the old address value and allocate a new filter for the
5072  *	new address value.
5073  *
5074  *	Returns a negative error number or the index of the filter with the new
5075  *	MAC value.  Note that this index may differ from @idx.
5076  */
5077 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5078 		  int idx, const u8 *addr, bool persist, bool add_smt)
5079 {
5080 	int ret, mode;
5081 	struct fw_vi_mac_cmd c;
5082 	struct fw_vi_mac_exact *p = c.u.exact;
5083 	unsigned int max_mac_addr = is_t4(adap) ?
5084 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5085 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5086 
5087 	if (idx < 0)                             /* new allocation */
5088 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5089 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5090 
5091 	memset(&c, 0, sizeof(c));
5092 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5093 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5094 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5095 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5096 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5097 				V_FW_VI_MAC_CMD_IDX(idx));
5098 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5099 
5100 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5101 	if (ret == 0) {
5102 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5103 		if (ret >= max_mac_addr)
5104 			ret = -ENOMEM;
5105 	}
5106 	return ret;
5107 }
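
/*
 * Usage sketch: replace the VI's current unicast address and remember the
 * (possibly different) filter index that comes back.  xact_idx is a
 * hypothetical per-port copy of the active filter index, -1 initially:
 *
 *	int rc = t4_change_mac(adap, adap->mbox, viid, xact_idx, new_mac,
 *	    true, true);
 *	if (rc >= 0)
 *		xact_idx = rc;		/* index may have moved */
 */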
5108 
5109 /**
5110  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5111  *	@adap: the adapter
5112  *	@mbox: mailbox to use for the FW command
5113  *	@viid: the VI id
5114  *	@ucast: whether the hash filter should also match unicast addresses
5115  *	@vec: the value to be written to the hash filter
5116  *	@sleep_ok: call is allowed to sleep
5117  *
5118  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5119  */
5120 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5121 		     bool ucast, u64 vec, bool sleep_ok)
5122 {
5123 	struct fw_vi_mac_cmd c;
5124 
5125 	memset(&c, 0, sizeof(c));
5126 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5127 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5128 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5129 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5130 				    V_FW_CMD_LEN16(1));
5131 	c.u.hash.hashvec = cpu_to_be64(vec);
5132 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5133 }
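
/*
 * Usage sketch: build @vec with the same hash the exact-filter fallback uses
 * (mcaddr/nmcast are hypothetical):
 *
 *	u64 vec = 0;
 *	int i;
 *
 *	for (i = 0; i < nmcast; i++)
 *		vec |= 1ULL << hash_mac_addr(mcaddr[i]);
 *	t4_set_addr_hash(adap, adap->mbox, viid, false, vec, true);
 */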
5134 
5135 /**
5136  *	t4_enable_vi - enable/disable a virtual interface
5137  *	@adap: the adapter
5138  *	@mbox: mailbox to use for the FW command
5139  *	@viid: the VI id
5140  *	@rx_en: 1=enable Rx, 0=disable Rx
5141  *	@tx_en: 1=enable Tx, 0=disable Tx
5142  *
5143  *	Enables/disables a virtual interface.
5144  */
5145 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5146 		 bool rx_en, bool tx_en)
5147 {
5148 	struct fw_vi_enable_cmd c;
5149 
5150 	memset(&c, 0, sizeof(c));
5151 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5152 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5153 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5154 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5155 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5156 }
5157 
5158 /**
5159  *	t4_identify_port - identify a VI's port by blinking its LED
5160  *	@adap: the adapter
5161  *	@mbox: mailbox to use for the FW command
5162  *	@viid: the VI id
5163  *	@nblinks: how many times to blink LED at 2.5 Hz
5164  *
5165  *	Identifies a VI's port by blinking its LED.
5166  */
5167 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5168 		     unsigned int nblinks)
5169 {
5170 	struct fw_vi_enable_cmd c;
5171 
5172 	memset(&c, 0, sizeof(c));
5173 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5174 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5175 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5176 	c.blinkdur = htons(nblinks);
5177 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5178 }
5179 
5180 /**
5181  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5182  *	@adap: the adapter
5183  *	@mbox: mailbox to use for the FW command
5184  *	@start: %true to enable the queues, %false to disable them
5185  *	@pf: the PF owning the queues
5186  *	@vf: the VF owning the queues
5187  *	@iqid: ingress queue id
5188  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5189  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5190  *
5191  *	Starts or stops an ingress queue and its associated FLs, if any.
5192  */
5193 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5194 		     unsigned int pf, unsigned int vf, unsigned int iqid,
5195 		     unsigned int fl0id, unsigned int fl1id)
5196 {
5197 	struct fw_iq_cmd c;
5198 
5199 	memset(&c, 0, sizeof(c));
5200 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5201 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5202 			    V_FW_IQ_CMD_VFN(vf));
5203 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5204 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5205 	c.iqid = htons(iqid);
5206 	c.fl0id = htons(fl0id);
5207 	c.fl1id = htons(fl1id);
5208 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5209 }
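
/*
 * Usage sketch: stop an ingress queue that has one free list and no second
 * FL; 0xffff marks the absent FL1 (queue ids assumed valid):
 *
 *	t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0, iqid, fl0id,
 *	    0xffff);
 */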
5210 
5211 /**
5212  *	t4_iq_free - free an ingress queue and its FLs
5213  *	@adap: the adapter
5214  *	@mbox: mailbox to use for the FW command
5215  *	@pf: the PF owning the queues
5216  *	@vf: the VF owning the queues
5217  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5218  *	@iqid: ingress queue id
5219  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5220  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5221  *
5222  *	Frees an ingress queue and its associated FLs, if any.
5223  */
5224 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5225 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5226 	       unsigned int fl0id, unsigned int fl1id)
5227 {
5228 	struct fw_iq_cmd c;
5229 
5230 	memset(&c, 0, sizeof(c));
5231 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5232 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5233 			    V_FW_IQ_CMD_VFN(vf));
5234 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5235 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5236 	c.iqid = htons(iqid);
5237 	c.fl0id = htons(fl0id);
5238 	c.fl1id = htons(fl1id);
5239 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5240 }
5241 
5242 /**
5243  *	t4_eth_eq_free - free an Ethernet egress queue
5244  *	@adap: the adapter
5245  *	@mbox: mailbox to use for the FW command
5246  *	@pf: the PF owning the queue
5247  *	@vf: the VF owning the queue
5248  *	@eqid: egress queue id
5249  *
5250  *	Frees an Ethernet egress queue.
5251  */
5252 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5253 		   unsigned int vf, unsigned int eqid)
5254 {
5255 	struct fw_eq_eth_cmd c;
5256 
5257 	memset(&c, 0, sizeof(c));
5258 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5259 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5260 			    V_FW_EQ_ETH_CMD_VFN(vf));
5261 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5262 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5263 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5264 }
5265 
5266 /**
5267  *	t4_ctrl_eq_free - free a control egress queue
5268  *	@adap: the adapter
5269  *	@mbox: mailbox to use for the FW command
5270  *	@pf: the PF owning the queue
5271  *	@vf: the VF owning the queue
5272  *	@eqid: egress queue id
5273  *
5274  *	Frees a control egress queue.
5275  */
5276 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5277 		    unsigned int vf, unsigned int eqid)
5278 {
5279 	struct fw_eq_ctrl_cmd c;
5280 
5281 	memset(&c, 0, sizeof(c));
5282 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5283 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5284 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5285 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5286 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5287 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5288 }
5289 
5290 /**
5291  *	t4_ofld_eq_free - free an offload egress queue
5292  *	@adap: the adapter
5293  *	@mbox: mailbox to use for the FW command
5294  *	@pf: the PF owning the queue
5295  *	@vf: the VF owning the queue
5296  *	@eqid: egress queue id
5297  *
5298  *	Frees an offload egress queue.
5299  */
5300 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5301 		    unsigned int vf, unsigned int eqid)
5302 {
5303 	struct fw_eq_ofld_cmd c;
5304 
5305 	memset(&c, 0, sizeof(c));
5306 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5307 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5308 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5309 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5310 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5311 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5312 }
5313 
5314 /**
5315  *	t4_handle_fw_rpl - process a FW reply message
5316  *	@adap: the adapter
5317  *	@rpl: start of the FW message
5318  *
5319  *	Processes a FW message, such as a link state change message.
5320  */
5321 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5322 {
5323 	u8 opcode = *(const u8 *)rpl;
5324 	const struct fw_port_cmd *p = (const void *)rpl;
5325 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5326 
5327 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5328 		/* link/module state change message */
5329 		int speed = 0, fc = 0, i;
5330 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5331 		struct port_info *pi = NULL;
5332 		struct link_config *lc;
5333 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5334 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5335 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5336 
5337 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5338 			fc |= PAUSE_RX;
5339 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5340 			fc |= PAUSE_TX;
5341 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5342 			speed = SPEED_100;
5343 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5344 			speed = SPEED_1000;
5345 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5346 			speed = SPEED_10000;
5347 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5348 			speed = SPEED_40000;
5349 
5350 		for_each_port(adap, i) {
5351 			pi = adap2pinfo(adap, i);
5352 			if (pi->tx_chan == chan)
5353 				break;
5354 		}
5355 		lc = &pi->link_cfg;
5356 
5357 		if (link_ok != lc->link_ok || speed != lc->speed ||
5358 		    fc != lc->fc) {                    /* something changed */
5359 			int reason;
5360 
5361 			if (!link_ok && lc->link_ok)
5362 				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5363 			else
5364 				reason = -1;
5365 
5366 			lc->link_ok = link_ok;
5367 			lc->speed = speed;
5368 			lc->fc = fc;
5369 			lc->supported = ntohs(p->u.info.pcap);
5370 			t4_os_link_changed(adap, i, link_ok, reason);
5371 		}
5372 		if (mod != pi->mod_type) {
5373 			pi->mod_type = mod;
5374 			t4_os_portmod_changed(adap, i);
5375 		}
5376 	} else {
5377 		CH_WARN_RATELIMIT(adap,
5378 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5379 		return -EINVAL;
5380 	}
5381 	return 0;
5382 }
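
/*
 * Callers typically hand in the payload of a firmware message received on an
 * ingress queue.  A sketch, assuming a CPL_FW6_MSG handler:
 *
 *	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
 *	if (cpl->type == FW6_TYPE_CMD_RPL)
 *		t4_handle_fw_rpl(adap, &cpl->data[0]);
 */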
5383 
5384 /**
5385  *	get_pci_mode - determine a card's PCI mode
5386  *	@adapter: the adapter
5387  *	@p: where to store the PCI settings
5388  *
5389  *	Determines a card's PCI mode and associated parameters, such as speed
5390  *	and width.
5391  */
5392 static void __devinit get_pci_mode(struct adapter *adapter,
5393 				   struct pci_params *p)
5394 {
5395 	u16 val;
5396 	u32 pcie_cap;
5397 
5398 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5399 	if (pcie_cap) {
5400 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5401 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5402 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5403 	}
5404 }
5405 
5406 /**
5407  *	init_link_config - initialize a link's SW state
5408  *	@lc: structure holding the link state
5409  *	@caps: link capabilities
5410  *
5411  *	Initializes the SW state maintained for each link, including the link's
5412  *	capabilities and default speed/flow-control/autonegotiation settings.
5413  */
5414 static void __devinit init_link_config(struct link_config *lc,
5415 				       unsigned int caps)
5416 {
5417 	lc->supported = caps;
5418 	lc->requested_speed = 0;
5419 	lc->speed = 0;
5420 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5421 	if (lc->supported & FW_PORT_CAP_ANEG) {
5422 		lc->advertising = lc->supported & ADVERT_MASK;
5423 		lc->autoneg = AUTONEG_ENABLE;
5424 		lc->requested_fc |= PAUSE_AUTONEG;
5425 	} else {
5426 		lc->advertising = 0;
5427 		lc->autoneg = AUTONEG_DISABLE;
5428 	}
5429 }
5430 
5431 static int __devinit get_flash_params(struct adapter *adapter)
5432 {
5433 	int ret;
5434 	u32 info = 0;
5435 
5436 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5437 	if (!ret)
5438 		ret = sf1_read(adapter, 3, 0, 1, &info);
5439 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5440 	if (ret < 0)
5441 		return ret;
5442 
5443 	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
5444 		return -EINVAL;
5445 	info >>= 16;                           /* log2 of size */
5446 	if (info >= 0x14 && info < 0x18)
5447 		adapter->params.sf_nsec = 1 << (info - 16);
5448 	else if (info == 0x18)
5449 		adapter->params.sf_nsec = 64;
5450 	else
5451 		return -EINVAL;
5452 	adapter->params.sf_size = 1 << info;
5453 	return 0;
5454 }
5455 
5456 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5457 						  u8 range)
5458 {
5459 	u16 val;
5460 	u32 pcie_cap;
5461 
5462 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5463 	if (pcie_cap) {
5464 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5465 		val &= 0xfff0;
5466 		val |= range;
5467 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5468 	}
5469 }
5470 
5471 /**
5472  *	t4_prep_adapter - prepare SW and HW for operation
5473  *	@adapter: the adapter
5475  *
5476  *	Initialize adapter SW state for the various HW modules, set initial
5477  *	values for some adapter tunables, take PHYs out of reset, and
5478  *	initialize the MDIO interface.
5479  */
5480 int __devinit t4_prep_adapter(struct adapter *adapter)
5481 {
5482 	int ret;
5483 	uint16_t device_id;
5484 	uint32_t pl_rev;
5485 
5486 	get_pci_mode(adapter, &adapter->params.pci);
5487 
5488 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5489 	adapter->params.chipid = G_CHIPID(pl_rev);
5490 	adapter->params.rev = G_REV(pl_rev);
5491 	if (adapter->params.chipid == 0) {
5492 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5493 		adapter->params.chipid = CHELSIO_T4;
5494 
5495 		/* T4A1 chip is not supported */
5496 		if (adapter->params.rev == 1) {
5497 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5498 			return -EINVAL;
5499 		}
5500 	}
5501 	adapter->params.pci.vpd_cap_addr =
5502 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5503 
5504 	ret = get_flash_params(adapter);
5505 	if (ret < 0)
5506 		return ret;
5507 
5508 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5509 	if (ret < 0)
5510 		return ret;
5511 
5512 	/* Cards with real ASICs have the chipid in the PCIe device id */
5513 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5514 	if (device_id >> 12 == adapter->params.chipid)
5515 		adapter->params.cim_la_size = CIMLA_SIZE;
5516 	else {
5517 		/* FPGA */
5518 		adapter->params.fpga = 1;
5519 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5520 	}
5521 
5522 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5523 
5524 	/*
5525 	 * Default port and clock for debugging in case we can't reach FW.
5526 	 */
5527 	adapter->params.nports = 1;
5528 	adapter->params.portvec = 1;
5529 	adapter->params.vpd.cclk = 50000;
5530 
5531 	/* Set the PCIe completion timeout value to 4 seconds. */
5532 	set_pcie_completion_timeout(adapter, 0xd);
5533 	return 0;
5534 }
5535 
5536 /**
5537  *	t4_init_tp_params - initialize adap->params.tp
5538  *	@adap: the adapter
5539  *
5540  *	Initialize various fields of the adapter's TP Parameters structure.
5541  */
5542 int __devinit t4_init_tp_params(struct adapter *adap)
5543 {
5544 	int chan;
5545 	u32 v;
5546 
5547 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5548 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5549 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5550 
5551 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5552 	for (chan = 0; chan < NCHAN; chan++)
5553 		adap->params.tp.tx_modq[chan] = chan;
5554 
5555 	/*
5556 	 * Cache the adapter's Compressed Filter Mode and global Ingress
5557 	 * Configuration.
5558 	 */
5559 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5560 			 &adap->params.tp.vlan_pri_map, 1,
5561 			 A_TP_VLAN_PRI_MAP);
5562 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5563 			 &adap->params.tp.ingress_config, 1,
5564 			 A_TP_INGRESS_CONFIG);
5565 
5566 	/*
5567 	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5568 	 * shift positions of several elements of the Compressed Filter Tuple
5569 	 * for this adapter which we need frequently ...
5570 	 */
5571 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5572 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5573 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5574 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
5575 
5576 	/*
5577 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5578 	 * represents the presense of an Outer VLAN instead of a VNIC ID.
5579 	 */
5580 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
5581 		adap->params.tp.vnic_shift = -1;
5582 
5583 	return 0;
5584 }
5585 
5586 /**
5587  *	t4_filter_field_shift - calculate filter field shift
5588  *	@adap: the adapter
5589  *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5590  *
5591  *	Return the shift position of a filter field within the Compressed
5592  *	Filter Tuple.  The filter field is specified via its selection bit
5593  *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
5594  */
5595 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5596 {
5597 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5598 	unsigned int sel;
5599 	int field_shift;
5600 
5601 	if ((filter_mode & filter_sel) == 0)
5602 		return -1;
5603 
5604 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5605 		switch (filter_mode & sel) {
5606 		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5607 		case F_PORT:          field_shift += W_FT_PORT;          break;
5608 		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5609 		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5610 		case F_TOS:           field_shift += W_FT_TOS;           break;
5611 		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5612 		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5613 		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5614 		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5615 		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5616 		}
5617 	}
5618 	return field_shift;
5619 }
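
/*
 * Worked example (filter mode assumed for illustration): if the mode enables
 * only PORT, VLAN, and PROTOCOL, the enabled fields pack upward from bit 0
 * in selection-bit order, so:
 *
 *	t4_filter_field_shift(adap, F_PORT)     == 0
 *	t4_filter_field_shift(adap, F_VLAN)     == W_FT_PORT
 *	t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN
 */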
5620 
5621 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5622 {
5623 	u8 addr[6];
5624 	int ret, i, j;
5625 	struct fw_port_cmd c;
5626 	unsigned int rss_size;
5627 	adapter_t *adap = p->adapter;
5628 
5629 	memset(&c, 0, sizeof(c));
5630 
5631 	for (i = 0, j = -1; i <= p->port_id; i++) {
5632 		do {
5633 			j++;
5634 		} while ((adap->params.portvec & (1 << j)) == 0);
5635 	}
5636 
5637 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5638 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5639 			       V_FW_PORT_CMD_PORTID(j));
5640 	c.action_to_len16 = htonl(
5641 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5642 		FW_LEN16(c));
5643 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5644 	if (ret)
5645 		return ret;
5646 
5647 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5648 	if (ret < 0)
5649 		return ret;
5650 
5651 	p->viid = ret;
5652 	p->tx_chan = j;
5653 	p->lport = j;
5654 	p->rss_size = rss_size;
5655 	t4_os_set_hw_addr(adap, p->port_id, addr);
5656 
5657 	ret = ntohl(c.u.info.lstatus_to_modtype);
5658 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5659 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5660 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5661 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5662 
5663 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5664 
5665 	return 0;
5666 }
5667 
5668 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
5669 {
5670 	struct fw_sched_cmd cmd;
5671 
5672 	memset(&cmd, 0, sizeof(cmd));
5673 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5674 				      F_FW_CMD_REQUEST |
5675 				      F_FW_CMD_WRITE);
5676 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5677 
5678 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5679 	cmd.u.config.type = type;
5680 	cmd.u.config.minmaxen = minmaxen;
5681 
5682 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5683 			       NULL, 1);
5684 }
5685 
5686 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5687 		    int rateunit, int ratemode, int channel, int cl,
5688 		    int minrate, int maxrate, int weight, int pktsize)
5689 {
5690 	struct fw_sched_cmd cmd;
5691 
5692 	memset(&cmd, 0, sizeof(cmd));
5693 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5694 				      F_FW_CMD_REQUEST |
5695 				      F_FW_CMD_WRITE);
5696 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5697 
5698 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5699 	cmd.u.params.type = type;
5700 	cmd.u.params.level = level;
5701 	cmd.u.params.mode = mode;
5702 	cmd.u.params.ch = channel;
5703 	cmd.u.params.cl = cl;
5704 	cmd.u.params.unit = rateunit;
5705 	cmd.u.params.rate = ratemode;
5706 	cmd.u.params.min = cpu_to_be32(minrate);
5707 	cmd.u.params.max = cpu_to_be32(maxrate);
5708 	cmd.u.params.weight = cpu_to_be16(weight);
5709 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
5710 
5711 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5712 			       NULL, 1);
5713 }
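
/*
 * Usage sketch (enum names from t4fw_interface.h; channel, cl, and the rate
 * values are assumed for illustration): configure an absolute bit-rate limit
 * on a scheduling class:
 *
 *	t4_sched_params(adap, FW_SCHED_TYPE_PKTSCHED,
 *	    FW_SCHED_PARAMS_LEVEL_CL_RL, FW_SCHED_PARAMS_MODE_CLASS,
 *	    FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
 *	    channel, cl, 0, 100000, 0, 1500);
 */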
5714