/* xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision ce3adf4362fcca6a43e500b2531f0038adbfbd21) */
/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
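
/*
 * Illustrative sketch (not part of the driver): how a caller might use the
 * poll helper above.  A_SF_OP/F_BUSY are real register/field macros, but the
 * surrounding code and the `sc` adapter pointer are hypothetical.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(sc, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5, &val))
 *		return -EAGAIN;		// still busy after all attempts
 *	// val holds the register value observed at completion time
 */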

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
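
/*
 * Usage sketch (illustrative only): update one field of a register without
 * disturbing its neighbours.  The SGE timer macros named here are assumed
 * from t4_regs.h; `sc` and `ticks` are hypothetical.
 *
 *	t4_set_reg_field(sc, A_SGE_TIMER_VALUE_0_AND_1,
 *			 V_TIMERVALUE0(M_TIMERVALUE0), V_TIMERVALUE0(ticks));
 *
 * The mask picks the bits to clear; @val must lie within that mask.
 */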

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
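
/*
 * Example (hedged sketch): the TP PIO registers are a typical address/data
 * pair used with these helpers elsewhere in this driver.  Reading four
 * consecutive indirect registers starting at index 0x10; `sc` is
 * hypothetical.
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, 0x10);
 */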

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
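
/*
 * Caller sketch (modeled on the RESET command path in this file; the
 * surrounding function is hypothetical).  Commands are built in big-endian
 * form, padded to a multiple of 16 bytes, and usually sent through
 * t4_wr_mbox(), a thin wrapper around t4_wr_mbox_meat() with sleep_ok set.
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *	    F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */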

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from t4_regs.h.  Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence
 *	from the firmware's memory.  If this memory contains data structures
 *	which contain multi-byte integers, it's the caller's responsibility
 *	to perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
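
/*
 * Usage sketch (illustrative only): read 128 bytes from offset 0x1000 of
 * EDC0 into a word-aligned host buffer; error handling elided and `sc`
 * hypothetical.
 *
 *	__be32 buf[128 / sizeof(__be32)];
 *
 *	if (t4_mem_read(sc, MEM_EDC0, 0x1000, sizeof(buf), buf) == 0) {
 *		// buf[] now holds raw big-endian words of adapter memory
 *	}
 */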

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
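
/*
 * Worked example of the mapping above, assuming @fn = 1 and @sz = 1K
 * (so A = 1K):
 *
 *	t4_eeprom_ptov(0x000, 1, 1024) -> 31K		(core VPD area)
 *	t4_eeprom_ptov(0x400, 1, 1024) -> ES - 1K	(this PF's area)
 *	t4_eeprom_ptov(0x800, 1, 1024) -> 0x000		(everything else)
 */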

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - locate an information field keyword in the VPD
 *	@v: pointer to the buffered VPD data structure
 *	@kw: the keyword to search for
 *
 *	Returns the offset of the information field keyword's data within the
 *	buffered VPD, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}
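
/*
 * For reference, each VPD-R information field walked above has the standard
 * PCI VPD layout; VPD_INFO_FLD_HDR_SIZE covers the first three bytes:
 *
 *	bytes 0-1: two-character keyword, e.g. "SN"
 *	byte  2:   length of the data that follows
 *	bytes 3-:  field data
 *
 * The value returned by get_vpd_keyword_val() is the offset of byte 3,
 * i.e. the start of the field's data.
 */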

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
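
/*
 * Chaining sketch (illustrative): sf1_write()/sf1_read() are combined with
 * @cont set to keep a multi-byte flash transaction open.  Reading the 3-byte
 * JEDEC ID, in the style of the flash-probe code in this driver family:
 *
 *	u32 info;
 *
 *	if (sf1_write(adapter, 1, 1, 0, SF_RD_ID) == 0 &&
 *	    sf1_read(adapter, 3, 0, 1, &info) == 0) {
 *		// info holds the manufacturer and device ID bytes
 *	}
 */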

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., matches what's on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
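
/*
 * For reference, the packed version word checked above decodes with the
 * G_FW_HDR_FW_VER_* macros from t4fw_interface.h, e.g.:
 *
 *	u32 v = adapter->params.fw_vers;
 *	int major = G_FW_HDR_FW_VER_MAJOR(v);
 *	int minor = G_FW_HDR_FW_VER_MINOR(v);
 *	int micro = G_FW_HDR_FW_VER_MICRO(v);
 *	int build = G_FW_HDR_FW_VER_BUILD(v);
 */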

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* This will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		    "FW image (%d) is not suitable for this adapter (%d)\n",
		    hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max size: 1024 * 512B = 512KB */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - modify the device ID of the Boot BIOS image
 *	@device_id: the device ID to write
 *	@boot_data: the boot image to modify
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device IDs.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match the current adapter.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum.
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert the summed value to create the checksum and
			 * write the new checksum value directly into the
			 * boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match the current adapter.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
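
/*
 * Note on the checksum fix-up above: a legacy option ROM image is valid when
 * all header->size512 * 512 bytes sum to 0 mod 256.  Zeroing the checksum
 * byte, summing the image to S, and storing (u8)-S at offset 7 restores that
 * property; e.g. S = 0x37 yields a checksum byte of 0xc9, since
 * 0x37 + 0xc9 == 0x100 == 0 (mod 256).
 */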

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region.
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file.
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature.
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature.
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check that the Vendor ID matches the Chelsio Vendor ID.
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID of boot image is not Chelsio's\n");
		return -EINVAL;
	}

	/*
	 * Retrieve the adapter's device ID.
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* We want to deal with PF 0, so strip off the PF-4 indicator. */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check the PCIE Device ID.
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image.  This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/*
	 * Finally write the first page (the BIOS boot header); `header`
	 * still points at the start of the image, whereas boot_data has
	 * been advanced past the last page by the loop above.
	 */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}
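
/*
 * Caller sizing sketch (hypothetical buffers): the output arrays must hold
 * one entry per IBQ plus one per OBQ, with thresholds for the IBQs only.
 *
 *	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 thres[CIM_NUM_IBQ];
 *
 *	t4_read_cimq_cfg(sc, base, size, thres);
 */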

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10 ms before the IBQ debug read access is
		 * allowed.  Wait up to 1 second, polling every 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}
1663 
1664 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1665 {
1666 	return t4_cim_write(adap, addr, 1, &val);
1667 }
1668 
1669 /**
1670  *	t4_cim_ctl_read - read a block from CIM control region
1671  *	@adap: the adapter
1672  *	@addr: the start address within the CIM control region
1673  *	@n: number of words to read
1674  *	@valp: where to store the result
1675  *
1676  *	Reads a block of 4-byte words from the CIM control region.
1677  */
1678 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1679 		    unsigned int *valp)
1680 {
1681 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1682 }
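
/*
 * Illustrative sketch: fetch the first four words of the CIM control
 * region; t4_cim_ctl_read() adds the CIM_CTL_BASE offset for us.
 */
static int example_peek_cim_ctl(struct adapter *adap, unsigned int *vals)
{
	return t4_cim_ctl_read(adap, 0, 4, vals);	/* fills vals[0..3] */
}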
1683 
1684 /**
1685  *	t4_cim_read_la - read CIM LA capture buffer
1686  *	@adap: the adapter
1687  *	@la_buf: where to store the LA data
1688  *	@wrptr: the HW write pointer within the capture buffer
1689  *
1690  *	Reads the contents of the CIM LA buffer with the most recent entry at
1691  *	the end	of the returned data and with the entry at @wrptr first.
1692  *	We try to leave the LA in the running state we find it in.
1693  */
1694 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1695 {
1696 	int i, ret;
1697 	unsigned int cfg, val, idx;
1698 
1699 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1700 	if (ret)
1701 		return ret;
1702 
1703 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1704 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1705 		if (ret)
1706 			return ret;
1707 	}
1708 
1709 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1710 	if (ret)
1711 		goto restart;
1712 
1713 	idx = G_UPDBGLAWRPTR(val);
1714 	if (wrptr)
1715 		*wrptr = idx;
1716 
1717 	for (i = 0; i < adap->params.cim_la_size; i++) {
1718 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1719 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1720 		if (ret)
1721 			break;
1722 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1723 		if (ret)
1724 			break;
1725 		if (val & F_UPDBGLARDEN) {
1726 			ret = -ETIMEDOUT;
1727 			break;
1728 		}
1729 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1730 		if (ret)
1731 			break;
1732 		idx = (idx + 1) & M_UPDBGLARDPTR;
1733 	}
1734 restart:
1735 	if (cfg & F_UPDBGLAEN) {
1736 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1737 				      cfg & ~F_UPDBGLARDEN);
1738 		if (!ret)
1739 			ret = r;
1740 	}
1741 	return ret;
1742 }
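
/*
 * Illustrative caller sketch: the buffer must hold
 * adap->params.cim_la_size 32-bit entries; the write pointer output is
 * optional and skipped here by passing NULL.
 */
static int example_capture_cim_la(struct adapter *adap, u32 *buf)
{
	/* buf must have room for adap->params.cim_la_size words */
	return t4_cim_read_la(adap, buf, NULL);
}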
1743 
1744 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1745 			unsigned int *pif_req_wrptr,
1746 			unsigned int *pif_rsp_wrptr)
1747 {
1748 	int i, j;
1749 	u32 cfg, val, req, rsp;
1750 
1751 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1752 	if (cfg & F_LADBGEN)
1753 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1754 
1755 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1756 	req = G_POLADBGWRPTR(val);
1757 	rsp = G_PILADBGWRPTR(val);
1758 	if (pif_req_wrptr)
1759 		*pif_req_wrptr = req;
1760 	if (pif_rsp_wrptr)
1761 		*pif_rsp_wrptr = rsp;
1762 
1763 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1764 		for (j = 0; j < 6; j++) {
1765 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1766 				     V_PILADBGRDPTR(rsp));
1767 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1768 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1769 			req++;
1770 			rsp++;
1771 		}
1772 		req = (req + 2) & M_POLADBGRDPTR;
1773 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1774 	}
1775 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1776 }
1777 
1778 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1779 {
1780 	u32 cfg;
1781 	int i, j, idx;
1782 
1783 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1784 	if (cfg & F_LADBGEN)
1785 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1786 
1787 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1788 		for (j = 0; j < 5; j++) {
1789 			idx = 8 * i + j;
1790 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1791 				     V_PILADBGRDPTR(idx));
1792 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1793 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1794 		}
1795 	}
1796 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1797 }
1798 
1799 /**
1800  *	t4_tp_read_la - read TP LA capture buffer
1801  *	@adap: the adapter
1802  *	@la_buf: where to store the LA data
1803  *	@wrptr: the HW write pointer within the capture buffer
1804  *
1805  *	Reads the contents of the TP LA buffer with the most recent entry at
1806  *	the end	of the returned data and with the entry at @wrptr first.
1807  *	We leave the LA in the running state we find it in.
1808  */
1809 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1810 {
1811 	bool last_incomplete;
1812 	unsigned int i, cfg, val, idx;
1813 
1814 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1815 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1816 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1817 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1818 
1819 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1820 	idx = G_DBGLAWPTR(val);
1821 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1822 	if (last_incomplete)
1823 		idx = (idx + 1) & M_DBGLARPTR;
1824 	if (wrptr)
1825 		*wrptr = idx;
1826 
1827 	val &= 0xffff;
1828 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1829 	val |= adap->params.tp.la_mask;
1830 
1831 	for (i = 0; i < TPLA_SIZE; i++) {
1832 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1833 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1834 		idx = (idx + 1) & M_DBGLARPTR;
1835 	}
1836 
1837 	/* Wipe out last entry if it isn't valid */
1838 	if (last_incomplete)
1839 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1840 
1841 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1842 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1843 			     cfg | adap->params.tp.la_mask);
1844 }
1845 
1846 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1847 {
1848 	unsigned int i, j;
1849 
1850 	for (i = 0; i < 8; i++) {
1851 		u32 *p = la_buf + i;
1852 
1853 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1854 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1855 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1856 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1857 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1858 	}
1859 }
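
/*
 * Layout note (derived from the loop above): la_buf holds ULPRX_LA_SIZE
 * entries for each of the 8 LA selects, interleaved so that entry j of
 * select i lands at la_buf[j * 8 + i] rather than in contiguous runs.
 */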
1860 
1861 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1862 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1863 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1864 
1865 /**
1866  *	t4_link_start - apply link configuration to MAC/PHY
1867  *	@adap: the adapter
1868  *	@mbox: mbox to use for the FW command
1869  *	@port: the port id
1870  *	@lc: the requested link configuration
1871  *	Set up a port's MAC and PHY according to a desired link configuration.
1872  *	- If the PHY can auto-negotiate first decide what to advertise, then
1873  *	  enable/disable auto-negotiation as desired, and reset.
1874  *	- If the PHY does not auto-negotiate just reset it.
1875  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1876  *	  otherwise do it later based on the outcome of auto-negotiation.
1877  */
1878 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1879 		  struct link_config *lc)
1880 {
1881 	struct fw_port_cmd c;
1882 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1883 
1884 	lc->link_ok = 0;
1885 	if (lc->requested_fc & PAUSE_RX)
1886 		fc |= FW_PORT_CAP_FC_RX;
1887 	if (lc->requested_fc & PAUSE_TX)
1888 		fc |= FW_PORT_CAP_FC_TX;
1889 
1890 	memset(&c, 0, sizeof(c));
1891 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1892 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1893 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1894 				  FW_LEN16(c));
1895 
1896 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1897 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1898 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1899 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1900 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1901 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1902 	} else
1903 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1904 
1905 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1906 }
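
/*
 * Illustrative sketch (hypothetical mbox/port values): force 10G with
 * both pause directions and autonegotiation off.
 */
static int example_force_10g(struct adapter *adap, struct link_config *lc)
{
	lc->autoneg = AUTONEG_DISABLE;
	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	return t4_link_start(adap, 4 /* mbox, assumed */, 0 /* port */, lc);
}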
1907 
1908 /**
1909  *	t4_restart_aneg - restart autonegotiation
1910  *	@adap: the adapter
1911  *	@mbox: mbox to use for the FW command
1912  *	@port: the port id
1913  *
1914  *	Restarts autonegotiation for the selected port.
1915  */
1916 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1917 {
1918 	struct fw_port_cmd c;
1919 
1920 	memset(&c, 0, sizeof(c));
1921 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1922 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1923 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1924 				  FW_LEN16(c));
1925 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1926 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1927 }
1928 
1929 struct intr_info {
1930 	unsigned int mask;       /* bits to check in interrupt status */
1931 	const char *msg;         /* message to print or NULL */
1932 	short stat_idx;          /* stat counter to increment or -1 */
1933 	unsigned short fatal;    /* whether the condition reported is fatal */
1934 };
1935 
1936 /**
1937  *	t4_handle_intr_status - table driven interrupt handler
1938  *	@adapter: the adapter that generated the interrupt
1939  *	@reg: the interrupt status register to process
1940  *	@acts: table of interrupt actions
1941  *
1942  *	A table driven interrupt handler that applies a set of masks to an
1943  *	interrupt status word and performs the corresponding actions if the
1944  *	interrupts described by the mask have occurred.  The actions include
1945  *	optionally emitting a warning or alert message.  The table is terminated
1946  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1947  *	conditions.
1948  */
1949 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1950 				 const struct intr_info *acts)
1951 {
1952 	int fatal = 0;
1953 	unsigned int mask = 0;
1954 	unsigned int status = t4_read_reg(adapter, reg);
1955 
1956 	for ( ; acts->mask; ++acts) {
1957 		if (!(status & acts->mask))
1958 			continue;
1959 		if (acts->fatal) {
1960 			fatal++;
1961 			CH_ALERT(adapter, "%s (0x%x)\n",
1962 				 acts->msg, status & acts->mask);
1963 		} else if (acts->msg)
1964 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1965 					  acts->msg, status & acts->mask);
1966 		mask |= acts->mask;
1967 	}
1968 	status &= mask;
1969 	if (status)                           /* clear processed interrupts */
1970 		t4_write_reg(adapter, reg, status);
1971 	return fatal;
1972 }
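
/*
 * Minimal illustrative table for t4_handle_intr_status: raw example
 * masks (not real status bits), one fatal and one warn-only entry, and
 * the mandatory zero-mask terminator.
 */
static const struct intr_info example_intr_info[] = {
	{ 0x00000001, "example fatal condition", -1, 1 },
	{ 0x00000002, "example warning-only condition", -1, 0 },
	{ 0 }
};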
1973 
1974 /*
1975  * Interrupt handler for the PCIE module.
1976  */
1977 static void pcie_intr_handler(struct adapter *adapter)
1978 {
1979 	static struct intr_info sysbus_intr_info[] = {
1980 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1981 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1982 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1983 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1984 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1985 		{ 0 }
1986 	};
1987 	static struct intr_info pcie_port_intr_info[] = {
1988 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1989 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1990 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1991 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1992 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1993 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1994 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1995 		{ F_RDPE, "Rx data parity error", -1, 1 },
1996 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1997 		{ 0 }
1998 	};
1999 	static struct intr_info pcie_intr_info[] = {
2000 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2001 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2002 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2003 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2004 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2005 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2006 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2007 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2008 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2009 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2010 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2011 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2012 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2013 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2014 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2015 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2016 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2017 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2018 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2019 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2020 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2021 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2022 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2023 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2024 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2025 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2026 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2027 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2028 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2029 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2030 		  0 },
2031 		{ 0 }
2032 	};
2033 
2034 	static struct intr_info t5_pcie_intr_info[] = {
2035 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2036 		  -1, 1 },
2037 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2038 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2039 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2040 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2041 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2042 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2043 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2044 		  -1, 1 },
2045 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2046 		  -1, 1 },
2047 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2048 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2049 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2050 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2051 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2052 		  -1, 1 },
2053 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2054 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2055 		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
2056 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2057 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2058 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2059 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2060 		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2061 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2062 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2063 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2064 		  -1, 1 },
2065 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2066 		  -1, 1 },
2067 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2068 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2069 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2070 		{ F_READRSPERR, "Outbound read error", -1,
2071 		  0 },
2072 		{ 0 }
2073 	};
2074 
2075 	int fat;
2076 
2077 	fat = t4_handle_intr_status(adapter,
2078 				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2079 				    sysbus_intr_info) +
2080 	      t4_handle_intr_status(adapter,
2081 				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2082 				    pcie_port_intr_info) +
2083 	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2084 				    is_t4(adapter) ?
2085 				    pcie_intr_info : t5_pcie_intr_info);
2086 	if (fat)
2087 		t4_fatal_err(adapter);
2088 }
2089 
2090 /*
2091  * TP interrupt handler.
2092  */
2093 static void tp_intr_handler(struct adapter *adapter)
2094 {
2095 	static struct intr_info tp_intr_info[] = {
2096 		{ 0x3fffffff, "TP parity error", -1, 1 },
2097 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2098 		{ 0 }
2099 	};
2100 
2101 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2102 		t4_fatal_err(adapter);
2103 }
2104 
2105 /*
2106  * SGE interrupt handler.
2107  */
2108 static void sge_intr_handler(struct adapter *adapter)
2109 {
2110 	u64 v;
2111 	u32 err;
2112 
2113 	static struct intr_info sge_intr_info[] = {
2114 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2115 		  "SGE received CPL exceeding IQE size", -1, 1 },
2116 		{ F_ERR_INVALID_CIDX_INC,
2117 		  "SGE GTS CIDX increment too large", -1, 0 },
2118 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2119 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2120 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2121 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2122 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2123 		  0 },
2124 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2125 		  0 },
2126 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2127 		  0 },
2128 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2129 		  0 },
2130 		{ F_ERR_ING_CTXT_PRIO,
2131 		  "SGE too many priority ingress contexts", -1, 0 },
2132 		{ F_ERR_EGR_CTXT_PRIO,
2133 		  "SGE too many priority egress contexts", -1, 0 },
2134 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2135 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2136 		{ 0 }
2137 	};
2138 
2139 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2140 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2141 	if (v) {
2142 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2143 			 (unsigned long long)v);
2144 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2145 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2146 	}
2147 
2148 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2149 
2150 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2151 	if (err & F_ERROR_QID_VALID) {
2152 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2153 		if (err & F_UNCAPTURED_ERROR)
2154 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2155 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2156 			     F_UNCAPTURED_ERROR);
2157 	}
2158 
2159 	if (v != 0)
2160 		t4_fatal_err(adapter);
2161 }
2162 
2163 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2164 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2165 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2166 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2167 
2168 /*
2169  * CIM interrupt handler.
2170  */
2171 static void cim_intr_handler(struct adapter *adapter)
2172 {
2173 	static struct intr_info cim_intr_info[] = {
2174 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2175 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2176 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2177 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2178 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2179 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2180 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2181 		{ 0 }
2182 	};
2183 	static struct intr_info cim_upintr_info[] = {
2184 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2185 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2186 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2187 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2188 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2189 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2190 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2191 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2192 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2193 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2194 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2195 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2196 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2197 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2198 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2199 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2200 		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2201 		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2202 		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2203 		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2204 		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2205 		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2206 		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2207 		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2208 		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2209 		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2210 		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2211 		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2212 		{ 0 }
2213 	};
2214 	int fat;
2215 
2216 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2217 		t4_report_fw_error(adapter);
2218 
2219 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2220 				    cim_intr_info) +
2221 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2222 				    cim_upintr_info);
2223 	if (fat)
2224 		t4_fatal_err(adapter);
2225 }
2226 
2227 /*
2228  * ULP RX interrupt handler.
2229  */
2230 static void ulprx_intr_handler(struct adapter *adapter)
2231 {
2232 	static struct intr_info ulprx_intr_info[] = {
2233 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2234 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2235 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2236 		{ 0 }
2237 	};
2238 
2239 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2240 		t4_fatal_err(adapter);
2241 }
2242 
2243 /*
2244  * ULP TX interrupt handler.
2245  */
2246 static void ulptx_intr_handler(struct adapter *adapter)
2247 {
2248 	static struct intr_info ulptx_intr_info[] = {
2249 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2250 		  0 },
2251 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2252 		  0 },
2253 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2254 		  0 },
2255 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2256 		  0 },
2257 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2258 		{ 0 }
2259 	};
2260 
2261 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2262 		t4_fatal_err(adapter);
2263 }
2264 
2265 /*
2266  * PM TX interrupt handler.
2267  */
2268 static void pmtx_intr_handler(struct adapter *adapter)
2269 {
2270 	static struct intr_info pmtx_intr_info[] = {
2271 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2272 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2273 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2274 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2275 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2276 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2277 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2278 		  1 },
2279 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2280 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2281 		{ 0 }
2282 	};
2283 
2284 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2285 		t4_fatal_err(adapter);
2286 }
2287 
2288 /*
2289  * PM RX interrupt handler.
2290  */
2291 static void pmrx_intr_handler(struct adapter *adapter)
2292 {
2293 	static struct intr_info pmrx_intr_info[] = {
2294 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2295 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2296 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2297 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2298 		  1 },
2299 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2300 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2301 		{ 0 }
2302 	};
2303 
2304 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2305 		t4_fatal_err(adapter);
2306 }
2307 
2308 /*
2309  * CPL switch interrupt handler.
2310  */
2311 static void cplsw_intr_handler(struct adapter *adapter)
2312 {
2313 	static struct intr_info cplsw_intr_info[] = {
2314 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2315 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2316 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2317 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2318 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2319 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2320 		{ 0 }
2321 	};
2322 
2323 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2324 		t4_fatal_err(adapter);
2325 }
2326 
2327 /*
2328  * LE interrupt handler.
2329  */
2330 static void le_intr_handler(struct adapter *adap)
2331 {
2332 	static struct intr_info le_intr_info[] = {
2333 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2334 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2335 		{ F_PARITYERR, "LE parity error", -1, 1 },
2336 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2337 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2338 		{ 0 }
2339 	};
2340 
2341 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2342 		t4_fatal_err(adap);
2343 }
2344 
2345 /*
2346  * MPS interrupt handler.
2347  */
2348 static void mps_intr_handler(struct adapter *adapter)
2349 {
2350 	static struct intr_info mps_rx_intr_info[] = {
2351 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2352 		{ 0 }
2353 	};
2354 	static struct intr_info mps_tx_intr_info[] = {
2355 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2356 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2357 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2358 		  -1, 1 },
2359 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2360 		  -1, 1 },
2361 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2362 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2363 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2364 		{ 0 }
2365 	};
2366 	static struct intr_info mps_trc_intr_info[] = {
2367 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2368 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2369 		  1 },
2370 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2371 		{ 0 }
2372 	};
2373 	static struct intr_info mps_stat_sram_intr_info[] = {
2374 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2375 		{ 0 }
2376 	};
2377 	static struct intr_info mps_stat_tx_intr_info[] = {
2378 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2379 		{ 0 }
2380 	};
2381 	static struct intr_info mps_stat_rx_intr_info[] = {
2382 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2383 		{ 0 }
2384 	};
2385 	static struct intr_info mps_cls_intr_info[] = {
2386 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2387 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2388 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2389 		{ 0 }
2390 	};
2391 
2392 	int fat;
2393 
2394 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2395 				    mps_rx_intr_info) +
2396 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2397 				    mps_tx_intr_info) +
2398 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2399 				    mps_trc_intr_info) +
2400 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2401 				    mps_stat_sram_intr_info) +
2402 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2403 				    mps_stat_tx_intr_info) +
2404 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2405 				    mps_stat_rx_intr_info) +
2406 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2407 				    mps_cls_intr_info);
2408 
2409 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2410 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2411 	if (fat)
2412 		t4_fatal_err(adapter);
2413 }
2414 
2415 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2416 
2417 /*
2418  * EDC/MC interrupt handler.
2419  */
2420 static void mem_intr_handler(struct adapter *adapter, int idx)
2421 {
2422 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2423 
2424 	unsigned int addr, cnt_addr, v;
2425 
2426 	if (idx <= MEM_EDC1) {
2427 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2428 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2429 	} else {
2430 		if (is_t4(adapter)) {
2431 			addr = A_MC_INT_CAUSE;
2432 			cnt_addr = A_MC_ECC_STATUS;
2433 		} else {
2434 			addr = A_MC_P_INT_CAUSE;
2435 			cnt_addr = A_MC_P_ECC_STATUS;
2436 		}
2437 	}
2438 
2439 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2440 	if (v & F_PERR_INT_CAUSE)
2441 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2442 	if (v & F_ECC_CE_INT_CAUSE) {
2443 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2444 
2445 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2446 		CH_WARN_RATELIMIT(adapter,
2447 				  "%u %s correctable ECC data error%s\n",
2448 				  cnt, name[idx], cnt > 1 ? "s" : "");
2449 	}
2450 	if (v & F_ECC_UE_INT_CAUSE)
2451 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2452 			 name[idx]);
2453 
2454 	t4_write_reg(adapter, addr, v);
2455 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2456 		t4_fatal_err(adapter);
2457 }
2458 
2459 /*
2460  * MA interrupt handler.
2461  */
2462 static void ma_intr_handler(struct adapter *adapter)
2463 {
2464 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2465 
2466 	if (status & F_MEM_PERR_INT_CAUSE)
2467 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2468 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2469 	if (status & F_MEM_WRAP_INT_CAUSE) {
2470 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2471 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2472 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2473 			 G_MEM_WRAP_ADDRESS(v) << 4);
2474 	}
2475 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2476 	t4_fatal_err(adapter);
2477 }
2478 
2479 /*
2480  * SMB interrupt handler.
2481  */
2482 static void smb_intr_handler(struct adapter *adap)
2483 {
2484 	static struct intr_info smb_intr_info[] = {
2485 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2486 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2487 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2488 		{ 0 }
2489 	};
2490 
2491 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2492 		t4_fatal_err(adap);
2493 }
2494 
2495 /*
2496  * NC-SI interrupt handler.
2497  */
2498 static void ncsi_intr_handler(struct adapter *adap)
2499 {
2500 	static struct intr_info ncsi_intr_info[] = {
2501 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2502 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2503 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2504 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2505 		{ 0 }
2506 	};
2507 
2508 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2509 		t4_fatal_err(adap);
2510 }
2511 
2512 /*
2513  * XGMAC interrupt handler.
2514  */
2515 static void xgmac_intr_handler(struct adapter *adap, int port)
2516 {
2517 	u32 v, int_cause_reg;
2518 
2519 	if (is_t4(adap))
2520 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2521 	else
2522 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2523 
2524 	v = t4_read_reg(adap, int_cause_reg);
2525 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2526 	if (!v)
2527 		return;
2528 
2529 	if (v & F_TXFIFO_PRTY_ERR)
2530 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2531 	if (v & F_RXFIFO_PRTY_ERR)
2532 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2533 	t4_write_reg(adap, int_cause_reg, v);
2534 	t4_fatal_err(adap);
2535 }
2536 
2537 /*
2538  * PL interrupt handler.
2539  */
2540 static void pl_intr_handler(struct adapter *adap)
2541 {
2542 	static struct intr_info pl_intr_info[] = {
2543 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2544 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2545 		{ 0 }
2546 	};
2547 
2548 	static struct intr_info t5_pl_intr_info[] = {
2549 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2550 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2551 		{ 0 }
2552 	};
2553 
2554 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2555 	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2556 		t4_fatal_err(adap);
2557 }
2558 
2559 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2560 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2561 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2562 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2563 
2564 /**
2565  *	t4_slow_intr_handler - control path interrupt handler
2566  *	@adapter: the adapter
2567  *
2568  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2569  *	The designation 'slow' is because it involves register reads, while
2570  *	data interrupts typically don't involve any MMIOs.
2571  */
2572 int t4_slow_intr_handler(struct adapter *adapter)
2573 {
2574 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2575 
2576 	if (!(cause & GLBL_INTR_MASK))
2577 		return 0;
2578 	if (cause & F_CIM)
2579 		cim_intr_handler(adapter);
2580 	if (cause & F_MPS)
2581 		mps_intr_handler(adapter);
2582 	if (cause & F_NCSI)
2583 		ncsi_intr_handler(adapter);
2584 	if (cause & F_PL)
2585 		pl_intr_handler(adapter);
2586 	if (cause & F_SMB)
2587 		smb_intr_handler(adapter);
2588 	if (cause & F_XGMAC0)
2589 		xgmac_intr_handler(adapter, 0);
2590 	if (cause & F_XGMAC1)
2591 		xgmac_intr_handler(adapter, 1);
2592 	if (cause & F_XGMAC_KR0)
2593 		xgmac_intr_handler(adapter, 2);
2594 	if (cause & F_XGMAC_KR1)
2595 		xgmac_intr_handler(adapter, 3);
2596 	if (cause & F_PCIE)
2597 		pcie_intr_handler(adapter);
2598 	if (cause & F_MC)
2599 		mem_intr_handler(adapter, MEM_MC);
2600 	if (cause & F_EDC0)
2601 		mem_intr_handler(adapter, MEM_EDC0);
2602 	if (cause & F_EDC1)
2603 		mem_intr_handler(adapter, MEM_EDC1);
2604 	if (cause & F_LE)
2605 		le_intr_handler(adapter);
2606 	if (cause & F_TP)
2607 		tp_intr_handler(adapter);
2608 	if (cause & F_MA)
2609 		ma_intr_handler(adapter);
2610 	if (cause & F_PM_TX)
2611 		pmtx_intr_handler(adapter);
2612 	if (cause & F_PM_RX)
2613 		pmrx_intr_handler(adapter);
2614 	if (cause & F_ULP_RX)
2615 		ulprx_intr_handler(adapter);
2616 	if (cause & F_CPL_SWITCH)
2617 		cplsw_intr_handler(adapter);
2618 	if (cause & F_SGE)
2619 		sge_intr_handler(adapter);
2620 	if (cause & F_ULP_TX)
2621 		ulptx_intr_handler(adapter);
2622 
2623 	/* Clear the interrupts just processed for which we are the master. */
2624 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2625 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2626 	return 1;
2627 }
2628 
2629 /**
2630  *	t4_intr_enable - enable interrupts
2631  *	@adapter: the adapter whose interrupts should be enabled
2632  *
2633  *	Enable PF-specific interrupts for the calling function and the top-level
2634  *	interrupt concentrator for global interrupts.  Interrupts are already
2635  *	enabled at each module,	here we just enable the roots of the interrupt
2636  *	hierarchies.
2637  *
2638  *	Note: this function should be called only when the driver manages
2639  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2640  *	function at a time should be doing this.
2641  */
2642 void t4_intr_enable(struct adapter *adapter)
2643 {
2644 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2645 
2646 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2647 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2648 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2649 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2650 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2651 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2652 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2653 		     F_EGRESS_SIZE_ERR);
2654 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2655 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2656 }
2657 
2658 /**
2659  *	t4_intr_disable - disable interrupts
2660  *	@adapter: the adapter whose interrupts should be disabled
2661  *
2662  *	Disable interrupts.  We only disable the top-level interrupt
2663  *	concentrators.  The caller must be a PCI function managing global
2664  *	interrupts.
2665  */
2666 void t4_intr_disable(struct adapter *adapter)
2667 {
2668 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2669 
2670 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2671 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2672 }
2673 
2674 /**
2675  *	t4_intr_clear - clear all interrupts
2676  *	@adapter: the adapter whose interrupts should be cleared
2677  *
2678  *	Clears all interrupts.  The caller must be a PCI function managing
2679  *	global interrupts.
2680  */
2681 void t4_intr_clear(struct adapter *adapter)
2682 {
2683 	static const unsigned int cause_reg[] = {
2684 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2685 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2686 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2687 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2688 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2689 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2690 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2691 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2692 		A_TP_INT_CAUSE,
2693 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2694 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2695 		A_MPS_RX_PERR_INT_CAUSE,
2696 		A_CPL_INTR_CAUSE,
2697 		MYPF_REG(A_PL_PF_INT_CAUSE),
2698 		A_PL_PL_INT_CAUSE,
2699 		A_LE_DB_INT_CAUSE,
2700 	};
2701 
2702 	unsigned int i;
2703 
2704 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2705 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2706 
2707 	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2708 				A_MC_P_INT_CAUSE, 0xffffffff);
2709 
2710 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2711 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2712 }
2713 
2714 /**
2715  *	hash_mac_addr - return the hash value of a MAC address
2716  *	@addr: the 48-bit Ethernet MAC address
2717  *
2718  *	Hashes a MAC address according to the hash function used by HW inexact
2719  *	(hash) address matching.
2720  */
2721 static int hash_mac_addr(const u8 *addr)
2722 {
2723 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2724 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2725 	a ^= b;
2726 	a ^= (a >> 12);
2727 	a ^= (a >> 6);
2728 	return a & 0x3f;
2729 }
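
/*
 * Worked example: for MAC 00:07:43:12:34:56 the 24-bit halves are
 * 0x000743 and 0x123456; their XOR is 0x123315, the 12-bit fold gives
 * 0x123236, and the final 6-bit fold yields bucket 0x3e (62).
 */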
2730 
2731 /**
2732  *	t4_config_rss_range - configure a portion of the RSS mapping table
2733  *	@adapter: the adapter
2734  *	@mbox: mbox to use for the FW command
2735  *	@viid: virtual interface whose RSS subtable is to be written
2736  *	@start: start entry in the table to write
2737  *	@n: how many table entries to write
2738  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2739  *	@nrspq: number of values in @rspq
2740  *
2741  *	Programs the selected part of the VI's RSS mapping table with the
2742  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2743  *	until the full table range is populated.
2744  *
2745  *	The caller must ensure the values in @rspq are in the range allowed for
2746  *	@viid.
2747  */
2748 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2749 			int start, int n, const u16 *rspq, unsigned int nrspq)
2750 {
2751 	int ret;
2752 	const u16 *rsp = rspq;
2753 	const u16 *rsp_end = rspq + nrspq;
2754 	struct fw_rss_ind_tbl_cmd cmd;
2755 
2756 	memset(&cmd, 0, sizeof(cmd));
2757 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2758 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2759 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2760 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2761 
2762 
2763 	/*
2764 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2765 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2766 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2767 	 * reserved.
2768 	 */
2769 	while (n > 0) {
2770 		int nq = min(n, 32);
2771 		int nq_packed = 0;
2772 		__be32 *qp = &cmd.iq0_to_iq2;
2773 
2774 		/*
2775 		 * Set up the firmware RSS command header to send the next
2776 		 * "nq" Ingress Queue IDs to the firmware.
2777 		 */
2778 		cmd.niqid = htons(nq);
2779 		cmd.startidx = htons(start);
2780 
2781 		/*
2782 		 * Account for the "nq" entries being sent in this command.
2783 		 */
2784 		start += nq;
2785 		n -= nq;
2786 
2787 		/*
2788 		 * While there are still Ingress Queue IDs to stuff into the
2789 		 * current firmware RSS command, retrieve them from the
2790 		 * Ingress Queue ID array and insert them into the command.
2791 		 */
2792 		while (nq > 0) {
2793 			/*
2794 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2795 			 * around the Ingress Queue ID array if necessary) and
2796 			 * insert them into the firmware RSS command at the
2797 			 * current 3-tuple position within the command.
2798 			 */
2799 			u16 qbuf[3];
2800 			u16 *qbp = qbuf;
2801 			int nqbuf = min(3, nq);
2802 
2803 			nq -= nqbuf;
2804 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2805 			while (nqbuf && nq_packed < 32) {
2806 				nqbuf--;
2807 				nq_packed++;
2808 				*qbp++ = *rsp++;
2809 				if (rsp >= rsp_end)
2810 					rsp = rspq;
2811 			}
2812 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2813 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2814 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2815 		}
2816 
2817 		/*
2818 		 * Send this portion of the RSS table update to the firmware;
2819 		 * bail out on any errors.
2820 		 */
2821 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2822 		if (ret)
2823 			return ret;
2824 	}
2825 
2826 	return 0;
2827 }
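
/*
 * Illustrative sketch: fill a VI's 128-entry RSS slice by cycling over
 * four ingress queue ids.  The queue ids, mbox, and slice size are
 * assumptions for the example.
 */
static int example_rss_spread(struct adapter *adap, unsigned int viid)
{
	static const u16 iqs[] = { 100, 101, 102, 103 };	/* assumed */

	return t4_config_rss_range(adap, 4 /* mbox */, viid, 0, 128, iqs,
				   ARRAY_SIZE(iqs));
}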
2828 
2829 /**
2830  *	t4_config_glbl_rss - configure the global RSS mode
2831  *	@adapter: the adapter
2832  *	@mbox: mbox to use for the FW command
2833  *	@mode: global RSS mode
2834  *	@flags: mode-specific flags
2835  *
2836  *	Sets the global RSS mode.
2837  */
2838 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2839 		       unsigned int flags)
2840 {
2841 	struct fw_rss_glb_config_cmd c;
2842 
2843 	memset(&c, 0, sizeof(c));
2844 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2845 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2846 	c.retval_len16 = htonl(FW_LEN16(c));
2847 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2848 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2849 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2850 		c.u.basicvirtual.mode_pkd =
2851 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2852 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2853 	} else
2854 		return -EINVAL;
2855 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2856 }
2857 
2858 /**
2859  *	t4_config_vi_rss - configure per VI RSS settings
2860  *	@adapter: the adapter
2861  *	@mbox: mbox to use for the FW command
2862  *	@viid: the VI id
2863  *	@flags: RSS flags
2864  *	@defq: id of the default RSS queue for the VI.
2865  *
2866  *	Configures VI-specific RSS properties.
2867  */
2868 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2869 		     unsigned int flags, unsigned int defq)
2870 {
2871 	struct fw_rss_vi_config_cmd c;
2872 
2873 	memset(&c, 0, sizeof(c));
2874 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2875 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2876 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2877 	c.retval_len16 = htonl(FW_LEN16(c));
2878 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2879 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2880 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2881 }
2882 
2883 /* Read an RSS table row */
2884 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2885 {
2886 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2887 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2888 				   5, 0, val);
2889 }
2890 
2891 /**
2892  *	t4_read_rss - read the contents of the RSS mapping table
2893  *	@adapter: the adapter
2894  *	@map: holds the contents of the RSS mapping table
2895  *
2896  *	Reads the contents of the RSS hash->queue mapping table.
2897  */
2898 int t4_read_rss(struct adapter *adapter, u16 *map)
2899 {
2900 	u32 val;
2901 	int i, ret;
2902 
2903 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2904 		ret = rd_rss_row(adapter, i, &val);
2905 		if (ret)
2906 			return ret;
2907 		*map++ = G_LKPTBLQUEUE0(val);
2908 		*map++ = G_LKPTBLQUEUE1(val);
2909 	}
2910 	return 0;
2911 }
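
/*
 * Illustrative sketch: @map must hold RSS_NENTRIES 16-bit entries, two
 * per table row returned by rd_rss_row().
 */
static int example_dump_rss_map(struct adapter *adap, u16 map[RSS_NENTRIES])
{
	return t4_read_rss(adap, map);	/* map[i] = queue for bucket i */
}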
2912 
2913 /**
2914  *	t4_read_rss_key - read the global RSS key
2915  *	@adap: the adapter
2916  *	@key: 10-entry array holding the 320-bit RSS key
2917  *
2918  *	Reads the global 320-bit RSS key.
2919  */
2920 void t4_read_rss_key(struct adapter *adap, u32 *key)
2921 {
2922 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2923 			 A_TP_RSS_SECRET_KEY0);
2924 }
2925 
2926 /**
2927  *	t4_write_rss_key - program one of the RSS keys
2928  *	@adap: the adapter
2929  *	@key: 10-entry array holding the 320-bit RSS key
2930  *	@idx: which RSS key to write
2931  *
2932  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2933  *	0..15 the corresponding entry in the RSS key table is written,
2934  *	otherwise the global RSS key is written.
2935  */
2936 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2937 {
2938 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2939 			  A_TP_RSS_SECRET_KEY0);
2940 	if (idx >= 0 && idx < 16)
2941 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2942 			     V_KEYWRADDR(idx) | F_KEYWREN);
2943 }
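
/*
 * Illustrative sketch: program only the global RSS key; any negative
 * @idx skips the per-entry key table write above.
 */
static void example_set_global_rss_key(struct adapter *adap, const u32 key[10])
{
	t4_write_rss_key(adap, key, -1);
}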
2944 
2945 /**
2946  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2947  *	@adapter: the adapter
2948  *	@index: the entry in the PF RSS table to read
2949  *	@valp: where to store the returned value
2950  *
2951  *	Reads the PF RSS Configuration Table at the specified index and returns
2952  *	the value found there.
2953  */
2954 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2955 {
2956 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2957 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2958 }
2959 
2960 /**
2961  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2962  *	@adapter: the adapter
2963  *	@index: the entry in the PF RSS table to write
2964  *	@val: the value to store
2965  *
2966  *	Writes the PF RSS Configuration Table at the specified index with the
2967  *	specified value.
2968  */
2969 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2970 {
2971 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2972 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2973 }
2974 
2975 /**
2976  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2977  *	@adapter: the adapter
2978  *	@index: the entry in the VF RSS table to read
2979  *	@vfl: where to store the returned VFL
2980  *	@vfh: where to store the returned VFH
2981  *
2982  *	Reads the VF RSS Configuration Table at the specified index and returns
2983  *	the (VFL, VFH) values found there.
2984  */
2985 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2986 			   u32 *vfl, u32 *vfh)
2987 {
2988 	u32 vrt;
2989 
2990 	/*
2991 	 * Request that the index'th VF Table values be read into VFL/VFH.
2992 	 */
2993 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2994 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2995 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2996 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2997 
2998 	/*
2999 	 * Grab the VFL/VFH values ...
3000 	 */
3001 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3002 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3003 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3004 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3005 }
3006 
3007 /**
3008  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3009  *
3010  *	@adapter: the adapter
3011  *	@index: the entry in the VF RSS table to write
3012  *	@vfl: the VFL to store
3013  *	@vfh: the VFH to store
3014  *
3015  *	Writes the VF RSS Configuration Table at the specified index with the
3016  *	specified (VFL, VFH) values.
3017  */
3018 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3019 			    u32 vfl, u32 vfh)
3020 {
3021 	u32 vrt;
3022 
3023 	/*
3024 	 * Load up VFL/VFH with the values to be written ...
3025 	 */
3026 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3027 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3028 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3029 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3030 
3031 	/*
3032 	 * Write the VFL/VFH into the VF Table at the index'th location.
3033 	 */
3034 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3035 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3036 	vrt |= V_VFWRADDR(index) | F_VFWREN;
3037 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3038 }
3039 
3040 /**
3041  *	t4_read_rss_pf_map - read PF RSS Map
3042  *	@adapter: the adapter
3043  *
3044  *	Reads the PF RSS Map register and returns its value.
3045  */
3046 u32 t4_read_rss_pf_map(struct adapter *adapter)
3047 {
3048 	u32 pfmap;
3049 
3050 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3051 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3052 	return pfmap;
3053 }
3054 
3055 /**
3056  *	t4_write_rss_pf_map - write PF RSS Map
3057  *	@adapter: the adapter
3058  *	@pfmap: PF RSS Map value
3059  *
3060  *	Writes the specified value to the PF RSS Map register.
3061  */
3062 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3063 {
3064 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3065 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3066 }
3067 
3068 /**
3069  *	t4_read_rss_pf_mask - read PF RSS Mask
3070  *	@adapter: the adapter
3071  *
3072  *	Reads the PF RSS Mask register and returns its value.
3073  */
3074 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3075 {
3076 	u32 pfmask;
3077 
3078 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3079 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3080 	return pfmask;
3081 }
3082 
3083 /**
3084  *	t4_write_rss_pf_mask - write PF RSS Mask
3085  *	@adapter: the adapter
3086  *	@pfmask: PF RSS Mask value
3087  *
3088  *	Writes the specified value to the PF RSS Mask register.
3089  */
3090 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3091 {
3092 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3093 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3094 }
3095 
3096 /**
3097  *	t4_set_filter_mode - configure the optional components of filter tuples
3098  *	@adap: the adapter
3099  *	@mode_map: a bitmap selecting which optional filter components to enable
3100  *
3101  *	Sets the filter mode by selecting the optional components to enable
3102  *	in filter tuples.  Returns 0 on success and a negative error if the
3103  *	requested mode needs more bits than are available for optional
3104  *	components.
3105  */
3106 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3107 {
3108 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3109 
3110 	int i, nbits = 0;
3111 
3112 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3113 		if (mode_map & (1 << i))
3114 			nbits += width[i];
3115 	if (nbits > FILTER_OPT_LEN)
3116 		return -EINVAL;
3117 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3118 			  A_TP_VLAN_PRI_MAP);
3119 	return 0;
3120 }
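
/*
 * Illustrative sketch: enable only the VLAN and PROTOCOL components.
 * S_VLAN and S_PROTOCOL are assumed to be among the field offsets
 * (S_FCOE..S_FRAGMENTATION) iterated above; per the width table that
 * would consume 17 + 8 = 25 optional-component bits.
 */
static int example_filter_mode(struct adapter *adap)
{
	return t4_set_filter_mode(adap, (1 << S_VLAN) | (1 << S_PROTOCOL));
}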
3121 
3122 /**
3123  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3124  *	@adap: the adapter
3125  *	@v4: holds the TCP/IP counter values
3126  *	@v6: holds the TCP/IPv6 counter values
3127  *
3128  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3129  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3130  */
3131 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3132 			 struct tp_tcp_stats *v6)
3133 {
3134 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3135 
3136 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3137 #define STAT(x)     val[STAT_IDX(x)]
3138 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3139 
3140 	if (v4) {
3141 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3142 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3143 		v4->tcpOutRsts = STAT(OUT_RST);
3144 		v4->tcpInSegs  = STAT64(IN_SEG);
3145 		v4->tcpOutSegs = STAT64(OUT_SEG);
3146 		v4->tcpRetransSegs = STAT64(RXT_SEG);
3147 	}
3148 	if (v6) {
3149 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3150 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3151 		v6->tcpOutRsts = STAT(OUT_RST);
3152 		v6->tcpInSegs  = STAT64(IN_SEG);
3153 		v6->tcpOutSegs = STAT64(OUT_SEG);
3154 		v6->tcpRetransSegs = STAT64(RXT_SEG);
3155 	}
3156 #undef STAT64
3157 #undef STAT
3158 #undef STAT_IDX
3159 }
3160 
3161 /**
3162  *	t4_tp_get_err_stats - read TP's error MIB counters
3163  *	@adap: the adapter
3164  *	@st: holds the counter values
3165  *
3166  *	Returns the values of TP's error counters.
3167  */
3168 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3169 {
3170 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3171 			 12, A_TP_MIB_MAC_IN_ERR_0);
3172 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3173 			 8, A_TP_MIB_TNL_CNG_DROP_0);
3174 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3175 			 4, A_TP_MIB_TNL_DROP_0);
3176 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3177 			 4, A_TP_MIB_OFD_VLN_DROP_0);
3178 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3179 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3180 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3181 			 2, A_TP_MIB_OFD_ARP_DROP);
3182 }
3183 
3184 /**
3185  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3186  *	@adap: the adapter
3187  *	@st: holds the counter values
3188  *
3189  *	Returns the values of TP's proxy counters.
3190  */
3191 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3192 {
3193 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3194 			 4, A_TP_MIB_TNL_LPBK_0);
3195 }
3196 
3197 /**
3198  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3199  *	@adap: the adapter
3200  *	@st: holds the counter values
3201  *
3202  *	Returns the values of TP's CPL counters.
3203  */
3204 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3205 {
3206 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3207 			 8, A_TP_MIB_CPL_IN_REQ_0);
3208 }
3209 
3210 /**
3211  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3212  *	@adap: the adapter
3213  *	@st: holds the counter values
3214  *
3215  *	Returns the values of TP's RDMA counters.
3216  */
3217 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3218 {
3219 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3220 			 2, A_TP_MIB_RQE_DFR_MOD);
3221 }
3222 
3223 /**
3224  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3225  *	@adap: the adapter
3226  *	@idx: the port index
3227  *	@st: holds the counter values
3228  *
3229  *	Returns the values of TP's FCoE counters for the selected port.
3230  */
3231 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3232 		       struct tp_fcoe_stats *st)
3233 {
3234 	u32 val[2];
3235 
3236 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3237 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3238 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3239 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3240 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3241 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3242 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3243 }
3244 
3245 /**
3246  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3247  *	@adap: the adapter
3248  *	@st: holds the counter values
3249  *
3250  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3251  */
3252 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3253 {
3254 	u32 val[4];
3255 
3256 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3257 			 A_TP_MIB_USM_PKTS);
3258 	st->frames = val[0];
3259 	st->drops = val[1];
3260 	st->octets = ((u64)val[2] << 32) | val[3];
3261 }
3262 
3263 /**
3264  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3265  *	@adap: the adapter
3266  *	@mtus: where to store the MTU values
3267  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3268  *
3269  *	Reads the HW path MTU table.
3270  */
3271 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3272 {
3273 	u32 v;
3274 	int i;
3275 
3276 	for (i = 0; i < NMTUS; ++i) {
3277 		t4_write_reg(adap, A_TP_MTU_TABLE,
3278 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3279 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3280 		mtus[i] = G_MTUVALUE(v);
3281 		if (mtu_log)
3282 			mtu_log[i] = G_MTUWIDTH(v);
3283 	}
3284 }
3285 
3286 /**
3287  *	t4_read_cong_tbl - reads the congestion control table
3288  *	@adap: the adapter
3289  *	@incr: where to store the alpha values
3290  *
3291  *	Reads the additive increments programmed into the HW congestion
3292  *	control table.
3293  */
3294 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3295 {
3296 	unsigned int mtu, w;
3297 
3298 	for (mtu = 0; mtu < NMTUS; ++mtu)
3299 		for (w = 0; w < NCCTRL_WIN; ++w) {
3300 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3301 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3302 			incr[mtu][w] = (u16)t4_read_reg(adap,
3303 						A_TP_CCTRL_TABLE) & 0x1fff;
3304 		}
3305 }
3306 
3307 /**
3308  *	t4_read_pace_tbl - read the pace table
3309  *	@adap: the adapter
3310  *	@pace_vals: holds the returned values
3311  *
3312  *	Returns the values of TP's pace table in microseconds.
3313  */
3314 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3315 {
3316 	unsigned int i, v;
3317 
3318 	for (i = 0; i < NTX_SCHED; i++) {
3319 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3320 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3321 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3322 	}
3323 }
3324 
3325 /**
3326  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3327  *	@adap: the adapter
3328  *	@addr: the indirect TP register address
3329  *	@mask: specifies the field within the register to modify
3330  *	@val: new value for the field
3331  *
3332  *	Sets a field of an indirect TP register to the given value.
3333  */
3334 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3335 			    unsigned int mask, unsigned int val)
3336 {
3337 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3338 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3339 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3340 }
3341 
3342 /**
3343  *	init_cong_ctrl - initialize congestion control parameters
3344  *	@a: the alpha values for congestion control
3345  *	@b: the beta values for congestion control
3346  *
3347  *	Initialize the congestion control parameters.
3348  */
3349 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3350 {
3351 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3352 	a[9] = 2;
3353 	a[10] = 3;
3354 	a[11] = 4;
3355 	a[12] = 5;
3356 	a[13] = 6;
3357 	a[14] = 7;
3358 	a[15] = 8;
3359 	a[16] = 9;
3360 	a[17] = 10;
3361 	a[18] = 14;
3362 	a[19] = 17;
3363 	a[20] = 21;
3364 	a[21] = 25;
3365 	a[22] = 30;
3366 	a[23] = 35;
3367 	a[24] = 45;
3368 	a[25] = 60;
3369 	a[26] = 80;
3370 	a[27] = 100;
3371 	a[28] = 200;
3372 	a[29] = 300;
3373 	a[30] = 400;
3374 	a[31] = 500;
3375 
3376 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3377 	b[9] = b[10] = 1;
3378 	b[11] = b[12] = 2;
3379 	b[13] = b[14] = b[15] = b[16] = 3;
3380 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3381 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3382 	b[28] = b[29] = 6;
3383 	b[30] = b[31] = 7;
3384 }
3385 
3386 /* The minimum additive increment value for the congestion control table */
3387 #define CC_MIN_INCR 2U
3388 
3389 /**
3390  *	t4_load_mtus - write the MTU and congestion control HW tables
3391  *	@adap: the adapter
3392  *	@mtus: the values for the MTU table
3393  *	@alpha: the values for the congestion control alpha parameter
3394  *	@beta: the values for the congestion control beta parameter
3395  *
3396  *	Write the HW MTU table with the supplied MTUs and the high-speed
3397  *	congestion control table with the supplied alpha, beta, and MTUs.
3398  *	We write the two tables together because the additive increments
3399  *	depend on the MTUs.
3400  */
3401 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3402 		  const unsigned short *alpha, const unsigned short *beta)
3403 {
3404 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3405 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3406 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3407 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3408 	};
3409 
3410 	unsigned int i, w;
3411 
3412 	for (i = 0; i < NMTUS; ++i) {
3413 		unsigned int mtu = mtus[i];
3414 		unsigned int log2 = fls(mtu);
3415 
3416 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3417 			log2--;
3418 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3419 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3420 
3421 		for (w = 0; w < NCCTRL_WIN; ++w) {
3422 			unsigned int inc;
3423 
3424 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3425 				  CC_MIN_INCR);
3426 
3427 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3428 				     (w << 16) | (beta[w] << 13) | inc);
3429 		}
3430 	}
3431 }
3432 
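/*
 * Illustrative sketch (not part of the driver): the MTUWIDTH computation in
 * t4_load_mtus() rounds log2(mtu) to the nearer integer rather than always
 * rounding up.  fls() returns the 1-based index of the most significant set
 * bit, and the width is decremented when the bit just below the MSB is
 * clear.  For mtu = 1500 (binary 10111011100):
 *
 *	unsigned int log2 = fls(1500);		-- 11
 *	if (!(1500 & ((1 << 11) >> 2)))		-- 1500 & 512 == 0
 *		log2--;				-- width becomes 10
 *
 * so 1500 is encoded with width 10, 1024 being nearer than 2048.
 */
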
3433 /**
3434  *	t4_set_pace_tbl - set the pace table
3435  *	@adap: the adapter
3436  *	@pace_vals: the pace values in microseconds
3437  *	@start: index of the first entry in the HW pace table to set
3438  *	@n: how many entries to set
3439  *
3440  *	Sets (a subset of the) HW pace table.
3441  */
3442 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3443 		     unsigned int start, unsigned int n)
3444 {
3445 	unsigned int vals[NTX_SCHED], i;
3446 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3447 
3448 	if (n > NTX_SCHED)
3449 		return -ERANGE;
3450 
3451 	/* convert values from us to dack ticks, rounding to closest value */
3452 	for (i = 0; i < n; i++, pace_vals++) {
3453 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3454 		if (vals[i] > 0x7ff)
3455 			return -ERANGE;
3456 		if (*pace_vals && vals[i] == 0)
3457 			return -ERANGE;
3458 	}
3459 	for (i = 0; i < n; i++, start++)
3460 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3461 	return 0;
3462 }
3463 
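/*
 * Illustrative sketch (not part of the driver): dack_ticks_to_usec(adap,
 * 1000) is the duration of 1000 ticks in us, which is numerically the
 * duration of one tick in ns, so the loop above is plain round-to-nearest
 * division.  Assuming (hypothetically) a 120 ns tick and a requested pace
 * of 5 us:
 *
 *	vals[i] = (1000 * 5 + 120 / 2) / 120 = 5060 / 120 = 42 ticks
 *
 * i.e. 42 * 120 ns = 5.04 us, the closest representable value.
 */
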
3464 /**
3465  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3466  *	@adap: the adapter
3467  *	@kbps: target rate in Kbps
3468  *	@sched: the scheduler index
3469  *
3470  *	Configure a Tx HW scheduler for the target rate.
3471  */
3472 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3473 {
3474 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3475 	unsigned int clk = adap->params.vpd.cclk * 1000;
3476 	unsigned int selected_cpt = 0, selected_bpt = 0;
3477 
3478 	if (kbps > 0) {
3479 		kbps *= 125;     /* -> bytes */
3480 		for (cpt = 1; cpt <= 255; cpt++) {
3481 			tps = clk / cpt;
3482 			bpt = (kbps + tps / 2) / tps;
3483 			if (bpt > 0 && bpt <= 255) {
3484 				v = bpt * tps;
3485 				delta = v >= kbps ? v - kbps : kbps - v;
3486 				if (delta < mindelta) {
3487 					mindelta = delta;
3488 					selected_cpt = cpt;
3489 					selected_bpt = bpt;
3490 				}
3491 			} else if (selected_cpt)
3492 				break;
3493 		}
3494 		if (!selected_cpt)
3495 			return -EINVAL;
3496 	}
3497 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3498 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3499 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3500 	if (sched & 1)
3501 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3502 	else
3503 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3504 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3505 	return 0;
3506 }
3507 
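/*
 * Illustrative sketch (not part of the driver): the loop above is an
 * exhaustive search over the 8-bit clocks-per-tick divider (cpt) for the
 * 8-bit bytes-per-tick value (bpt) minimizing |bpt * (clk / cpt) - rate|.
 * Assuming (hypothetically) a 200 MHz core clock and a 1 Gbps target
 * (kbps = 1000000, i.e. 125000000 bytes/s), some of the candidates are:
 *
 *	cpt = 2: tps = 100000000, bpt = 1 -> 100000000 B/s, delta 25000000
 *	cpt = 3: tps =  66666666, bpt = 2 -> 133333332 B/s, delta  8333332
 *	cpt = 8: tps =  25000000, bpt = 5 -> 125000000 B/s, delta        0
 *
 * so cpt = 8, bpt = 5 would be selected (an exact match).
 */
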
3508 /**
3509  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3510  *	@adap: the adapter
3511  *	@sched: the scheduler index
3512  *	@ipg: the interpacket delay in tenths of nanoseconds
3513  *
3514  *	Set the interpacket delay for a HW packet rate scheduler.
3515  */
3516 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3517 {
3518 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3519 
3520 	/* convert ipg to nearest number of core clocks */
3521 	ipg *= core_ticks_per_usec(adap);
3522 	ipg = (ipg + 5000) / 10000;
3523 	if (ipg > M_TXTIMERSEPQ0)
3524 		return -EINVAL;
3525 
3526 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3527 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3528 	if (sched & 1)
3529 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3530 	else
3531 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3532 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3533 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3534 	return 0;
3535 }
3536 
3537 /**
3538  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3539  *	@adap: the adapter
3540  *	@sched: the scheduler index
3541  *	@kbps: the current rate in Kbps
3542  *	@ipg: the interpacket delay in tenths of nanoseconds
3543  *
3544  *	Return the current configuration of a HW Tx scheduler.
3545  */
3546 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
3547 		     unsigned int *kbps, unsigned int *ipg)
3548 {
3549 	unsigned int v, addr, bpt, cpt;
3550 
3551 	if (kbps) {
3552 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3553 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3554 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3555 		if (sched & 1)
3556 			v >>= 16;
3557 		bpt = (v >> 8) & 0xff;
3558 		cpt = v & 0xff;
3559 		if (!cpt)
3560 			*kbps = 0;        /* scheduler disabled */
3561 		else {
3562 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3563 			*kbps = (v * bpt) / 125;
3564 		}
3565 	}
3566 	if (ipg) {
3567 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3568 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3569 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3570 		if (sched & 1)
3571 			v >>= 16;
3572 		v &= 0xffff;
3573 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3574 	}
3575 }
3576 
3577 /*
3578  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3579  * clocks.  The formula is
3580  *
3581  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3582  *
3583  * which, with the core clock (cclk) given in kHz, is equivalent to
3584  *
3585  * bytes/s = 62.5 * bytes256 * ClkFreq_kHz
3586  */
3587 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3588 {
3589 	u64 v = bytes256 * adap->params.vpd.cclk;
3590 
3591 	return v * 62 + v / 2;
3592 }
3593 
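/*
 * Illustrative sketch (not part of the driver): v * 62 + v / 2 is just the
 * integer form of 62.5 * v.  Taking cclk to be in kHz, as the 62.5 factor
 * above implies, a 250 MHz core clock and bytes256 = 3 give:
 *
 *	v    = 3 * 250000            = 750000
 *	rate = 750000 * 62 + 375000  = 46875000 bytes/s
 *
 * which matches 3 * 256 * 250000000 / 4096 = 46875000 from the first form.
 */
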
3594 /**
3595  *	t4_get_chan_txrate - get the current per channel Tx rates
3596  *	@adap: the adapter
3597  *	@nic_rate: rates for NIC traffic
3598  *	@ofld_rate: rates for offloaded traffic
3599  *
3600  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3601  *	for each channel.
3602  */
3603 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3604 {
3605 	u32 v;
3606 
3607 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3608 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3609 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3610 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3611 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3612 
3613 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3614 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3615 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3616 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3617 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3618 }
3619 
3620 /**
3621  *	t4_set_trace_filter - configure one of the tracing filters
3622  *	@adap: the adapter
3623  *	@tp: the desired trace filter parameters
3624  *	@idx: which filter to configure
3625  *	@enable: whether to enable or disable the filter
3626  *
3627  *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3628  *	it indicates that the filter is already written in the register and it
3629  *	just needs to be enabled or disabled.
3630  */
3631 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3632     int idx, int enable)
3633 {
3634 	int i, ofst = idx * 4;
3635 	u32 data_reg, mask_reg, cfg;
3636 	u32 multitrc = F_TRCMULTIFILTER;
3637 	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3638 
3639 	if (idx < 0 || idx >= NTRACE)
3640 		return -EINVAL;
3641 
3642 	if (tp == NULL || !enable) {
3643 		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3644 		    enable ? en : 0);
3645 		return 0;
3646 	}
3647 
3648 	/*
3649 	 * TODO - After T4 data book is updated, specify the exact
3650 	 * section below.
3651 	 *
3652 	 * See T4 data book - MPS section for a complete description
3653 	 * of the below if..else handling of A_MPS_TRC_CFG register
3654 	 * value.
3655 	 */
3656 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3657 	if (cfg & F_TRCMULTIFILTER) {
3658 		/*
3659 		 * If multiple tracers are enabled, then maximum
3660 		 * capture size is 2.5KB (FIFO size of a single channel)
3661 		 * minus 2 flits for CPL_TRACE_PKT header.
3662 		 */
3663 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3664 			return -EINVAL;
3665 	} else {
3666 		/*
3667 		 * If multiple tracers are disabled, to avoid deadlocks
3668 		 * maximum packet capture size of 9600 bytes is recommended.
3669 		 * Also in this mode, only trace0 can be enabled and running.
3670 		 */
3671 		multitrc = 0;
3672 		if (tp->snap_len > 9600 || idx)
3673 			return -EINVAL;
3674 	}
3675 
3676 	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3677 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3678 	    tp->min_len > M_TFMINPKTSIZE)
3679 		return -EINVAL;
3680 
3681 	/* stop the tracer we'll be changing */
3682 	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3683 
3684 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3685 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3686 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3687 
3688 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3689 		t4_write_reg(adap, data_reg, tp->data[i]);
3690 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3691 	}
3692 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3693 		     V_TFCAPTUREMAX(tp->snap_len) |
3694 		     V_TFMINPKTSIZE(tp->min_len));
3695 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3696 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3697 		     (is_t4(adap) ?
3698 		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3699 		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3700 
3701 	return 0;
3702 }
3703 
3704 /**
3705  *	t4_get_trace_filter - query one of the tracing filters
3706  *	@adap: the adapter
3707  *	@tp: the current trace filter parameters
3708  *	@idx: which trace filter to query
3709  *	@enabled: non-zero if the filter is enabled
3710  *
3711  *	Returns the current settings of one of the HW tracing filters.
3712  */
3713 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3714 			 int *enabled)
3715 {
3716 	u32 ctla, ctlb;
3717 	int i, ofst = idx * 4;
3718 	u32 data_reg, mask_reg;
3719 
3720 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3721 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3722 
3723 	if (is_t4(adap)) {
3724 		*enabled = !!(ctla & F_TFEN);
3725 		tp->port =  G_TFPORT(ctla);
3726 		tp->invert = !!(ctla & F_TFINVERTMATCH);
3727 	} else {
3728 		*enabled = !!(ctla & F_T5_TFEN);
3729 		tp->port = G_T5_TFPORT(ctla);
3730 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3731 	}
3732 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3733 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3734 	tp->skip_ofst = G_TFOFFSET(ctla);
3735 	tp->skip_len = G_TFLENGTH(ctla);
3736 
3737 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3738 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3739 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3740 
3741 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3742 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3743 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3744 	}
3745 }
3746 
3747 /**
3748  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3749  *	@adap: the adapter
3750  *	@cnt: where to store the count statistics
3751  *	@cycles: where to store the cycle statistics
3752  *
3753  *	Returns performance statistics from PMTX.
3754  */
3755 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3756 {
3757 	int i;
3758 	u32 data[2];
3759 
3760 	for (i = 0; i < PM_NSTATS; i++) {
3761 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3762 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3763 		if (is_t4(adap))
3764 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3765 		else {
3766 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3767 					 A_PM_TX_DBG_DATA, data, 2,
3768 					 A_PM_TX_DBG_STAT_MSB);
3769 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3770 		}
3771 	}
3772 }
3773 
3774 /**
3775  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3776  *	@adap: the adapter
3777  *	@cnt: where to store the count statistics
3778  *	@cycles: where to store the cycle statistics
3779  *
3780  *	Returns performance statistics from PMRX.
3781  */
3782 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3783 {
3784 	int i;
3785 	u32 data[2];
3786 
3787 	for (i = 0; i < PM_NSTATS; i++) {
3788 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3789 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3790 		if (is_t4(adap))
3791 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3792 		else {
3793 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3794 					 A_PM_RX_DBG_DATA, data, 2,
3795 					 A_PM_RX_DBG_STAT_MSB);
3796 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3797 		}
3798 	}
3799 }
3800 
3801 /**
3802  *	get_mps_bg_map - return the buffer groups associated with a port
3803  *	@adap: the adapter
3804  *	@idx: the port index
3805  *
3806  *	Returns a bitmap indicating which MPS buffer groups are associated
3807  *	with the given port.  Bit i is set if buffer group i is used by the
3808  *	port.
3809  */
3810 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3811 {
3812 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3813 
3814 	if (n == 0)
3815 		return idx == 0 ? 0xf : 0;
3816 	if (n == 1)
3817 		return idx < 2 ? (3 << (2 * idx)) : 0;
3818 	return 1 << idx;
3819 }
3820 
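/*
 * Illustrative sketch (not part of the driver): spelling out the mapping
 * for the three cases the code distinguishes (n is the NUMPORTS field,
 * which evidently encodes 1, 2, and 4-port configurations):
 *
 *	n == 0 (1 port):     port 0 -> 0xf (buffer groups 0-3), others -> 0
 *	n == 1 (2 ports):    port 0 -> 0x3, port 1 -> 0xc, others -> 0
 *	otherwise (4 ports): port i -> 1 << i (one buffer group per port)
 */
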
3821 /**
3822  *	t4_get_port_stats_offset - collect port stats relative to a
3823  *				   previous snapshot
3824  *	@adap: the adapter
3825  *	@idx: the port index
3826  *	@stats: current stats to fill
3827  *	@offset: previous stats snapshot
 *
 *	Collects the current statistics for the given port and subtracts
 *	the @offset snapshot from each counter.
3828  */
3829 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3830 		struct port_stats *stats,
3831 		struct port_stats *offset)
3832 {
3833 	u64 *s, *o;
3834 	int i;
3835 
3836 	t4_get_port_stats(adap, idx, stats);
3837 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3838 			i < (sizeof(struct port_stats) / sizeof(u64));
3839 			i++, s++, o++)
3840 		*s -= *o;
3841 }
3842 
3843 /**
3844  *	t4_get_port_stats - collect port statistics
3845  *	@adap: the adapter
3846  *	@idx: the port index
3847  *	@p: the stats structure to fill
3848  *
3849  *	Collect statistics related to the given port from HW.
3850  */
3851 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3852 {
3853 	u32 bgmap = get_mps_bg_map(adap, idx);
3854 
3855 #define GET_STAT(name) \
3856 	t4_read_reg64(adap, \
3857 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3858 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3859 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3860 
3861 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3862 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3863 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3864 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3865 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3866 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3867 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3868 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3869 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3870 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3871 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3872 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3873 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3874 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3875 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3876 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3877 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3878 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3879 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3880 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3881 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3882 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3883 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3884 
3885 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3886 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3887 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3888 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3889 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3890 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3891 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3892 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3893 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3894 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3895 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3896 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3897 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3898 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3899 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3900 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3901 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3902 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3903 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3904 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3905 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3906 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3907 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3908 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3909 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3910 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3911 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3912 
3913 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3914 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3915 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3916 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3917 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3918 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3919 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3920 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3921 
3922 #undef GET_STAT
3923 #undef GET_STAT_COM
3924 }
3925 
3926 /**
3927  *	t4_clr_port_stats - clear port statistics
3928  *	@adap: the adapter
3929  *	@idx: the port index
3930  *
3931  *	Clear HW statistics for the given port.
3932  */
3933 void t4_clr_port_stats(struct adapter *adap, int idx)
3934 {
3935 	unsigned int i;
3936 	u32 bgmap = get_mps_bg_map(adap, idx);
3937 	u32 port_base_addr;
3938 
3939 	if (is_t4(adap))
3940 		port_base_addr = PORT_BASE(idx);
3941 	else
3942 		port_base_addr = T5_PORT_BASE(idx);
3943 
3944 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3945 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3946 		t4_write_reg(adap, port_base_addr + i, 0);
3947 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3948 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3949 		t4_write_reg(adap, port_base_addr + i, 0);
3950 	for (i = 0; i < 4; i++)
3951 		if (bgmap & (1 << i)) {
3952 			t4_write_reg(adap,
3953 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3954 			t4_write_reg(adap,
3955 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3956 		}
3957 }
3958 
3959 /**
3960  *	t4_get_lb_stats - collect loopback port statistics
3961  *	@adap: the adapter
3962  *	@idx: the loopback port index
3963  *	@p: the stats structure to fill
3964  *
3965  *	Return HW statistics for the given loopback port.
3966  */
3967 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3968 {
3969 	u32 bgmap = get_mps_bg_map(adap, idx);
3970 
3971 #define GET_STAT(name) \
3972 	t4_read_reg64(adap, \
3973 	(is_t4(adap) ? \
3974 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3975 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3976 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3977 
3978 	p->octets           = GET_STAT(BYTES);
3979 	p->frames           = GET_STAT(FRAMES);
3980 	p->bcast_frames     = GET_STAT(BCAST);
3981 	p->mcast_frames     = GET_STAT(MCAST);
3982 	p->ucast_frames     = GET_STAT(UCAST);
3983 	p->error_frames     = GET_STAT(ERROR);
3984 
3985 	p->frames_64        = GET_STAT(64B);
3986 	p->frames_65_127    = GET_STAT(65B_127B);
3987 	p->frames_128_255   = GET_STAT(128B_255B);
3988 	p->frames_256_511   = GET_STAT(256B_511B);
3989 	p->frames_512_1023  = GET_STAT(512B_1023B);
3990 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3991 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3992 	p->drop             = GET_STAT(DROP_FRAMES);
3993 
3994 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3995 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3996 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3997 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3998 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3999 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4000 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4001 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4002 
4003 #undef GET_STAT
4004 #undef GET_STAT_COM
4005 }
4006 
4007 /**
4008  *	t4_wol_magic_enable - enable/disable magic packet WoL
4009  *	@adap: the adapter
4010  *	@port: the physical port index
4011  *	@addr: MAC address expected in magic packets, %NULL to disable
4012  *
4013  *	Enables/disables magic packet wake-on-LAN for the selected port.
4014  */
4015 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4016 			 const u8 *addr)
4017 {
4018 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4019 
4020 	if (is_t4(adap)) {
4021 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4022 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4023 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4024 	} else {
4025 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4026 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4027 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4028 	}
4029 
4030 	if (addr) {
4031 		t4_write_reg(adap, mag_id_reg_l,
4032 			     (addr[2] << 24) | (addr[3] << 16) |
4033 			     (addr[4] << 8) | addr[5]);
4034 		t4_write_reg(adap, mag_id_reg_h,
4035 			     (addr[0] << 8) | addr[1]);
4036 	}
4037 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4038 			 V_MAGICEN(addr != NULL));
4039 }
4040 
4041 /**
4042  *	t4_wol_pat_enable - enable/disable pattern-based WoL
4043  *	@adap: the adapter
4044  *	@port: the physical port index
4045  *	@map: bitmap of which HW pattern filters to set
4046  *	@mask0: byte mask for bytes 0-63 of a packet
4047  *	@mask1: byte mask for bytes 64-127 of a packet
4048  *	@crc: Ethernet CRC for selected bytes
4049  *	@enable: enable/disable switch
4050  *
4051  *	Sets the pattern filters indicated in @map to mask out the bytes
4052  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4053  *	the resulting packet against @crc.  If @enable is %true pattern-based
4054  *	WoL is enabled, otherwise disabled.
4055  */
4056 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4057 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4058 {
4059 	int i;
4060 	u32 port_cfg_reg;
4061 
4062 	if (is_t4(adap))
4063 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4064 	else
4065 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4066 
4067 	if (!enable) {
4068 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4069 		return 0;
4070 	}
4071 	if (map > 0xff)
4072 		return -EINVAL;
4073 
4074 #define EPIO_REG(name) \
4075 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4076 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4077 
4078 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4079 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4080 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4081 
4082 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4083 		if (!(map & 1))
4084 			continue;
4085 
4086 		/* write byte masks */
4087 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4088 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4089 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4090 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4091 			return -ETIMEDOUT;
4092 
4093 		/* write CRC */
4094 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4095 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4096 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4097 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4098 			return -ETIMEDOUT;
4099 	}
4100 #undef EPIO_REG
4101 
4102 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4103 	return 0;
4104 }
4105 
4106 /**
4107  *	t4_mk_filtdelwr - create a delete filter WR
4108  *	@ftid: the filter ID
4109  *	@wr: the filter work request to populate
4110  *	@qid: ingress queue to receive the delete notification
4111  *
4112  *	Creates a filter work request to delete the supplied filter.  If @qid is
4113  *	negative the delete notification is suppressed.
4114  */
4115 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4116 {
4117 	memset(wr, 0, sizeof(*wr));
4118 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4119 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4120 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4121 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4122 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4123 	if (qid >= 0)
4124 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4125 }
4126 
4127 #define INIT_CMD(var, cmd, rd_wr) do { \
4128 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4129 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4130 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4131 } while (0)
4132 
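/*
 * Illustrative sketch (not part of the driver): INIT_CMD pastes the command
 * name and transfer direction into the opcode and flags, so INIT_CMD(c,
 * BYE, WRITE) expands (modulo line breaks) to:
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */
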
/**
 *	t4_fwaddrspace_write - write a value into the firmware's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Writes a 32-bit value into the firmware's address space through a
 *	FW LDST command.
 */
4133 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
4134 {
4135 	struct fw_ldst_cmd c;
4136 
4137 	memset(&c, 0, sizeof(c));
4138 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4139 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4140 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4141 	c.u.addrval.addr = htonl(addr);
4142 	c.u.addrval.val = htonl(val);
4143 
4144 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4145 }
4146 
4147 /**
4148  *	t4_i2c_rd - read a byte from an i2c addressable device
4149  *	@adap: the adapter
4150  *	@mbox: mailbox to use for the FW command
4151  *	@port_id: the port id
4152  *	@dev_addr: the i2c device address
4153  *	@offset: the byte offset to read from
4154  *	@valp: where to store the value
4155  *
 *	Issues a FW LDST command through the given mailbox to read one byte
 *	from the specified i2c device.
 */
4156 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
4157 	       u8 dev_addr, u8 offset, u8 *valp)
4158 {
4159 	int ret;
4160 	struct fw_ldst_cmd c;
4161 
4162 	memset(&c, 0, sizeof(c));
4163 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4164 		F_FW_CMD_READ |
4165 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
4166 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4167 	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
4168 	c.u.i2c_deprecated.base = dev_addr;
4169 	c.u.i2c_deprecated.boffset = offset;
4170 
4171 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4172 	if (ret == 0)
4173 		*valp = c.u.i2c_deprecated.data;
4174 	return ret;
4175 }
4176 
4177 /**
4178  *	t4_mdio_rd - read a PHY register through MDIO
4179  *	@adap: the adapter
4180  *	@mbox: mailbox to use for the FW command
4181  *	@phy_addr: the PHY address
4182  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4183  *	@reg: the register to read
4184  *	@valp: where to store the value
4185  *
4186  *	Issues a FW command through the given mailbox to read a PHY register.
4187  */
4188 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4189 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4190 {
4191 	int ret;
4192 	struct fw_ldst_cmd c;
4193 
4194 	memset(&c, 0, sizeof(c));
4195 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4196 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4197 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4198 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4199 				   V_FW_LDST_CMD_MMD(mmd));
4200 	c.u.mdio.raddr = htons(reg);
4201 
4202 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4203 	if (ret == 0)
4204 		*valp = ntohs(c.u.mdio.rval);
4205 	return ret;
4206 }
4207 
4208 /**
4209  *	t4_mdio_wr - write a PHY register through MDIO
4210  *	@adap: the adapter
4211  *	@mbox: mailbox to use for the FW command
4212  *	@phy_addr: the PHY address
4213  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4214  *	@reg: the register to write
4215  *	@valp: value to write
4216  *
4217  *	Issues a FW command through the given mailbox to write a PHY register.
4218  */
4219 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4220 	       unsigned int mmd, unsigned int reg, unsigned int val)
4221 {
4222 	struct fw_ldst_cmd c;
4223 
4224 	memset(&c, 0, sizeof(c));
4225 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4226 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4227 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4228 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4229 				   V_FW_LDST_CMD_MMD(mmd));
4230 	c.u.mdio.raddr = htons(reg);
4231 	c.u.mdio.rval = htons(val);
4232 
4233 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4234 }
4235 
4236 /**
4237  *	t4_sge_ctxt_flush - flush the SGE context cache
4238  *	@adap: the adapter
4239  *	@mbox: mailbox to use for the FW command
4240  *
4241  *	Issues a FW command through the given mailbox to flush the
4242  *	SGE context cache.
4243  */
4244 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4245 {
4246 	int ret;
4247 	struct fw_ldst_cmd c;
4248 
4249 	memset(&c, 0, sizeof(c));
4250 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4251 			F_FW_CMD_READ |
4252 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4253 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4254 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4255 
4256 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4257 	return ret;
4258 }
4259 
4260 /**
4261  *	t4_sge_ctxt_rd - read an SGE context through FW
4262  *	@adap: the adapter
4263  *	@mbox: mailbox to use for the FW command
4264  *	@cid: the context id
4265  *	@ctype: the context type
4266  *	@data: where to store the context data
4267  *
4268  *	Issues a FW command through the given mailbox to read an SGE context.
4269  */
4270 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4271 		   enum ctxt_type ctype, u32 *data)
4272 {
4273 	int ret;
4274 	struct fw_ldst_cmd c;
4275 
4276 	if (ctype == CTXT_EGRESS)
4277 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4278 	else if (ctype == CTXT_INGRESS)
4279 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4280 	else if (ctype == CTXT_FLM)
4281 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4282 	else
4283 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4284 
4285 	memset(&c, 0, sizeof(c));
4286 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4287 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4288 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4289 	c.u.idctxt.physid = htonl(cid);
4290 
4291 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4292 	if (ret == 0) {
4293 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4294 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4295 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4296 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4297 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4298 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4299 	}
4300 	return ret;
4301 }
4302 
4303 /**
4304  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4305  *	@adap: the adapter
4306  *	@cid: the context id
4307  *	@ctype: the context type
4308  *	@data: where to store the context data
4309  *
4310  *	Reads an SGE context directly, bypassing FW.  This is only for
4311  *	debugging when FW is unavailable.
4312  */
4313 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4314 		      u32 *data)
4315 {
4316 	int i, ret;
4317 
4318 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4319 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4320 	if (!ret)
4321 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4322 			*data++ = t4_read_reg(adap, i);
4323 	return ret;
4324 }
4325 
4326 /**
4327  *	t4_fw_hello - establish communication with FW
4328  *	@adap: the adapter
4329  *	@mbox: mailbox to use for the FW command
4330  *	@evt_mbox: mailbox to receive async FW events
4331  *	@master: specifies the caller's willingness to be the device master
4332  *	@state: returns the current device state (if non-NULL)
4333  *
4334  *	Issues a command to establish communication with FW.  Returns either
4335  *	an error (negative integer) or the mailbox of the Master PF.
4336  */
4337 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4338 		enum dev_master master, enum dev_state *state)
4339 {
4340 	int ret;
4341 	struct fw_hello_cmd c;
4342 	u32 v;
4343 	unsigned int master_mbox;
4344 	int retries = FW_CMD_HELLO_RETRIES;
4345 
4346 retry:
4347 	memset(&c, 0, sizeof(c));
4348 	INIT_CMD(c, HELLO, WRITE);
4349 	c.err_to_clearinit = htonl(
4350 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4351 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4352 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4353 			M_FW_HELLO_CMD_MBMASTER) |
4354 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4355 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4356 		F_FW_HELLO_CMD_CLEARINIT);
4357 
4358 	/*
4359 	 * Issue the HELLO command to the firmware.  If it's not successful
4360 	 * but indicates that we got a "busy" or "timeout" condition, retry
4361 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4362 	 * retry limit, check to see if the firmware left us any error
4363 	 * information and report that if so ...
4364 	 */
4365 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4366 	if (ret != FW_SUCCESS) {
4367 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4368 			goto retry;
4369 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4370 			t4_report_fw_error(adap);
4371 		return ret;
4372 	}
4373 
4374 	v = ntohl(c.err_to_clearinit);
4375 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4376 	if (state) {
4377 		if (v & F_FW_HELLO_CMD_ERR)
4378 			*state = DEV_STATE_ERR;
4379 		else if (v & F_FW_HELLO_CMD_INIT)
4380 			*state = DEV_STATE_INIT;
4381 		else
4382 			*state = DEV_STATE_UNINIT;
4383 	}
4384 
4385 	/*
4386 	 * If we're not the Master PF then we need to wait around for the
4387 	 * Master PF Driver to finish setting up the adapter.
4388 	 *
4389 	 * Note that we also do this wait if we're a non-Master-capable PF and
4390 	 * there is no current Master PF; a Master PF may show up momentarily
4391 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4392 	 * OS loads lots of different drivers rapidly at the same time).  In
4393 	 * this case, the Master PF returned by the firmware will be
4394 	 * M_PCIE_FW_MASTER so the test below will work ...
4395 	 */
4396 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4397 	    master_mbox != mbox) {
4398 		int waiting = FW_CMD_HELLO_TIMEOUT;
4399 
4400 		/*
4401 		 * Wait for the firmware to either indicate an error or
4402 		 * initialized state.  If we see either of these we bail out
4403 		 * and report the issue to the caller.  If we exhaust the
4404 		 * "hello timeout" and we haven't exhausted our retries, try
4405 		 * again.  Otherwise bail with a timeout error.
4406 		 */
4407 		for (;;) {
4408 			u32 pcie_fw;
4409 
4410 			msleep(50);
4411 			waiting -= 50;
4412 
4413 			/*
4414 			 * If neither Error nor Initialized is indicated
4415 			 * by the firmware, keep waiting till we exhaust our
4416 			 * timeout ... and then retry if we haven't exhausted
4417 			 * our retries ...
4418 			 */
4419 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4420 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4421 				if (waiting <= 0) {
4422 					if (retries-- > 0)
4423 						goto retry;
4424 
4425 					return -ETIMEDOUT;
4426 				}
4427 				continue;
4428 			}
4429 
4430 			/*
4431 			 * We either have an Error or Initialized condition
4432 			 * We have either an Error or an Initialized condition;
4433 			 * report errors preferentially.
4434 			if (state) {
4435 				if (pcie_fw & F_PCIE_FW_ERR)
4436 					*state = DEV_STATE_ERR;
4437 				else if (pcie_fw & F_PCIE_FW_INIT)
4438 					*state = DEV_STATE_INIT;
4439 			}
4440 
4441 			/*
4442 			 * If we arrived before a Master PF was selected and
4443 			 * If we arrived before a Master PF was selected and
4444 			 * one has since become valid, grab its identity
4445 			 */
4446 			if (master_mbox == M_PCIE_FW_MASTER &&
4447 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4448 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4449 			break;
4450 		}
4451 	}
4452 
4453 	return master_mbox;
4454 }
4455 
4456 /**
4457  *	t4_fw_bye - end communication with FW
4458  *	@adap: the adapter
4459  *	@mbox: mailbox to use for the FW command
4460  *
4461  *	Issues a command to terminate communication with FW.
4462  */
4463 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4464 {
4465 	struct fw_bye_cmd c;
4466 
4467 	memset(&c, 0, sizeof(c));
4468 	INIT_CMD(c, BYE, WRITE);
4469 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4470 }
4471 
4472 /**
4473  *	t4_fw_reset - issue a reset to FW
4474  *	@adap: the adapter
4475  *	@mbox: mailbox to use for the FW command
4476  *	@reset: specifies the type of reset to perform
4477  *
4478  *	Issues a reset command of the specified type to FW.
4479  */
4480 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4481 {
4482 	struct fw_reset_cmd c;
4483 
4484 	memset(&c, 0, sizeof(c));
4485 	INIT_CMD(c, RESET, WRITE);
4486 	c.val = htonl(reset);
4487 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4488 }
4489 
4490 /**
4491  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4492  *	@adap: the adapter
4493  *	@mbox: mailbox to use for the FW RESET command (if desired)
4494  *	@force: force uP into RESET even if FW RESET command fails
4495  *
4496  *	Issues a RESET command to firmware (if desired) with a HALT indication
4497  *	and then puts the microprocessor into RESET state.  The RESET command
4498  *	will only be issued if a legitimate mailbox is provided (mbox <=
4499  *	M_PCIE_FW_MASTER).
4500  *
4501  *	This is generally used in order for the host to safely manipulate the
4502  *	adapter without fear of conflicting with whatever the firmware might
4503  *	be doing.  The only way out of this state is to RESTART the firmware
4504  *	...
4505  */
4506 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4507 {
4508 	int ret = 0;
4509 
4510 	/*
4511 	 * If a legitimate mailbox is provided, issue a RESET command
4512 	 * with a HALT indication.
4513 	 */
4514 	if (mbox <= M_PCIE_FW_MASTER) {
4515 		struct fw_reset_cmd c;
4516 
4517 		memset(&c, 0, sizeof(c));
4518 		INIT_CMD(c, RESET, WRITE);
4519 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4520 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4521 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4522 	}
4523 
4524 	/*
4525 	 * Normally we won't complete the operation if the firmware RESET
4526 	 * command fails but if our caller insists we'll go ahead and put the
4527 	 * uP into RESET.  This can be useful if the firmware is hung or even
4528 	 * missing ...  We'll have to take the risk of putting the uP into
4529 	 * RESET without the cooperation of firmware in that case.
4530 	 *
4531 	 * We also force the firmware's HALT flag to be on in case we bypassed
4532 	 * the firmware RESET command above or we're dealing with old firmware
4533 	 * which doesn't have the HALT capability.  This will serve as a flag
4534 	 * for the incoming firmware to know that it's coming out of a HALT
4535 	 * rather than a RESET ... if it's new enough to understand that ...
4536 	 */
4537 	if (ret == 0 || force) {
4538 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4539 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4540 	}
4541 
4542 	/*
4543 	 * And we always return the result of the firmware RESET command
4544 	 * even when we force the uP into RESET ...
4545 	 */
4546 	return ret;
4547 }
4548 
4549 /**
4550  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4551  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
4552  *	@reset: if we want to do a RESET to restart things
4553  *
4554  *	Restart firmware previously halted by t4_fw_halt().  On successful
4555  *	return the previous PF Master remains as the new PF Master and there
4556  *	is no need to issue a new HELLO command, etc.
4557  *
4558  *	We do this in two ways:
4559  *
4560  *	 1. If we're dealing with newer firmware we'll simply want to take
4561  *	    the chip's microprocessor out of RESET.  This will cause the
4562  *	    firmware to start up from its start vector.  And then we'll loop
4563  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4564  *	    reset to 0) or we timeout.
4565  *
4566  *	 2. If we're dealing with older firmware then we'll need to RESET
4567  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4568  *	    flag and automatically RESET itself on startup.
4569  */
4570 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4571 {
4572 	if (reset) {
4573 		/*
4574 		 * Since we're directing the RESET instead of the firmware
4575 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4576 		 * bit.
4577 		 */
4578 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4579 
4580 		/*
4581 		 * If we've been given a valid mailbox, first try to get the
4582 		 * firmware to do the RESET.  If that works, great and we can
4583 		 * return success.  Otherwise, if we haven't been given a
4584 		 * valid mailbox or the RESET command failed, fall back to
4585 		 * hitting the chip with a hammer.
4586 		 */
4587 		if (mbox <= M_PCIE_FW_MASTER) {
4588 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4589 			msleep(100);
4590 			if (t4_fw_reset(adap, mbox,
4591 					F_PIORST | F_PIORSTMODE) == 0)
4592 				return 0;
4593 		}
4594 
4595 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4596 		msleep(2000);
4597 	} else {
4598 		int ms;
4599 
4600 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4601 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4602 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4603 				return FW_SUCCESS;
4604 			msleep(100);
4605 			ms += 100;
4606 		}
4607 		return -ETIMEDOUT;
4608 	}
4609 	return 0;
4610 }
4611 
4612 /**
4613  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4614  *	@adap: the adapter
4615  *	@mbox: mailbox to use for the FW RESET command (if desired)
4616  *	@fw_data: the firmware image to write
4617  *	@size: image size
4618  *	@force: force upgrade even if firmware doesn't cooperate
4619  *
4620  *	Perform all of the steps necessary for upgrading an adapter's
4621  *	firmware image.  Normally this requires the cooperation of the
4622  *	existing firmware in order to halt all existing activities
4623  *	but if an invalid mailbox token is passed in we skip that step
4624  *	(though we'll still put the adapter microprocessor into RESET in
4625  *	that case).
4626  *
4627  *	On successful return the new firmware will have been loaded and
4628  *	the adapter will have been fully RESET losing all previous setup
4629  *	state.  On unsuccessful return the adapter may be completely hosed ...
4630  *	positive errno indicates that the adapter is ~probably~ intact, a
4631  *	negative errno indicates that things are looking bad ...
4632  */
4633 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4634 		  const u8 *fw_data, unsigned int size, int force)
4635 {
4636 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4637 	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4638 	int reset, ret;
4639 
4640 	if (!bootstrap) {
4641 		ret = t4_fw_halt(adap, mbox, force);
4642 		if (ret < 0 && !force)
4643 			return ret;
4644 	}
4645 
4646 	ret = t4_load_fw(adap, fw_data, size);
4647 	if (ret < 0 || bootstrap)
4648 		return ret;
4649 
4650 	/*
4651 	 * Older versions of the firmware don't understand the new
4652 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4653 	 * restart.  So for newly loaded older firmware we'll have to do the
4654 	 * RESET for it so it starts up on a clean slate.  We can tell if
4655 	 * the newly loaded firmware will handle this right by checking
4656 	 * its header flags to see if it advertises the capability.
4657 	 */
4658 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4659 	return t4_fw_restart(adap, mbox, reset);
4660 }
4661 
4662 /**
4663  *	t4_fw_initialize - ask FW to initialize the device
4664  *	@adap: the adapter
4665  *	@mbox: mailbox to use for the FW command
4666  *
4667  *	Issues a command to FW to partially initialize the device.  This
4668  *	performs initialization that generally doesn't depend on user input.
4669  */
4670 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4671 {
4672 	struct fw_initialize_cmd c;
4673 
4674 	memset(&c, 0, sizeof(c));
4675 	INIT_CMD(c, INITIALIZE, WRITE);
4676 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4677 }
4678 
4679 /**
4680  *	t4_query_params - query FW or device parameters
4681  *	@adap: the adapter
4682  *	@mbox: mailbox to use for the FW command
4683  *	@pf: the PF
4684  *	@vf: the VF
4685  *	@nparams: the number of parameters
4686  *	@params: the parameter names
4687  *	@val: the parameter values
4688  *
4689  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4690  *	queried at once.
4691  */
4692 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4693 		    unsigned int vf, unsigned int nparams, const u32 *params,
4694 		    u32 *val)
4695 {
4696 	int i, ret;
4697 	struct fw_params_cmd c;
4698 	__be32 *p = &c.param[0].mnem;
4699 
4700 	if (nparams > 7)
4701 		return -EINVAL;
4702 
4703 	memset(&c, 0, sizeof(c));
4704 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4705 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4706 			    V_FW_PARAMS_CMD_VFN(vf));
4707 	c.retval_len16 = htonl(FW_LEN16(c));
4708 
4709 	for (i = 0; i < nparams; i++, p += 2, params++)
4710 		*p = htonl(*params);
4711 
4712 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4713 	if (ret == 0)
4714 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4715 			*val++ = ntohl(*p);
4716 	return ret;
4717 }
4718 
4719 /**
4720  *	t4_set_params - sets FW or device parameters
4721  *	@adap: the adapter
4722  *	@mbox: mailbox to use for the FW command
4723  *	@pf: the PF
4724  *	@vf: the VF
4725  *	@nparams: the number of parameters
4726  *	@params: the parameter names
4727  *	@val: the parameter values
4728  *
4729  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4730  *	specified at once.
4731  */
4732 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4733 		  unsigned int vf, unsigned int nparams, const u32 *params,
4734 		  const u32 *val)
4735 {
4736 	struct fw_params_cmd c;
4737 	__be32 *p = &c.param[0].mnem;
4738 
4739 	if (nparams > 7)
4740 		return -EINVAL;
4741 
4742 	memset(&c, 0, sizeof(c));
4743 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4744 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4745 			    V_FW_PARAMS_CMD_VFN(vf));
4746 	c.retval_len16 = htonl(FW_LEN16(c));
4747 
4748 	while (nparams--) {
4749 		*p++ = htonl(*params);
4750 		params++;
4751 		*p++ = htonl(*val);
4752 		val++;
4753 	}
4754 
4755 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4756 }
4757 
4758 /**
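/*
 * Illustrative sketch (not part of the driver): the loop above lays the
 * (name, value) pairs out back to back in the command payload, so for
 * nparams == 2 it is equivalent to
 *
 *	c.param[0].mnem = htonl(params[0]);  c.param[0].val = htonl(val[0]);
 *	c.param[1].mnem = htonl(params[1]);  c.param[1].val = htonl(val[1]);
 *
 * since p starts at &c.param[0].mnem and advances one __be32 per store.
 */
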
4759  *	t4_cfg_pfvf - configure PF/VF resource limits
4760  *	@adap: the adapter
4761  *	@mbox: mailbox to use for the FW command
4762  *	@pf: the PF being configured
4763  *	@vf: the VF being configured
4764  *	@txq: the max number of egress queues
4765  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4766  *	@rxqi: the max number of interrupt-capable ingress queues
4767  *	@rxq: the max number of interruptless ingress queues
4768  *	@tc: the PCI traffic class
4769  *	@vi: the max number of virtual interfaces
4770  *	@cmask: the channel access rights mask for the PF/VF
4771  *	@pmask: the port access rights mask for the PF/VF
4772  *	@nexact: the maximum number of exact MPS filters
4773  *	@rcaps: read capabilities
4774  *	@wxcaps: write/execute capabilities
4775  *
4776  *	Configures resource limits and capabilities for a physical or virtual
4777  *	function.
4778  */
4779 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4780 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4781 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4782 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4783 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4784 {
4785 	struct fw_pfvf_cmd c;
4786 
4787 	memset(&c, 0, sizeof(c));
4788 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4789 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4790 			    V_FW_PFVF_CMD_VFN(vf));
4791 	c.retval_len16 = htonl(FW_LEN16(c));
4792 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4793 			       V_FW_PFVF_CMD_NIQ(rxq));
4794 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4795 			      V_FW_PFVF_CMD_PMASK(pmask) |
4796 			      V_FW_PFVF_CMD_NEQ(txq));
4797 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4798 				V_FW_PFVF_CMD_NEXACTF(nexact));
4799 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4800 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4801 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4802 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4803 }
4804 
4805 /**
4806  *	t4_alloc_vi_func - allocate a virtual interface
4807  *	@adap: the adapter
4808  *	@mbox: mailbox to use for the FW command
4809  *	@port: physical port associated with the VI
4810  *	@pf: the PF owning the VI
4811  *	@vf: the VF owning the VI
4812  *	@nmac: number of MAC addresses needed (1 to 5)
4813  *	@mac: the MAC addresses of the VI
4814  *	@rss_size: size of RSS table slice associated with this VI
4815  *	@portfunc: which Port Application Function MAC Address is desired
4816  *	@idstype: Intrusion Detection Type
4817  *
4818  *	Allocates a virtual interface for the given physical port.  If @mac is
4819  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4820  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4821  *	stored consecutively so the space needed is @nmac * 6 bytes.
4822  *	Returns a negative error number or the non-negative VI id.
4823  */
4824 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4825 		     unsigned int port, unsigned int pf, unsigned int vf,
4826 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4827 		     unsigned int portfunc, unsigned int idstype)
4828 {
4829 	int ret;
4830 	struct fw_vi_cmd c;
4831 
4832 	memset(&c, 0, sizeof(c));
4833 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4834 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4835 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4836 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4837 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4838 			       V_FW_VI_CMD_FUNC(portfunc));
4839 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4840 	c.nmac = nmac - 1;
4841 
4842 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4843 	if (ret)
4844 		return ret;
4845 
4846 	if (mac) {
4847 		memcpy(mac, c.mac, sizeof(c.mac));
4848 		switch (nmac) {
4849 		case 5:
4850 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* FALLTHROUGH */
4851 		case 4:
4852 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* FALLTHROUGH */
4853 		case 3:
4854 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* FALLTHROUGH */
4855 		case 2:
4856 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4857 		}
4858 	}
4859 	if (rss_size)
4860 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4861 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4862 }
4863 
4864 /**
4865  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4866  *	@adap: the adapter
4867  *	@mbox: mailbox to use for the FW command
4868  *	@port: physical port associated with the VI
4869  *	@pf: the PF owning the VI
4870  *	@vf: the VF owning the VI
4871  *	@nmac: number of MAC addresses needed (1 to 5)
4872  *	@mac: the MAC addresses of the VI
4873  *	@rss_size: size of RSS table slice associated with this VI
4874  *
4875  *	Backwards-compatible convenience routine to allocate a Virtual
4876  *	Interface with an Ethernet Port Application Function and Intrusion
4877  *	Detection System disabled.
4878  */
4879 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4880 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4881 		unsigned int *rss_size)
4882 {
4883 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4884 				FW_VI_FUNC_ETH, 0);
4885 }
4886 
4887 /**
4888  *	t4_free_vi - free a virtual interface
4889  *	@adap: the adapter
4890  *	@mbox: mailbox to use for the FW command
4891  *	@pf: the PF owning the VI
4892  *	@vf: the VF owning the VI
4893  *	@viid: virtual interface identifier
4894  *
4895  *	Free a previously allocated virtual interface.
4896  */
4897 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4898 	       unsigned int vf, unsigned int viid)
4899 {
4900 	struct fw_vi_cmd c;
4901 
4902 	memset(&c, 0, sizeof(c));
4903 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4904 			    F_FW_CMD_REQUEST |
4905 			    F_FW_CMD_EXEC |
4906 			    V_FW_VI_CMD_PFN(pf) |
4907 			    V_FW_VI_CMD_VFN(vf));
4908 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4909 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4910 
4911 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4912 }
4913 
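/*
 * Usage sketch (editor's illustration, kept out of the build): the usual VI
 * lifecycle around the helpers above -- allocate an Ethernet VI with one MAC
 * address, use it, then release it.
 */
#if 0
static void
example_vi_lifecycle(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf)
{
	u8 mac[6];		/* room for nmac (1) Ethernet addresses */
	unsigned int rss_size;
	int viid;

	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);
	if (viid < 0)
		return;		/* negative errno from the firmware */

	/* ... bring up queues, set the Rx mode, pass traffic ... */

	(void) t4_free_vi(adap, mbox, pf, vf, viid);
}
#endif
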
4914 /**
4915  *	t4_set_rxmode - set Rx properties of a virtual interface
4916  *	@adap: the adapter
4917  *	@mbox: mailbox to use for the FW command
4918  *	@viid: the VI id
4919  *	@mtu: the new MTU or -1
4920  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4921  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4922  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4923  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4924  *	@sleep_ok: if true we may sleep while awaiting command completion
4925  *
4926  *	Sets Rx properties of a virtual interface.
4927  */
4928 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4929 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4930 		  bool sleep_ok)
4931 {
4932 	struct fw_vi_rxmode_cmd c;
4933 
4934 	/* convert to FW values */
4935 	if (mtu < 0)
4936 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4937 	if (promisc < 0)
4938 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4939 	if (all_multi < 0)
4940 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4941 	if (bcast < 0)
4942 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4943 	if (vlanex < 0)
4944 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4945 
4946 	memset(&c, 0, sizeof(c));
4947 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4948 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4949 	c.retval_len16 = htonl(FW_LEN16(c));
4950 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4951 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4952 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4953 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4954 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4955 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4956 }
4957 
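/*
 * Usage sketch (editor's illustration, kept out of the build): t4_set_rxmode()
 * treats -1 as "leave unchanged", so a single property can be flipped without
 * first reading back the current state.  Here only promiscuous mode changes.
 */
#if 0
static int
example_set_promisc(struct adapter *adap, unsigned int mbox, unsigned int viid,
		    int on)
{
	/* mtu, all_multi, bcast and vlanex all stay as they are. */
	return t4_set_rxmode(adap, mbox, viid, -1, on ? 1 : 0, -1, -1, -1,
	    true);
}
#endif
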
4958 /**
4959  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4960  *	@adap: the adapter
4961  *	@mbox: mailbox to use for the FW command
4962  *	@viid: the VI id
4963  *	@free: if true any existing filters for this VI id are first removed
4964  *	@naddr: the number of MAC addresses to allocate filters for (up to 7
4965  *	    per FW command; larger requests are issued in chunks)
4965  *	@addr: the MAC address(es)
4966  *	@idx: where to store the index of each allocated filter
4967  *	@hash: pointer to hash address filter bitmap
4968  *	@sleep_ok: call is allowed to sleep
4969  *
4970  *	Allocates an exact-match filter for each of the supplied addresses and
4971  *	sets it to the corresponding address.  If @idx is not %NULL it should
4972  *	have at least @naddr entries, each of which will be set to the index of
4973  *	the filter allocated for the corresponding MAC address.  If a filter
4974  *	could not be allocated for an address its index is set to 0xffff.
4975  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4976  *	are hashed and the hash filter bitmap pointed to by @hash is updated.
4977  *
4978  *	Returns a negative error number or the number of filters allocated.
4979  */
4980 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4981 		      unsigned int viid, bool free, unsigned int naddr,
4982 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4983 {
4984 	int offset, ret = 0;
4985 	struct fw_vi_mac_cmd c;
4986 	unsigned int nfilters = 0;
4987 	unsigned int max_naddr = is_t4(adap) ?
4988 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
4989 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4990 	unsigned int rem = naddr;
4991 
4992 	if (naddr > max_naddr)
4993 		return -EINVAL;
4994 
4995 	for (offset = 0; offset < naddr; /**/) {
4996 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4997 					 ? rem
4998 					 : ARRAY_SIZE(c.u.exact));
4999 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5000 						     u.exact[fw_naddr]), 16);
5001 		struct fw_vi_mac_exact *p;
5002 		int i;
5003 
5004 		memset(&c, 0, sizeof(c));
5005 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5006 				     F_FW_CMD_REQUEST |
5007 				     F_FW_CMD_WRITE |
5008 				     V_FW_CMD_EXEC(free) |
5009 				     V_FW_VI_MAC_CMD_VIID(viid));
5010 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5011 					    V_FW_CMD_LEN16(len16));
5012 
5013 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5014 			p->valid_to_idx = htons(
5015 				F_FW_VI_MAC_CMD_VALID |
5016 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5017 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5018 		}
5019 
5020 		/*
5021 		 * It's okay if we run out of space in our MAC address arena.
5022 		 * Some of the addresses we submit may get stored so we need
5023 		 * to run through the reply to see what the results were ...
5024 		 */
5025 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5026 		if (ret && ret != -FW_ENOMEM)
5027 			break;
5028 
5029 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5030 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5031 
5032 			if (idx)
5033 				idx[offset+i] = (index >= max_naddr
5034 						 ? 0xffff
5035 						 : index);
5036 			if (index < max_naddr)
5037 				nfilters++;
5038 			else if (hash)
5039 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5040 		}
5041 
5042 		free = false;
5043 		offset += fw_naddr;
5044 		rem -= fw_naddr;
5045 	}
5046 
5047 	if (ret == 0 || ret == -FW_ENOMEM)
5048 		ret = nfilters;
5049 	return ret;
5050 }
5051 
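/*
 * Usage sketch (editor's illustration, kept out of the build): installing
 * exact-match filters for a small address list, falling back to the inexact
 * hash for any address the MPS TCAM cannot hold.  The caller-sized idx array
 * limits n to 8 in this sketch.
 */
#if 0
static int
example_install_macs(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, const u8 **addrs, unsigned int n)
{
	u16 idx[8];		/* must hold n entries */
	u64 hash = 0;
	int nfilters;

	nfilters = t4_alloc_mac_filt(adap, mbox, viid, false, n, addrs,
	    idx, &hash, true);
	if (nfilters < 0)
		return nfilters;	/* negative errno */

	/* Some addresses may have overflowed into the hash filter. */
	if (hash != 0)
		return t4_set_addr_hash(adap, mbox, viid, false, hash, true);
	return 0;
}
#endif
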
5052 /**
5053  *	t4_change_mac - modifies the exact-match filter for a MAC address
5054  *	@adap: the adapter
5055  *	@mbox: mailbox to use for the FW command
5056  *	@viid: the VI id
5057  *	@idx: index of existing filter for old value of MAC address, or -1
5058  *	@addr: the new MAC address value
5059  *	@persist: whether a new MAC allocation should be persistent
5060  *	@add_smt: if true also add the address to the HW SMT
5061  *
5062  *	Modifies an exact-match filter and sets it to the new MAC address if
5063  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5064  *	latter case the address is added persistently if @persist is %true.
5065  *
5066  *	Note that in general it is not possible to modify the value of a given
5067  *	filter so the generic way to modify an address filter is to free the one
5068  *	being used by the old address value and allocate a new filter for the
5069  *	new address value.
5070  *
5071  *	Returns a negative error number or the index of the filter with the new
5072  *	MAC value.  Note that this index may differ from @idx.
5073  */
5074 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5075 		  int idx, const u8 *addr, bool persist, bool add_smt)
5076 {
5077 	int ret, mode;
5078 	struct fw_vi_mac_cmd c;
5079 	struct fw_vi_mac_exact *p = c.u.exact;
5080 	unsigned int max_mac_addr = is_t4(adap) ?
5081 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5082 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5083 
5084 	if (idx < 0)                             /* new allocation */
5085 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5086 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5087 
5088 	memset(&c, 0, sizeof(c));
5089 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5090 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5091 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5092 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5093 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5094 				V_FW_VI_MAC_CMD_IDX(idx));
5095 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5096 
5097 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5098 	if (ret == 0) {
5099 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5100 		if (ret >= max_mac_addr)
5101 			ret = -ENOMEM;
5102 	}
5103 	return ret;
5104 }
5105 
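/*
 * Usage sketch (editor's illustration, kept out of the build): replacing a
 * VI's primary unicast address.  Passing idx = -1 on the first call lets the
 * firmware pick a free filter; the returned index is cached so later changes
 * rewrite the same filter in place.
 */
#if 0
static int
example_update_primary_mac(struct adapter *adap, unsigned int mbox,
			   unsigned int viid, int *cached_idx, const u8 *mac)
{
	int ret;

	ret = t4_change_mac(adap, mbox, viid, *cached_idx, mac, true, true);
	if (ret >= 0) {
		*cached_idx = ret;	/* may differ from the old index */
		ret = 0;
	}
	return ret;
}
#endif
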
5106 /**
5107  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5108  *	@adap: the adapter
5109  *	@mbox: mailbox to use for the FW command
5110  *	@viid: the VI id
5111  *	@ucast: whether the hash filter should also match unicast addresses
5112  *	@vec: the value to be written to the hash filter
5113  *	@sleep_ok: call is allowed to sleep
5114  *
5115  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5116  */
5117 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5118 		     bool ucast, u64 vec, bool sleep_ok)
5119 {
5120 	struct fw_vi_mac_cmd c;
5121 
5122 	memset(&c, 0, sizeof(c));
5123 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5124 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5125 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5126 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5127 				    V_FW_CMD_LEN16(1));
5128 	c.u.hash.hashvec = cpu_to_be64(vec);
5129 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5130 }
5131 
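/*
 * Usage sketch (editor's illustration, kept out of the build): building the
 * 64-bit hash vector for a multicast list with hash_mac_addr(), the same
 * helper t4_alloc_mac_filt() uses for its overflow path.
 */
#if 0
static int
example_program_mcast_hash(struct adapter *adap, unsigned int mbox,
			   unsigned int viid, const u8 (*mcaddrs)[6],
			   unsigned int n)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		vec |= 1ULL << hash_mac_addr(mcaddrs[i]);

	/* ucast = false: unicast still relies on exact-match filters. */
	return t4_set_addr_hash(adap, mbox, viid, false, vec, true);
}
#endif
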
5132 /**
5133  *	t4_enable_vi - enable/disable a virtual interface
5134  *	@adap: the adapter
5135  *	@mbox: mailbox to use for the FW command
5136  *	@viid: the VI id
5137  *	@rx_en: 1=enable Rx, 0=disable Rx
5138  *	@tx_en: 1=enable Tx, 0=disable Tx
5139  *
5140  *	Enables/disables a virtual interface.
5141  */
5142 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5143 		 bool rx_en, bool tx_en)
5144 {
5145 	struct fw_vi_enable_cmd c;
5146 
5147 	memset(&c, 0, sizeof(c));
5148 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5149 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5150 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5151 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5152 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5153 }
5154 
5155 /**
5156  *	t4_identify_port - identify a VI's port by blinking its LED
5157  *	@adap: the adapter
5158  *	@mbox: mailbox to use for the FW command
5159  *	@viid: the VI id
5160  *	@nblinks: how many times to blink LED at 2.5 Hz
5161  *
5162  *	Identifies a VI's port by blinking its LED.
5163  */
5164 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5165 		     unsigned int nblinks)
5166 {
5167 	struct fw_vi_enable_cmd c;
5168 
5169 	memset(&c, 0, sizeof(c));
5170 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5171 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5172 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5173 	c.blinkdur = htons(nblinks);
5174 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5175 }
5176 
5177 /**
5178  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5179  *	@adap: the adapter
5180  *	@mbox: mailbox to use for the FW command
5181  *	@start: %true to enable the queues, %false to disable them
5182  *	@pf: the PF owning the queues
5183  *	@vf: the VF owning the queues
5184  *	@iqid: ingress queue id
5185  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5186  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5187  *
5188  *	Starts or stops an ingress queue and its associated FLs, if any.
5189  */
5190 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5191 		     unsigned int pf, unsigned int vf, unsigned int iqid,
5192 		     unsigned int fl0id, unsigned int fl1id)
5193 {
5194 	struct fw_iq_cmd c;
5195 
5196 	memset(&c, 0, sizeof(c));
5197 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5198 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5199 			    V_FW_IQ_CMD_VFN(vf));
5200 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5201 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5202 	c.iqid = htons(iqid);
5203 	c.fl0id = htons(fl0id);
5204 	c.fl1id = htons(fl1id);
5205 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5206 }
5207 
5208 /**
5209  *	t4_iq_free - free an ingress queue and its FLs
5210  *	@adap: the adapter
5211  *	@mbox: mailbox to use for the FW command
5212  *	@pf: the PF owning the queues
5213  *	@vf: the VF owning the queues
5214  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5215  *	@iqid: ingress queue id
5216  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5217  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5218  *
5219  *	Frees an ingress queue and its associated FLs, if any.
5220  */
5221 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5222 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5223 	       unsigned int fl0id, unsigned int fl1id)
5224 {
5225 	struct fw_iq_cmd c;
5226 
5227 	memset(&c, 0, sizeof(c));
5228 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5229 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5230 			    V_FW_IQ_CMD_VFN(vf));
5231 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5232 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5233 	c.iqid = htons(iqid);
5234 	c.fl0id = htons(fl0id);
5235 	c.fl1id = htons(fl1id);
5236 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5237 }
5238 
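/*
 * Usage sketch (editor's illustration, kept out of the build): releasing an
 * interrupt-capable ingress queue that has a single free list attached.
 * 0xffff marks the absent FL1, matching the convention documented above.
 */
#if 0
static int
example_free_rxq(struct adapter *adap, unsigned int mbox, unsigned int pf,
		 unsigned int vf, unsigned int iqid, unsigned int fl0id)
{
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP, iqid,
	    fl0id, 0xffff);
}
#endif
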
5239 /**
5240  *	t4_eth_eq_free - free an Ethernet egress queue
5241  *	@adap: the adapter
5242  *	@mbox: mailbox to use for the FW command
5243  *	@pf: the PF owning the queue
5244  *	@vf: the VF owning the queue
5245  *	@eqid: egress queue id
5246  *
5247  *	Frees an Ethernet egress queue.
5248  */
5249 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5250 		   unsigned int vf, unsigned int eqid)
5251 {
5252 	struct fw_eq_eth_cmd c;
5253 
5254 	memset(&c, 0, sizeof(c));
5255 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5256 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5257 			    V_FW_EQ_ETH_CMD_VFN(vf));
5258 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5259 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5260 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5261 }
5262 
5263 /**
5264  *	t4_ctrl_eq_free - free a control egress queue
5265  *	@adap: the adapter
5266  *	@mbox: mailbox to use for the FW command
5267  *	@pf: the PF owning the queue
5268  *	@vf: the VF owning the queue
5269  *	@eqid: egress queue id
5270  *
5271  *	Frees a control egress queue.
5272  */
5273 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5274 		    unsigned int vf, unsigned int eqid)
5275 {
5276 	struct fw_eq_ctrl_cmd c;
5277 
5278 	memset(&c, 0, sizeof(c));
5279 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5280 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5281 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5282 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5283 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5284 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5285 }
5286 
5287 /**
5288  *	t4_ofld_eq_free - free an offload egress queue
5289  *	@adap: the adapter
5290  *	@mbox: mailbox to use for the FW command
5291  *	@pf: the PF owning the queue
5292  *	@vf: the VF owning the queue
5293  *	@eqid: egress queue id
5294  *
5295  *	Frees an offload egress queue.
5296  */
5297 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5298 		    unsigned int vf, unsigned int eqid)
5299 {
5300 	struct fw_eq_ofld_cmd c;
5301 
5302 	memset(&c, 0, sizeof(c));
5303 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5304 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5305 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5306 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5307 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5308 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5309 }
5310 
5311 /**
5312  *	t4_handle_fw_rpl - process a FW reply message
5313  *	@adap: the adapter
5314  *	@rpl: start of the FW message
5315  *
5316  *	Processes a FW message, such as link state change messages.
5317  */
5318 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5319 {
5320 	u8 opcode = *(const u8 *)rpl;
5321 	const struct fw_port_cmd *p = (const void *)rpl;
5322 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5323 
5324 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5325 		/* link/module state change message */
5326 		int speed = 0, fc = 0, i;
5327 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5328 		struct port_info *pi = NULL;
5329 		struct link_config *lc;
5330 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5331 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5332 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5333 
5334 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5335 			fc |= PAUSE_RX;
5336 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5337 			fc |= PAUSE_TX;
5338 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5339 			speed = SPEED_100;
5340 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5341 			speed = SPEED_1000;
5342 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5343 			speed = SPEED_10000;
5344 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5345 			speed = SPEED_40000;
5346 
5347 		for_each_port(adap, i) {
5348 			pi = adap2pinfo(adap, i);
5349 			if (pi->tx_chan == chan)
5350 				break;
5351 		}
5352 		lc = &pi->link_cfg;
5353 
5354 		if (link_ok != lc->link_ok || speed != lc->speed ||
5355 		    fc != lc->fc) {                    /* something changed */
5356 			int reason;
5357 
5358 			if (!link_ok && lc->link_ok)
5359 				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5360 			else
5361 				reason = -1;
5362 
5363 			lc->link_ok = link_ok;
5364 			lc->speed = speed;
5365 			lc->fc = fc;
5366 			lc->supported = ntohs(p->u.info.pcap);
5367 			t4_os_link_changed(adap, i, link_ok, reason);
5368 		}
5369 		if (mod != pi->mod_type) {
5370 			pi->mod_type = mod;
5371 			t4_os_portmod_changed(adap, i);
5372 		}
5373 	} else {
5374 		CH_WARN_RATELIMIT(adap,
5375 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5376 		return -EINVAL;
5377 	}
5378 	return 0;
5379 }
5380 
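/*
 * Usage sketch (editor's illustration, kept out of the build): an ingress
 * handler would hand any CPL_FW6_MSG-style firmware message it dequeues to
 * t4_handle_fw_rpl(); the hypothetical msg_data pointer stands in for the
 * 64-bit-aligned payload of such a message.
 */
#if 0
static void
example_dispatch_fw_msg(struct adapter *adap, const __be64 *msg_data)
{
	if (t4_handle_fw_rpl(adap, msg_data) != 0) {
		/* Unrecognized opcode/action; already warned, rate-limited. */
	}
}
#endif
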
5381 /**
5382  *	get_pci_mode - determine a card's PCI mode
5383  *	@adapter: the adapter
5384  *	@p: where to store the PCI settings
5385  *
5386  *	Determines a card's PCI mode and associated parameters, such as speed
5387  *	and width.
5388  */
5389 static void __devinit get_pci_mode(struct adapter *adapter,
5390 				   struct pci_params *p)
5391 {
5392 	u16 val;
5393 	u32 pcie_cap;
5394 
5395 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5396 	if (pcie_cap) {
5397 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5398 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5399 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5400 	}
5401 }
5402 
5403 /**
5404  *	init_link_config - initialize a link's SW state
5405  *	@lc: structure holding the link state
5406  *	@caps: link capabilities
5407  *
5408  *	Initializes the SW state maintained for each link, including the link's
5409  *	capabilities and default speed/flow-control/autonegotiation settings.
5410  */
5411 static void __devinit init_link_config(struct link_config *lc,
5412 				       unsigned int caps)
5413 {
5414 	lc->supported = caps;
5415 	lc->requested_speed = 0;
5416 	lc->speed = 0;
5417 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5418 	if (lc->supported & FW_PORT_CAP_ANEG) {
5419 		lc->advertising = lc->supported & ADVERT_MASK;
5420 		lc->autoneg = AUTONEG_ENABLE;
5421 		lc->requested_fc |= PAUSE_AUTONEG;
5422 	} else {
5423 		lc->advertising = 0;
5424 		lc->autoneg = AUTONEG_DISABLE;
5425 	}
5426 }
5427 
5428 static int __devinit get_flash_params(struct adapter *adapter)
5429 {
5430 	int ret;
5431 	u32 info = 0;
5432 
5433 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5434 	if (!ret)
5435 		ret = sf1_read(adapter, 3, 0, 1, &info);
5436 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5437 	if (ret < 0)
5438 		return ret;
5439 
5440 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5441 		return -EINVAL;
5442 	info >>= 16;                           /* log2 of size */
5443 	if (info >= 0x14 && info < 0x18)
5444 		adapter->params.sf_nsec = 1 << (info - 16);
5445 	else if (info == 0x18)
5446 		adapter->params.sf_nsec = 64;
5447 	else
5448 		return -EINVAL;
5449 	adapter->params.sf_size = 1 << info;
5450 	return 0;
5451 }
5452 
5453 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5454 						  u8 range)
5455 {
5456 	u16 val;
5457 	u32 pcie_cap;
5458 
5459 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5460 	if (pcie_cap) {
5461 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5462 		val &= 0xfff0;
5463 		val |= range;
5464 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5465 	}
5466 }
5467 
5468 /**
5469  *	t4_prep_adapter - prepare SW and HW for operation
5470  *	@adapter: the adapter
5472  *
5473  *	Initialize adapter SW state for the various HW modules, set initial
5474  *	values for some adapter tunables, take PHYs out of reset, and
5475  *	initialize the MDIO interface.
5476  */
5477 int __devinit t4_prep_adapter(struct adapter *adapter)
5478 {
5479 	int ret;
5480 	uint16_t device_id;
5481 	uint32_t pl_rev;
5482 
5483 	get_pci_mode(adapter, &adapter->params.pci);
5484 
5485 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5486 	adapter->params.chipid = G_CHIPID(pl_rev);
5487 	adapter->params.rev = G_REV(pl_rev);
5488 	if (adapter->params.chipid == 0) {
5489 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5490 		adapter->params.chipid = CHELSIO_T4;
5491 
5492 		/* T4A1 chip is not supported */
5493 		if (adapter->params.rev == 1) {
5494 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5495 			return -EINVAL;
5496 		}
5497 	}
5498 	adapter->params.pci.vpd_cap_addr =
5499 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5500 
5501 	ret = get_flash_params(adapter);
5502 	if (ret < 0)
5503 		return ret;
5504 
5505 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5506 	if (ret < 0)
5507 		return ret;
5508 
5509 	/* Cards with real ASICs have the chipid in the PCIe device id */
5510 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5511 	if (device_id >> 12 == adapter->params.chipid)
5512 		adapter->params.cim_la_size = CIMLA_SIZE;
5513 	else {
5514 		/* FPGA */
5515 		adapter->params.fpga = 1;
5516 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5517 	}
5518 
5519 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5520 
5521 	/*
5522 	 * Default port and clock for debugging in case we can't reach FW.
5523 	 */
5524 	adapter->params.nports = 1;
5525 	adapter->params.portvec = 1;
5526 	adapter->params.vpd.cclk = 50000;
5527 
5528 	/* Set pci completion timeout value to 4 seconds. */
5529 	set_pcie_completion_timeout(adapter, 0xd);
5530 	return 0;
5531 }
5532 
5533 /**
5534  *	t4_init_tp_params - initialize adap->params.tp
5535  *	@adap: the adapter
5536  *
5537  *	Initialize various fields of the adapter's TP Parameters structure.
5538  */
5539 int __devinit t4_init_tp_params(struct adapter *adap)
5540 {
5541 	int chan;
5542 	u32 v;
5543 
5544 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5545 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5546 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5547 
5548 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5549 	for (chan = 0; chan < NCHAN; chan++)
5550 		adap->params.tp.tx_modq[chan] = chan;
5551 
5552 	/*
5553 	 * Cache the adapter's Compressed Filter Mode and global Ingress
5554 	 * Configuration.
5555 	 */
5556 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5557 			 &adap->params.tp.vlan_pri_map, 1,
5558 			 A_TP_VLAN_PRI_MAP);
5559 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5560 			 &adap->params.tp.ingress_config, 1,
5561 			 A_TP_INGRESS_CONFIG);
5562 
5563 	/*
5564 	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5565 	 * shift positions of several elements of the Compressed Filter Tuple
5566 	 * for this adapter which we need frequently ...
5567 	 */
5568 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5569 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5570 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5571 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
5572 
5573 	/*
5574 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5575 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
5576 	 */
5577 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
5578 		adap->params.tp.vnic_shift = -1;
5579 
5580 	return 0;
5581 }
5582 
5583 /**
5584  *	t4_filter_field_shift - calculate filter field shift
5585  *	@adap: the adapter
5586  *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5587  *
5588  *	Return the shift position of a filter field within the Compressed
5589  *	Filter Tuple.  The filter field is specified via its selection bit
5590  *	within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
5591  */
5592 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5593 {
5594 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5595 	unsigned int sel;
5596 	int field_shift;
5597 
5598 	if ((filter_mode & filter_sel) == 0)
5599 		return -1;
5600 
5601 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5602 		switch (filter_mode & sel) {
5603 		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5604 		case F_PORT:          field_shift += W_FT_PORT;          break;
5605 		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5606 		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5607 		case F_TOS:           field_shift += W_FT_TOS;           break;
5608 		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5609 		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5610 		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5611 		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5612 		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5613 		}
5614 	}
5615 	return field_shift;
5616 }
5617 
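/*
 * Worked example (editor's illustration, kept out of the build): assume
 * TP_VLAN_PRI_MAP was programmed to F_PORT | F_PROTOCOL.  The shift of a
 * field is the summed width of the enabled fields below it, so PORT sits at
 * bit 0, PROTOCOL starts right after it, and absent fields report -1.
 */
#if 0
static void
example_filter_shifts(const struct adapter *adap)
{
	int port_shift  = t4_filter_field_shift(adap, F_PORT);	    /* 0 */
	int proto_shift = t4_filter_field_shift(adap, F_PROTOCOL); /* W_FT_PORT */
	int vlan_shift  = t4_filter_field_shift(adap, F_VLAN);	    /* -1 */

	(void)port_shift; (void)proto_shift; (void)vlan_shift;
}
#endif
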
5618 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5619 {
5620 	u8 addr[6];
5621 	int ret, i, j;
5622 	struct fw_port_cmd c;
5623 	unsigned int rss_size;
5624 	adapter_t *adap = p->adapter;
5625 
5626 	memset(&c, 0, sizeof(c));
5627 
5628 	for (i = 0, j = -1; i <= p->port_id; i++) {
5629 		do {
5630 			j++;
5631 		} while ((adap->params.portvec & (1 << j)) == 0);
5632 	}
5633 
5634 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5635 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5636 			       V_FW_PORT_CMD_PORTID(j));
5637 	c.action_to_len16 = htonl(
5638 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5639 		FW_LEN16(c));
5640 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5641 	if (ret)
5642 		return ret;
5643 
5644 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5645 	if (ret < 0)
5646 		return ret;
5647 
5648 	p->viid = ret;
5649 	p->tx_chan = j;
5650 	p->lport = j;
5651 	p->rss_size = rss_size;
5652 	t4_os_set_hw_addr(adap, p->port_id, addr);
5653 
5654 	ret = ntohl(c.u.info.lstatus_to_modtype);
5655 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5656 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5657 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5658 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5659 
5660 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5661 
5662 	return 0;
5663 }
5664
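/*
 * Usage sketch (editor's illustration, kept out of the build): one plausible
 * ordering of the initialization routines in this file during attach --
 * t4_prep_adapter() first, TP parameters next, then one t4_port_init() per
 * port.  Error handling is reduced to early exit for brevity.
 */
#if 0
static int
example_attach_sequence(struct adapter *adap, int mbox, int pf, int vf)
{
	int i, ret;

	ret = t4_prep_adapter(adap);
	if (ret == 0)
		ret = t4_init_tp_params(adap);

	for_each_port(adap, i) {
		if (ret)
			break;
		ret = t4_port_init(adap2pinfo(adap, i), mbox, pf, vf);
	}
	return ret;
}
#endif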