xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision 342af4d5efec74bb4bc11261fdd9991c53616f54)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
34 
35 #include "common.h"
36 #include "t4_regs.h"
37 #include "t4_regs_values.h"
38 #include "firmware/t4fw_interface.h"
39 
40 #undef msleep
41 #define msleep(x) do { \
42 	if (cold) \
43 		DELAY((x) * 1000); \
44 	else \
45 		pause("t4hw", (x) * hz / 1000); \
46 } while (0)
47 
48 /**
49  *	t4_wait_op_done_val - wait until an operation is completed
50  *	@adapter: the adapter performing the operation
51  *	@reg: the register to check for completion
52  *	@mask: a single-bit field within @reg that indicates completion
53  *	@polarity: the value of the field when the operation is completed
54  *	@attempts: number of check iterations
55  *	@delay: delay in usecs between iterations
56  *	@valp: where to store the value of the register at completion time
57  *
58  *	Wait until an operation is completed by checking a bit in a register
59  *	up to @attempts times.  If @valp is not NULL the value of the register
60  *	at the time it indicated completion is stored there.  Returns 0 if the
61  *	operation completes and	-EAGAIN	otherwise.
62  */
63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64 			       int polarity, int attempts, int delay, u32 *valp)
65 {
66 	while (1) {
67 		u32 val = t4_read_reg(adapter, reg);
68 
69 		if (!!(val & mask) == polarity) {
70 			if (valp)
71 				*valp = val;
72 			return 0;
73 		}
74 		if (--attempts == 0)
75 			return -EAGAIN;
76 		if (delay)
77 			udelay(delay);
78 	}
79 }
80 
81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
82 				  int polarity, int attempts, int delay)
83 {
84 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
85 				   delay, NULL);
86 }
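
/*
 * Usage sketch (illustrative only, compiled out): wait briefly for the
 * serial flash controller to go idle, the same way sf1_read()/sf1_write()
 * below use this helper.  "example_" names are ours, not part of the driver.
 */
#if 0
static int example_wait_sf_idle(struct adapter *adap)
{
	/* Poll A_SF_OP until F_BUSY reads as 0: 10 attempts, 5us apart. */
	return t4_wait_op_done(adap, A_SF_OP, F_BUSY, 0, 10, 5);
}
#endif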
87 
88 /**
89  *	t4_set_reg_field - set a register field to a value
90  *	@adapter: the adapter to program
91  *	@addr: the register address
92  *	@mask: specifies the portion of the register to modify
93  *	@val: the new value for the register field
94  *
95  *	Sets a register field specified by the supplied mask to the
96  *	given value.
97  */
98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
99 		      u32 val)
100 {
101 	u32 v = t4_read_reg(adapter, addr) & ~mask;
102 
103 	t4_write_reg(adapter, addr, v | val);
104 	(void) t4_read_reg(adapter, addr);      /* flush */
105 }
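
/*
 * Usage sketch (compiled out): a read-modify-write that replaces only the
 * BYTECNT field of A_SF_OP.  Assumes the usual M_BYTECNT field-mask macro
 * from t4_regs.h; real callers pass whatever field mask they need.
 */
#if 0
static void example_set_bytecnt(struct adapter *adap, u32 nbytes)
{
	/* Clear the old BYTECNT field, then install the new value. */
	t4_set_reg_field(adap, A_SF_OP, V_BYTECNT(M_BYTECNT),
			 V_BYTECNT(nbytes - 1));
}
#endif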
106 
107 /**
108  *	t4_read_indirect - read indirectly addressed registers
109  *	@adap: the adapter
110  *	@addr_reg: register holding the indirect address
111  *	@data_reg: register holding the value of the indirect register
112  *	@vals: where the read register values are stored
113  *	@nregs: how many indirect registers to read
114  *	@start_idx: index of first indirect register to read
115  *
116  *	Reads registers that are accessed indirectly through an address/data
117  *	register pair.
118  */
119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
121 		      unsigned int start_idx)
122 {
123 	while (nregs--) {
124 		t4_write_reg(adap, addr_reg, start_idx);
125 		*vals++ = t4_read_reg(adap, data_reg);
126 		start_idx++;
127 	}
128 }
129 
130 /**
131  *	t4_write_indirect - write indirectly addressed registers
132  *	@adap: the adapter
133  *	@addr_reg: register holding the indirect addresses
134  *	@data_reg: register holding the value for the indirect registers
135  *	@vals: values to write
136  *	@nregs: how many indirect registers to write
137  *	@start_idx: address of first indirect register to write
138  *
139  *	Writes a sequential block of registers that are accessed indirectly
140  *	through an address/data register pair.
141  */
142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
143 		       unsigned int data_reg, const u32 *vals,
144 		       unsigned int nregs, unsigned int start_idx)
145 {
146 	while (nregs--) {
147 		t4_write_reg(adap, addr_reg, start_idx++);
148 		t4_write_reg(adap, data_reg, *vals++);
149 	}
150 }
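
/*
 * Usage sketch (compiled out): read four consecutive TP PIO registers
 * through the A_TP_PIO_ADDR/A_TP_PIO_DATA pair (register names assumed
 * from t4_regs.h).  Any address/data register pair is driven the same way.
 */
#if 0
static void example_read_tp_pio(struct adapter *adap, u32 *vals)
{
	/* vals[0..3] receive indirect registers 0x10..0x13. */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, 0x10);
}
#endif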
151 
152 /*
153  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
154  * mechanism.  This guarantees that we get the real value even if we're
155  * operating within a Virtual Machine and the Hypervisor is trapping our
156  * Configuration Space accesses.
157  */
158 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
159 {
160 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
161 		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
162 		     V_REGISTER(reg));
163 	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
164 }
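
/*
 * Usage sketch (compiled out): fetch the Vendor/Device ID dword (config
 * space offset 0) through the backdoor, bypassing any hypervisor trapping
 * of ordinary configuration cycles as described above.
 */
#if 0
static u32 example_read_id_dword(adapter_t *adap)
{
	return t4_hw_pci_read_cfg4(adap, 0);
}
#endif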
165 
166 /*
167  *	t4_report_fw_error - report firmware error
168  *	@adap: the adapter
169  *
170  *	The adapter firmware can indicate error conditions to the host.
171  *	This routine prints out the reason for the firmware error (as
172  *	reported by the firmware).
173  */
174 static void t4_report_fw_error(struct adapter *adap)
175 {
176 	static const char *reason[] = {
177 		"Crash",			/* PCIE_FW_EVAL_CRASH */
178 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
179 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
180 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
181 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
182 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
183 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
184 		"Reserved",			/* reserved */
185 	};
186 	u32 pcie_fw;
187 
188 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
189 	if (pcie_fw & F_PCIE_FW_ERR)
190 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
191 		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
192 }
193 
194 /*
195  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
196  */
197 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
198 			 u32 mbox_addr)
199 {
200 	for ( ; nflit; nflit--, mbox_addr += 8)
201 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
202 }
203 
204 /*
205  * Handle a FW assertion reported in a mailbox.
206  */
207 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
208 {
209 	struct fw_debug_cmd asrt;
210 
211 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
212 	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
213 		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
214 		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
215 }
216 
217 #define X_CIM_PF_NOACCESS 0xeeeeeeee
218 /**
219  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
220  *	@adap: the adapter
221  *	@mbox: index of the mailbox to use
222  *	@cmd: the command to write
223  *	@size: command length in bytes
224  *	@rpl: where to optionally store the reply
225  *	@sleep_ok: if true we may sleep while awaiting command completion
226  *
227  *	Sends the given command to FW through the selected mailbox and waits
228  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
229  *	store the FW's reply to the command.  The command and its optional
230  *	reply are of the same length.  Some FW commands like RESET and
231  *	INITIALIZE can take a considerable amount of time to execute.
232  *	@sleep_ok determines whether we may sleep while awaiting the response.
233  *	If sleeping is allowed we use progressive backoff; otherwise we spin.
234  *
235  *	The return value is 0 on success or a negative errno on failure.  A
236  *	failure can happen either because we are not able to execute the
237  *	command or FW executes it but signals an error.  In the latter case
238  *	the return value is the error code indicated by FW (negated).
239  */
240 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
241 		    void *rpl, bool sleep_ok)
242 {
243 	/*
244 	 * We delay in small increments at first in an effort to maintain
245 	 * responsiveness for simple, fast executing commands but then back
246 	 * off to larger delays to a maximum retry delay.
247 	 */
248 	static const int delay[] = {
249 		1, 1, 3, 5, 10, 10, 20, 50, 100
250 	};
251 
252 	u32 v;
253 	u64 res;
254 	int i, ms, delay_idx;
255 	const __be64 *p = cmd;
256 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
257 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
258 
259 	if ((size & 15) || size > MBOX_LEN)
260 		return -EINVAL;
261 
262 	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
263 	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
264 		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
265 
266 	if (v != X_MBOWNER_PL)
267 		return v ? -EBUSY : -ETIMEDOUT;
268 
269 	for (i = 0; i < size; i += 8, p++)
270 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
271 
272 	CH_DUMP_MBOX(adap, mbox, data_reg);
273 
274 	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
275 	t4_read_reg(adap, ctl_reg);          /* flush write */
276 
277 	delay_idx = 0;
278 	ms = delay[0];
279 
280 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
281 		if (sleep_ok) {
282 			ms = delay[delay_idx];  /* last element may repeat */
283 			if (delay_idx < ARRAY_SIZE(delay) - 1)
284 				delay_idx++;
285 			msleep(ms);
286 		} else
287 			mdelay(ms);
288 
289 		v = t4_read_reg(adap, ctl_reg);
290 		if (v == X_CIM_PF_NOACCESS)
291 			continue;
292 		if (G_MBOWNER(v) == X_MBOWNER_PL) {
293 			if (!(v & F_MBMSGVALID)) {
294 				t4_write_reg(adap, ctl_reg,
295 					     V_MBOWNER(X_MBOWNER_NONE));
296 				continue;
297 			}
298 
299 			CH_DUMP_MBOX(adap, mbox, data_reg);
300 
301 			res = t4_read_reg64(adap, data_reg);
302 			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
303 				fw_asrt(adap, data_reg);
304 				res = V_FW_CMD_RETVAL(EIO);
305 			} else if (rpl)
306 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
307 			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
308 			return -G_FW_CMD_RETVAL((int)res);
309 		}
310 	}
311 
312 	/*
313 	 * We timed out waiting for a reply to our mailbox command.  Report
314 	 * the error and also check to see if the firmware reported any
315 	 * errors ...
316 	 */
317 	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
318 	       *(const u8 *)cmd, mbox);
319 	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
320 		t4_report_fw_error(adap);
321 	return -ETIMEDOUT;
322 }
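
/*
 * Usage sketch (compiled out): issue a FW RESET command through a mailbox
 * and sleep while waiting for the reply.  Structure and flag names are from
 * t4fw_interface.h; this mirrors how the firmware command wrappers
 * elsewhere in the driver build their mailbox payloads.
 */
#if 0
static int example_fw_reset(struct adapter *adap, int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
			      F_FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
}
#endif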
323 
324 /**
325  *	t4_mc_read - read from MC through backdoor accesses
326  *	@adap: the adapter
327  *	@idx: which MC to access
328  *	@addr: address of first byte requested
329  *	@data: 64 bytes of data containing the requested address
330  *	@ecc: where to store the corresponding 64-bit ECC word
331  *
332  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
333  *	that covers the requested address @addr.  If @ecc is not %NULL it
334  *	is assigned the 64-bit ECC word for the read data.
335  */
336 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
337 {
338 	int i;
339 	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
340 	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
341 
342 	if (is_t4(adap)) {
343 		mc_bist_cmd_reg = A_MC_BIST_CMD;
344 		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
345 		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
346 		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
347 		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
348 	} else {
349 		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
350 		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
351 		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
352 		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
353 						  idx);
354 		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
355 						  idx);
356 	}
357 
358 	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
359 		return -EBUSY;
360 	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
361 	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
362 	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
363 	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
364 		     F_START_BIST | V_BIST_CMD_GAP(1));
365 	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
366 	if (i)
367 		return i;
368 
369 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
370 
371 	for (i = 15; i >= 0; i--)
372 		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
373 	if (ecc)
374 		*ecc = t4_read_reg64(adap, MC_DATA(16));
375 #undef MC_DATA
376 	return 0;
377 }
378 
379 /**
380  *	t4_edc_read - read from EDC through backdoor accesses
381  *	@adap: the adapter
382  *	@idx: which EDC to access
383  *	@addr: address of first byte requested
384  *	@data: 64 bytes of data containing the requested address
385  *	@ecc: where to store the corresponding 64-bit ECC word
386  *
387  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
388  *	that covers the requested address @addr.  If @ecc is not %NULL it
389  *	is assigned the 64-bit ECC word for the read data.
390  */
391 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
392 {
393 	int i;
394 	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
395 	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
396 
397 	if (is_t4(adap)) {
398 		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
399 		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
400 		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
401 		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
402 						    idx);
403 		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
404 						    idx);
405 	} else {
406 /*
407  * These macros are missing from t4_regs.h.
408  * They are added here temporarily for testing.
409  */
410 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
411 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
412 		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
413 		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
414 		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
415 		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
416 						    idx);
417 		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
418 						    idx);
419 #undef EDC_REG_T5
420 #undef EDC_STRIDE_T5
421 	}
422 
423 	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
424 		return -EBUSY;
425 	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
426 	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
427 	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
428 	t4_write_reg(adap, edc_bist_cmd_reg,
429 		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
430 	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
431 	if (i)
432 		return i;
433 
434 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
435 
436 	for (i = 15; i >= 0; i--)
437 		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
438 	if (ecc)
439 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
440 #undef EDC_DATA
441 	return 0;
442 }
443 
444 /**
445  *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
446  *	@adap: the adapter
447  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
448  *	@addr: address within indicated memory type
449  *	@len: amount of memory to read
450  *	@buf: host memory buffer
451  *
452  *	Reads an [almost] arbitrary memory region in the firmware: the
453  *	firmware memory address, length and host buffer must be aligned on
454  *	32-bit boudaries.  The memory is returned as a raw byte sequence from
455  *	32-bit boundaries.  The memory is returned as a raw byte sequence from
456  *	the firmware's memory.  If this memory contains data structures which
457  *	contain multi-byte integers, it's the caller's responsibility to
458  */
459 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
460 		__be32 *buf)
461 {
462 	u32 pos, start, end, offset;
463 	int ret;
464 
465 	/*
466 	 * Argument sanity checks ...
467 	 */
468 	if ((addr & 0x3) || (len & 0x3))
469 		return -EINVAL;
470 
471 	/*
472 	 * The underlying EDC/MC read routines read 64 bytes at a time so we
473 	 * need to round down the start and round up the end.  We'll start
474 	 * copying out of the first line at (addr - start) a word at a time.
475 	 */
476 	start = addr & ~(64-1);
477 	end = (addr + len + 64-1) & ~(64-1);
478 	offset = (addr - start)/sizeof(__be32);
479 
480 	for (pos = start; pos < end; pos += 64, offset = 0) {
481 		__be32 data[16];
482 
483 		/*
484 		 * Read the chip's memory block and bail if there's an error.
485 		 */
486 		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
487 			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
488 		else
489 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
490 		if (ret)
491 			return ret;
492 
493 		/*
494 		 * Copy the data into the caller's memory buffer.
495 		 */
496 		while (offset < 16 && len > 0) {
497 			*buf++ = data[offset++];
498 			len -= sizeof(__be32);
499 		}
500 	}
501 
502 	return 0;
503 }
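
/*
 * Usage sketch (compiled out): pull 128 bytes out of EDC0 and convert the
 * words for host consumption.  The raw buffer is big-endian, so, as noted
 * above, byte-order conversion is the caller's job.
 */
#if 0
static int example_read_edc0(struct adapter *adap, u32 addr, u32 *out)
{
	__be32 raw[32];
	int i, ret;

	ret = t4_mem_read(adap, MEM_EDC0, addr, sizeof(raw), raw);
	if (ret == 0) {
		for (i = 0; i < 32; i++)
			out[i] = ntohl(raw[i]);
	}
	return ret;
}
#endif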
504 
505 /*
506  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
507  * VPD-R header.
508  */
509 struct t4_vpd_hdr {
510 	u8  id_tag;
511 	u8  id_len[2];
512 	u8  id_data[ID_LEN];
513 	u8  vpdr_tag;
514 	u8  vpdr_len[2];
515 };
516 
517 /*
518  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
519  */
520 #define EEPROM_MAX_RD_POLL 40
521 #define EEPROM_MAX_WR_POLL 6
522 #define EEPROM_STAT_ADDR   0x7bfc
523 #define VPD_BASE           0x400
524 #define VPD_BASE_OLD       0
525 #define VPD_LEN            1024
526 #define VPD_INFO_FLD_HDR_SIZE	3
527 #define CHELSIO_VPD_UNIQUE_ID 0x82
528 
529 /**
530  *	t4_seeprom_read - read a serial EEPROM location
531  *	@adapter: adapter to read
532  *	@addr: EEPROM virtual address
533  *	@data: where to store the read data
534  *
535  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
536  *	VPD capability.  Note that this function must be called with a virtual
537  *	address.
538  */
539 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
540 {
541 	u16 val;
542 	int attempts = EEPROM_MAX_RD_POLL;
543 	unsigned int base = adapter->params.pci.vpd_cap_addr;
544 
545 	if (addr >= EEPROMVSIZE || (addr & 3))
546 		return -EINVAL;
547 
548 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
549 	do {
550 		udelay(10);
551 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
552 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
553 
554 	if (!(val & PCI_VPD_ADDR_F)) {
555 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
556 		return -EIO;
557 	}
558 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
559 	*data = le32_to_cpu(*data);
560 	return 0;
561 }
562 
563 /**
564  *	t4_seeprom_write - write a serial EEPROM location
565  *	@adapter: adapter to write
566  *	@addr: virtual EEPROM address
567  *	@data: value to write
568  *
569  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
570  *	VPD capability.  Note that this function must be called with a virtual
571  *	address.
572  */
573 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574 {
575 	u16 val;
576 	int attempts = EEPROM_MAX_WR_POLL;
577 	unsigned int base = adapter->params.pci.vpd_cap_addr;
578 
579 	if (addr >= EEPROMVSIZE || (addr & 3))
580 		return -EINVAL;
581 
582 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
583 				 cpu_to_le32(data));
584 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
585 				 (u16)addr | PCI_VPD_ADDR_F);
586 	do {
587 		msleep(1);
588 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
589 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
590 
591 	if (val & PCI_VPD_ADDR_F) {
592 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 		return -EIO;
594 	}
595 	return 0;
596 }
597 
598 /**
599  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
600  *	@phys_addr: the physical EEPROM address
601  *	@fn: the PCI function number
602  *	@sz: size of function-specific area
603  *
604  *	Translate a physical EEPROM address to virtual.  The first 1K is
605  *	accessed through virtual addresses starting at 31K, the rest is
606  *	accessed through virtual addresses starting at 0.
607  *
608  *	The mapping is as follows:
609  *	[0..1K) -> [31K..32K)
610  *	[1K..1K+A) -> [ES-A..ES)
611  *	[1K+A..ES) -> [0..ES-A-1K)
612  *
613  *	where A = @fn * @sz, and ES = EEPROM size.
614  */
615 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
616 {
617 	fn *= sz;
618 	if (phys_addr < 1024)
619 		return phys_addr + (31 << 10);
620 	if (phys_addr < 1024 + fn)
621 		return EEPROMSIZE - fn + phys_addr - 1024;
622 	if (phys_addr < EEPROMSIZE)
623 		return phys_addr - 1024 - fn;
624 	return -EINVAL;
625 }
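
/*
 * Worked example (compiled out) of the mapping above, assuming a 32KB
 * EEPROM and a 2KB (0x800) per-function area: physical address 0 lands at
 * 31K, and PF1's private area lands at the end of the virtual space.
 */
#if 0
static void example_ptov(void)
{
	int a = t4_eeprom_ptov(0, 1, 0x800);	/* -> 31744 (31K) */
	int b = t4_eeprom_ptov(1024, 1, 0x800);	/* -> EEPROMSIZE - 0x800 */

	(void)a;
	(void)b;
}
#endif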
626 
627 /**
628  *	t4_seeprom_wp - enable/disable EEPROM write protection
629  *	@adapter: the adapter
630  *	@enable: whether to enable or disable write protection
631  *
632  *	Enables or disables write protection on the serial EEPROM.
633  */
634 int t4_seeprom_wp(struct adapter *adapter, int enable)
635 {
636 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
637 }
638 
639 /**
640  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
641  *	@v: Pointer to buffered vpd data structure
642  *	@kw: The keyword to search for
643  *
644  *	Returns the value of the information field keyword or
645  *	-ENOENT otherwise.
646  */
647 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
648 {
649 	int i;
650 	unsigned int offset, len;
651 	const u8 *buf = &v->id_tag;
652 	const u8 *vpdr_len = &v->vpdr_tag;
653 	offset = sizeof(struct t4_vpd_hdr);
654 	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
655 
656 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
657 		return -ENOENT;
658 	}
659 
660 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
661 		if (memcmp(buf + i, kw, 2) == 0) {
662 			i += VPD_INFO_FLD_HDR_SIZE;
663 			return i;
664 		}
665 
666 		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
667 	}
668 
669 	return -ENOENT;
670 }
671 
672 
673 /**
674  *	get_vpd_params - read VPD parameters from VPD EEPROM
675  *	@adapter: adapter to read
676  *	@p: where to store the parameters
677  *
678  *	Reads card parameters stored in VPD EEPROM.
679  */
680 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
681 {
682 	int i, ret, addr;
683 	int ec, sn, pn, na;
684 	u8 vpd[VPD_LEN], csum;
685 	const struct t4_vpd_hdr *v;
686 
687 	/*
688 	 * Card information normally starts at VPD_BASE but early cards had
689 	 * it at 0.
690 	 */
691 	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
692 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
693 
694 	for (i = 0; i < sizeof(vpd); i += 4) {
695 		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
696 		if (ret)
697 			return ret;
698 	}
699 	v = (const struct t4_vpd_hdr *)vpd;
700 
701 #define FIND_VPD_KW(var,name) do { \
702 	var = get_vpd_keyword_val(v , name); \
703 	if (var < 0) { \
704 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
705 		return -EINVAL; \
706 	} \
707 } while (0)
708 
709 	FIND_VPD_KW(i, "RV");
710 	for (csum = 0; i >= 0; i--)
711 		csum += vpd[i];
712 
713 	if (csum) {
714 		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
715 		return -EINVAL;
716 	}
717 	FIND_VPD_KW(ec, "EC");
718 	FIND_VPD_KW(sn, "SN");
719 	FIND_VPD_KW(pn, "PN");
720 	FIND_VPD_KW(na, "NA");
721 #undef FIND_VPD_KW
722 
723 	memcpy(p->id, v->id_data, ID_LEN);
724 	strstrip(p->id);
725 	memcpy(p->ec, vpd + ec, EC_LEN);
726 	strstrip(p->ec);
727 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
728 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
729 	strstrip(p->sn);
730 	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
731 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
732 	strstrip((char *)p->pn);
733 	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
734 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
735 	strstrip((char *)p->na);
736 
737 	return 0;
738 }
739 
740 /* serial flash and firmware constants and flash config file constants */
741 enum {
742 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
743 
744 	/* flash command opcodes */
745 	SF_PROG_PAGE    = 2,          /* program page */
746 	SF_WR_DISABLE   = 4,          /* disable writes */
747 	SF_RD_STATUS    = 5,          /* read status register */
748 	SF_WR_ENABLE    = 6,          /* enable writes */
749 	SF_RD_DATA_FAST = 0xb,        /* read flash */
750 	SF_RD_ID        = 0x9f,       /* read ID */
751 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
752 };
753 
754 /**
755  *	sf1_read - read data from the serial flash
756  *	@adapter: the adapter
757  *	@byte_cnt: number of bytes to read
758  *	@cont: whether another operation will be chained
759  *	@lock: whether to lock SF for PL access only
760  *	@valp: where to store the read data
761  *
762  *	Reads up to 4 bytes of data from the serial flash.  The location of
763  *	the read needs to be specified prior to calling this by issuing the
764  *	appropriate commands to the serial flash.
765  */
766 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
767 		    int lock, u32 *valp)
768 {
769 	int ret;
770 
771 	if (!byte_cnt || byte_cnt > 4)
772 		return -EINVAL;
773 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
774 		return -EBUSY;
775 	t4_write_reg(adapter, A_SF_OP,
776 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
777 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
778 	if (!ret)
779 		*valp = t4_read_reg(adapter, A_SF_DATA);
780 	return ret;
781 }
782 
783 /**
784  *	sf1_write - write data to the serial flash
785  *	@adapter: the adapter
786  *	@byte_cnt: number of bytes to write
787  *	@cont: whether another operation will be chained
788  *	@lock: whether to lock SF for PL access only
789  *	@val: value to write
790  *
791  *	Writes up to 4 bytes of data to the serial flash.  The location of
792  *	the write needs to be specified prior to calling this by issuing the
793  *	appropriate commands to the serial flash.
794  */
795 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
796 		     int lock, u32 val)
797 {
798 	if (!byte_cnt || byte_cnt > 4)
799 		return -EINVAL;
800 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
801 		return -EBUSY;
802 	t4_write_reg(adapter, A_SF_DATA, val);
803 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
804 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
805 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
806 }
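
/*
 * Usage sketch (compiled out): read the flash manufacturer/device ID by
 * chaining an SF_RD_ID opcode (sf1_write with cont=1) with a 3-byte
 * sf1_read, the same pattern t4_read_flash() uses for SF_RD_DATA_FAST.
 */
#if 0
static int example_read_flash_id(struct adapter *adap, u32 *id)
{
	int ret;

	ret = sf1_write(adap, 1, 1, 1, SF_RD_ID);	/* opcode, keep CS */
	if (ret == 0)
		ret = sf1_read(adap, 3, 0, 1, id);	/* 3 ID bytes */
	t4_write_reg(adap, A_SF_OP, 0);			/* unlock SF */
	return ret;
}
#endif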
807 
808 /**
809  *	flash_wait_op - wait for a flash operation to complete
810  *	@adapter: the adapter
811  *	@attempts: max number of polls of the status register
812  *	@delay: delay between polls in ms
813  *
814  *	Wait for a flash operation to complete by polling the status register.
815  */
816 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
817 {
818 	int ret;
819 	u32 status;
820 
821 	while (1) {
822 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
823 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
824 			return ret;
825 		if (!(status & 1))
826 			return 0;
827 		if (--attempts == 0)
828 			return -EAGAIN;
829 		if (delay)
830 			msleep(delay);
831 	}
832 }
833 
834 /**
835  *	t4_read_flash - read words from serial flash
836  *	@adapter: the adapter
837  *	@addr: the start address for the read
838  *	@nwords: how many 32-bit words to read
839  *	@data: where to store the read data
840  *	@byte_oriented: whether to store data as bytes or as words
841  *
842  *	Read the specified number of 32-bit words from the serial flash.
843  *	If @byte_oriented is set the read data is stored as a byte array
844  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
845  *	natural endianness.
846  */
847 int t4_read_flash(struct adapter *adapter, unsigned int addr,
848 		  unsigned int nwords, u32 *data, int byte_oriented)
849 {
850 	int ret;
851 
852 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
853 		return -EINVAL;
854 
855 	addr = swab32(addr) | SF_RD_DATA_FAST;
856 
857 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
858 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
859 		return ret;
860 
861 	for ( ; nwords; nwords--, data++) {
862 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
863 		if (nwords == 1)
864 			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
865 		if (ret)
866 			return ret;
867 		if (byte_oriented)
868 			*data = htonl(*data);
869 	}
870 	return 0;
871 }
872 
873 /**
874  *	t4_write_flash - write up to a page of data to the serial flash
875  *	@adapter: the adapter
876  *	@addr: the start address to write
877  *	@n: length of data to write in bytes
878  *	@data: the data to write
879  *	@byte_oriented: whether to store data as bytes or as words
880  *
881  *	Writes up to a page of data (256 bytes) to the serial flash starting
882  *	at the given address.  All the data must be written to the same page.
883  *	If @byte_oriented is set the write data is stored as a byte stream
884  *	(i.e., it matches what is on disk), otherwise in big-endian.
885  */
886 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
887 			  unsigned int n, const u8 *data, int byte_oriented)
888 {
889 	int ret;
890 	u32 buf[SF_PAGE_SIZE / 4];
891 	unsigned int i, c, left, val, offset = addr & 0xff;
892 
893 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
894 		return -EINVAL;
895 
896 	val = swab32(addr) | SF_PROG_PAGE;
897 
898 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
899 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
900 		goto unlock;
901 
902 	for (left = n; left; left -= c) {
903 		c = min(left, 4U);
904 		for (val = 0, i = 0; i < c; ++i)
905 			val = (val << 8) + *data++;
906 
907 		if (!byte_oriented)
908 			val = htonl(val);
909 
910 		ret = sf1_write(adapter, c, c != left, 1, val);
911 		if (ret)
912 			goto unlock;
913 	}
914 	ret = flash_wait_op(adapter, 8, 1);
915 	if (ret)
916 		goto unlock;
917 
918 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
919 
920 	/* Read the page to verify the write succeeded */
921 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
922 			    byte_oriented);
923 	if (ret)
924 		return ret;
925 
926 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
927 		CH_ERR(adapter, "failed to correctly write the flash page "
928 		       "at %#x\n", addr);
929 		return -EIO;
930 	}
931 	return 0;
932 
933 unlock:
934 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
935 	return ret;
936 }
937 
938 /**
939  *	t4_get_fw_version - read the firmware version
940  *	@adapter: the adapter
941  *	@vers: where to place the version
942  *
943  *	Reads the FW version from flash.
944  */
945 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
946 {
947 	return t4_read_flash(adapter,
948 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
949 			     vers, 0);
950 }
951 
952 /**
953  *	t4_get_tp_version - read the TP microcode version
954  *	@adapter: the adapter
955  *	@vers: where to place the version
956  *
957  *	Reads the TP microcode version from flash.
958  */
959 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
960 {
961 	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
962 							      tp_microcode_ver),
963 			     1, vers, 0);
964 }
965 
966 /**
967  *	t4_check_fw_version - check if the FW is compatible with this driver
968  *	@adapter: the adapter
969  *
970  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
971  *	if there's exact match, a negative error if the version could not be
972  *	read or there's a major version mismatch, and a positive value if the
973  *	expected major version is found but there's a minor version mismatch.
974  */
975 int t4_check_fw_version(struct adapter *adapter)
976 {
977 	int ret, major, minor, micro;
978 	int exp_major, exp_minor, exp_micro;
979 
980 	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
981 	if (!ret)
982 		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
983 	if (ret)
984 		return ret;
985 
986 	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
987 	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
988 	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
989 
990 	switch (chip_id(adapter)) {
991 	case CHELSIO_T4:
992 		exp_major = T4FW_VERSION_MAJOR;
993 		exp_minor = T4FW_VERSION_MINOR;
994 		exp_micro = T4FW_VERSION_MICRO;
995 		break;
996 	case CHELSIO_T5:
997 		exp_major = T5FW_VERSION_MAJOR;
998 		exp_minor = T5FW_VERSION_MINOR;
999 		exp_micro = T5FW_VERSION_MICRO;
1000 		break;
1001 	default:
1002 		CH_ERR(adapter, "Unsupported chip type, %x\n",
1003 		    chip_id(adapter));
1004 		return -EINVAL;
1005 	}
1006 
1007 	if (major != exp_major) {            /* major mismatch - fail */
1008 		CH_ERR(adapter, "card FW has major version %u, driver wants "
1009 		       "%u\n", major, exp_major);
1010 		return -EINVAL;
1011 	}
1012 
1013 	if (minor == exp_minor && micro == exp_micro)
1014 		return 0;                                   /* perfect match */
1015 
1016 	/* Minor/micro version mismatch.  Report it but often it's OK. */
1017 	return 1;
1018 }
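
/*
 * Illustrative sketch (compiled out): the version words read above pack
 * major/minor/micro fields that the G_FW_HDR_FW_VER_*() macros extract;
 * this formats a fetched firmware version for display.
 */
#if 0
static void example_fmt_fw_version(struct adapter *adap, char *buf, size_t len)
{
	u32 v = adap->params.fw_vers;

	snprintf(buf, len, "%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(v),
	    G_FW_HDR_FW_VER_MINOR(v), G_FW_HDR_FW_VER_MICRO(v));
}
#endif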
1019 
1020 /**
1021  *	t4_flash_erase_sectors - erase a range of flash sectors
1022  *	@adapter: the adapter
1023  *	@start: the first sector to erase
1024  *	@end: the last sector to erase
1025  *
1026  *	Erases the sectors in the given inclusive range.
1027  */
1028 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1029 {
1030 	int ret = 0;
1031 
1032 	while (start <= end) {
1033 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1034 		    (ret = sf1_write(adapter, 4, 0, 1,
1035 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1036 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1037 			CH_ERR(adapter, "erase of flash sector %d failed, "
1038 			       "error %d\n", start, ret);
1039 			break;
1040 		}
1041 		start++;
1042 	}
1043 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
1044 	return ret;
1045 }
1046 
1047 /**
1048  *	t4_flash_cfg_addr - return the address of the flash configuration file
1049  *	@adapter: the adapter
1050  *
1051  *	Return the address within the flash where the Firmware Configuration
1052  *	File is stored, or an error if the device FLASH is too small to contain
1053  *	a Firmware Configuration File.
1054  */
1055 int t4_flash_cfg_addr(struct adapter *adapter)
1056 {
1057 	/*
1058 	 * If the device FLASH isn't large enough to hold a Firmware
1059 	 * Configuration File, return an error.
1060 	 */
1061 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
1062 		return -ENOSPC;
1063 
1064 	return FLASH_CFG_START;
1065 }
1066 
1067 /**
1068  *	t4_load_cfg - download config file
1069  *	@adap: the adapter
1070  *	@cfg_data: the cfg text file to write
1071  *	@size: text file size
1072  *
1073  *	Write the supplied config text file to the card's serial flash.
1074  */
1075 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1076 {
1077 	int ret, i, n, cfg_addr;
1078 	unsigned int addr;
1079 	unsigned int flash_cfg_start_sec;
1080 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1081 
1082 	cfg_addr = t4_flash_cfg_addr(adap);
1083 	if (cfg_addr < 0)
1084 		return cfg_addr;
1085 
1086 	addr = cfg_addr;
1087 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
1088 
1089 	if (size > FLASH_CFG_MAX_SIZE) {
1090 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
1091 		       FLASH_CFG_MAX_SIZE);
1092 		return -EFBIG;
1093 	}
1094 
1095 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
1096 			 sf_sec_size);
1097 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
1098 				     flash_cfg_start_sec + i - 1);
1099 	/*
1100 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1101 	 * with the on-adapter Firmware Configuration File.
1102 	 */
1103 	if (ret || size == 0)
1104 		goto out;
1105 
1106 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
1107 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
1108 		if ((size - i) < SF_PAGE_SIZE)
1109 			n = size - i;
1110 		else
1111 			n = SF_PAGE_SIZE;
1112 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
1113 		if (ret)
1114 			goto out;
1115 
1116 		addr += SF_PAGE_SIZE;
1117 		cfg_data += SF_PAGE_SIZE;
1118 	}
1119 
1120 out:
1121 	if (ret)
1122 		CH_ERR(adap, "config file %s failed %d\n",
1123 		       (size == 0 ? "clear" : "download"), ret);
1124 	return ret;
1125 }
1126 
1127 
1128 /**
1129  *	t4_load_fw - download firmware
1130  *	@adap: the adapter
1131  *	@fw_data: the firmware image to write
1132  *	@size: image size
1133  *
1134  *	Write the supplied firmware image to the card's serial flash.
1135  */
1136 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1137 {
1138 	u32 csum;
1139 	int ret, addr;
1140 	unsigned int i;
1141 	u8 first_page[SF_PAGE_SIZE];
1142 	const u32 *p = (const u32 *)fw_data;
1143 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1144 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1145 	unsigned int fw_start_sec;
1146 	unsigned int fw_start;
1147 	unsigned int fw_size;
1148 
1149 	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
1150 		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
1151 		fw_start = FLASH_FWBOOTSTRAP_START;
1152 		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
1153 	} else {
1154 		fw_start_sec = FLASH_FW_START_SEC;
1155 		fw_start = FLASH_FW_START;
1156 		fw_size = FLASH_FW_MAX_SIZE;
1157 	}
1158 	if (!size) {
1159 		CH_ERR(adap, "FW image has no data\n");
1160 		return -EINVAL;
1161 	}
1162 	if (size & 511) {
1163 		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1164 		return -EINVAL;
1165 	}
1166 	if (ntohs(hdr->len512) * 512 != size) {
1167 		CH_ERR(adap, "FW image size differs from size in FW header\n");
1168 		return -EINVAL;
1169 	}
1170 	if (size > fw_size) {
1171 		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
1172 		return -EFBIG;
1173 	}
1174 	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
1175 	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
1176 		CH_ERR(adap,
1177 		    "FW image (%d) is not suitable for this adapter (%d)\n",
1178 		    hdr->chip, chip_id(adap));
1179 		return -EINVAL;
1180 	}
1181 
1182 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1183 		csum += ntohl(p[i]);
1184 
1185 	if (csum != 0xffffffff) {
1186 		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1187 		       csum);
1188 		return -EINVAL;
1189 	}
1190 
1191 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1192 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1193 	if (ret)
1194 		goto out;
1195 
1196 	/*
1197 	 * We write the correct version at the end so the driver can see a bad
1198 	 * version if the FW write fails.  Start by writing a copy of the
1199 	 * first page with a bad version.
1200 	 */
1201 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1202 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1203 	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
1204 	if (ret)
1205 		goto out;
1206 
1207 	addr = fw_start;
1208 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1209 		addr += SF_PAGE_SIZE;
1210 		fw_data += SF_PAGE_SIZE;
1211 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1212 		if (ret)
1213 			goto out;
1214 	}
1215 
1216 	ret = t4_write_flash(adap,
1217 			     fw_start + offsetof(struct fw_hdr, fw_ver),
1218 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1219 out:
1220 	if (ret)
1221 		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1222 	return ret;
1223 }
1224 
1225 /* BIOS boot headers */
1226 typedef struct pci_expansion_rom_header {
1227 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1228 	u8	reserved[22]; /* Reserved for processor-architecture data */
1229 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1230 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1231 
1232 /* Legacy PCI Expansion ROM Header */
1233 typedef struct legacy_pci_expansion_rom_header {
1234 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1235 	u8	size512; /* Current Image Size in units of 512 bytes */
1236 	u8	initentry_point[4];
1237 	u8	cksum; /* Checksum computed on the entire Image */
1238 	u8	reserved[16]; /* Reserved */
1239 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1240 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1241 
1242 /* EFI PCI Expansion ROM Header */
1243 typedef struct efi_pci_expansion_rom_header {
1244 	u8	signature[2]; /* ROM signature. The value 0xaa55 */
1245 	u8	initialization_size[2]; /* Units 512. Includes this header */
1246 	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1247 	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
1248 	u8	efi_machine_type[2]; /* Machine type from EFI image header */
1249 	u8	compression_type[2]; /* Compression type. */
1250 		/*
1251 		 * Compression type definition
1252 		 * 0x0: uncompressed
1253 		 * 0x1: Compressed
1254 		 * 0x2-0xFFFF: Reserved
1255 		 */
1256 	u8	reserved[8]; /* Reserved */
1257 	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
1258 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1259 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1260 
1261 /* PCI Data Structure Format */
1262 typedef struct pcir_data_structure { /* PCI Data Structure */
1263 	u8	signature[4]; /* Signature. The string "PCIR" */
1264 	u8	vendor_id[2]; /* Vendor Identification */
1265 	u8	device_id[2]; /* Device Identification */
1266 	u8	vital_product[2]; /* Pointer to Vital Product Data */
1267 	u8	length[2]; /* PCIR Data Structure Length */
1268 	u8	revision; /* PCIR Data Structure Revision */
1269 	u8	class_code[3]; /* Class Code */
1270 	u8	image_length[2]; /* Image Length. Multiple of 512B */
1271 	u8	code_revision[2]; /* Revision Level of Code/Data */
1272 	u8	code_type; /* Code Type. */
1273 		/*
1274 		 * PCI Expansion ROM Code Types
1275 		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1276 		 * 0x01: Open Firmware standard for PCI. FCODE
1277 		 * 0x02: Hewlett-Packard PA RISC. HP reserved
1278 		 * 0x03: EFI Image. EFI
1279 		 * 0x04-0xFF: Reserved.
1280 		 */
1281 	u8	indicator; /* Indicator. Identifies the last image in the ROM */
1282 	u8	reserved[2]; /* Reserved */
1283 } pcir_data_t; /* PCI__DATA_STRUCTURE */
1284 
1285 /* BOOT constants */
1286 enum {
1287 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1288 	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
1289 	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
1290 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1291 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max image size: 1024 * 512B */
1292 	VENDOR_ID = 0x1425, /* Vendor ID */
1293 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1294 };
1295 
1296 /**
1297  *	modify_device_id - Modifies the device ID of the Boot BIOS image
1298  *	@device_id: the device ID to write.
1299  *	@boot_data: the boot image to modify.
1300  *
1301  *	Write the supplied device ID to the boot BIOS image.
1302  */
1303 static void modify_device_id(int device_id, u8 *boot_data)
1304 {
1305 	legacy_pci_exp_rom_header_t *header;
1306 	pcir_data_t *pcir_header;
1307 	u32 cur_header = 0;
1308 
1309 	/*
1310 	 * Loop through all chained images and change the device ID's
1311 	 */
1312 	while (1) {
1313 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1314 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
1315 		    le16_to_cpu(*(u16*)header->pcir_offset)];
1316 
1317 		/*
1318 		 * Only modify the Device ID if code type is Legacy or EFI.
1319 		 * 0x00: Okay to modify
1320 		 * 0x01: FCODE. Do not modify
1321 		 * 0x03: Okay to modify
1322 		 * 0x04-0xFF: Do not modify
1323 		 */
1324 		if (pcir_header->code_type == 0x00) {
1325 			u8 csum = 0;
1326 			int i;
1327 
1328 			/*
1329 			 * Modify Device ID to match current adapter
1330 			 */
1331 			*(u16*) pcir_header->device_id = device_id;
1332 
1333 			/*
1334 			 * Set checksum temporarily to 0.
1335 			 * We will recalculate it later.
1336 			 */
1337 			header->cksum = 0x0;
1338 
1339 			/*
1340 			 * Calculate and update checksum
1341 			 */
1342 			for (i = 0; i < (header->size512 * 512); i++)
1343 				csum += (u8)boot_data[cur_header + i];
1344 
1345 			/*
1346 			 * Invert summed value to create the checksum
1347 			 * Writing new checksum value directly to the boot data
1348 			 */
1349 			boot_data[cur_header + 7] = -csum;
1350 
1351 		} else if (pcir_header->code_type == 0x03) {
1352 
1353 			/*
1354 			 * Modify Device ID to match current adapter
1355 			 */
1356 			*(u16*) pcir_header->device_id = device_id;
1357 
1358 		}
1359 
1360 
1361 		/*
1362 		 * Check indicator element to identify if this is the last
1363 		 * image in the ROM.
1364 		 */
1365 		if (pcir_header->indicator & 0x80)
1366 			break;
1367 
1368 		/*
1369 		 * Move header pointer up to the next image in the ROM.
1370 		 */
1371 		cur_header += header->size512 * 512;
1372 	}
1373 }
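
/*
 * Note on the checksum logic above (illustrative, compiled out): a legacy
 * option ROM image is valid when its header->size512 * 512 bytes sum to
 * zero modulo 256, so storing -csum in the checksum byte (offset 7)
 * restores a zero total after the device ID bytes change.
 */
#if 0
static u8 example_rom_csum(const u8 *image, unsigned int len)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += image[i];
	return sum;	/* 0 for a valid image */
}
#endif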
1374 
1375 /**
1376  *	t4_load_boot - download boot flash
1377  *	@adapter: the adapter
1378  *	@boot_data: the boot image to write
1379  *	@boot_addr: offset in flash to write boot_data
1380  *	@size: image size
1381  *
1382  *	Write the supplied boot image to the card's serial flash.
1383  *	The boot image has the following sections: a 28-byte header,
1384  *	followed by the image itself.
1385  */
1386 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1387 		 unsigned int boot_addr, unsigned int size)
1388 {
1389 	pci_exp_rom_header_t *header;
1390 	int pcir_offset ;
1391 	pcir_data_t *pcir_header;
1392 	int ret, addr;
1393 	uint16_t device_id;
1394 	unsigned int i;
1395 	unsigned int boot_sector = boot_addr * 1024;
1396 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1397 
1398 	/*
1399 	 * Make sure the boot image does not encroach on the firmware region
1400 	 */
1401 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1402 		CH_ERR(adap, "boot image encroaching on firmware region\n");
1403 		return -EFBIG;
1404 	}
1405 
1406 	/*
1407 	 * Number of sectors spanned
1408 	 */
1409 	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1410 			sf_sec_size);
1411 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1412 				     (boot_sector >> 16) + i - 1);
1413 
1414 	/*
1415 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1416 	 * with the on-adapter option ROM file
1417 	 */
1418 	if (ret || (size == 0))
1419 		goto out;
1420 
1421 	/* Get boot header */
1422 	header = (pci_exp_rom_header_t *)boot_data;
1423 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1424 	/* PCIR Data Structure */
1425 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1426 
1427 	/*
1428 	 * Perform some primitive sanity testing to avoid accidentally
1429 	 * writing garbage over the boot sectors.  We ought to check for
1430 	 * more but it's not worth it for now ...
1431 	 */
1432 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1433 		CH_ERR(adap, "boot image too small/large\n");
1434 		return -EFBIG;
1435 	}
1436 
1437 	/*
1438 	 * Check BOOT ROM header signature
1439 	 */
1440 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1441 		CH_ERR(adap, "Boot image missing signature\n");
1442 		return -EINVAL;
1443 	}
1444 
1445 	/*
1446 	 * Check PCI header signature
1447 	 */
1448 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1449 		CH_ERR(adap, "PCI header missing signature\n");
1450 		return -EINVAL;
1451 	}
1452 
1453 	/*
1454 	 * Check Vendor ID matches Chelsio ID
1455 	 */
1456 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1457 		CH_ERR(adap, "Vendor ID does not match Chelsio ID\n");
1458 		return -EINVAL;
1459 	}
1460 
1461 	/*
1462 	 * Retrieve adapter's device ID
1463 	 */
1464 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1465 	/* We want to deal with PF 0 so we strip off the PF 4 indicator */
1466 	device_id = (device_id & 0xff) | 0x4000;
1467 
1468 	/*
1469 	 * Check PCIE Device ID
1470 	 */
1471 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1472 		/*
1473 		 * Change the device ID in the Boot BIOS image to match
1474 		 * the Device ID of the current adapter.
1475 		 */
1476 		modify_device_id(device_id, boot_data);
1477 	}
1478 
1479 	/*
1480 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1481 	 * we finish copying the rest of the boot image. This will ensure
1482 	 * that the BIOS boot header will only be written if the boot image
1483 	 * was written in full.
1484 	 */
1485 	addr = boot_sector;
1486 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1487 		addr += SF_PAGE_SIZE;
1488 		boot_data += SF_PAGE_SIZE;
1489 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1490 		if (ret)
1491 			goto out;
1492 	}
1493 
1494 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1495 
1496 out:
1497 	if (ret)
1498 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1499 	return ret;
1500 }
1501 
1502 /**
1503  *	t4_read_cimq_cfg - read CIM queue configuration
1504  *	@adap: the adapter
1505  *	@base: holds the queue base addresses in bytes
1506  *	@size: holds the queue sizes in bytes
1507  *	@thres: holds the queue full thresholds in bytes
1508  *
1509  *	Returns the current configuration of the CIM queues, starting with
1510  *	the IBQs, then the OBQs.
1511  */
1512 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1513 {
1514 	unsigned int i, v;
1515 	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1516 
1517 	for (i = 0; i < CIM_NUM_IBQ; i++) {
1518 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1519 			     V_QUENUMSELECT(i));
1520 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1521 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1522 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1523 		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1524 	}
1525 	for (i = 0; i < cim_num_obq; i++) {
1526 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1527 			     V_QUENUMSELECT(i));
1528 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1529 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1530 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1531 	}
1532 }
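
/*
 * Usage sketch (compiled out): the helper fills IBQ entries first, then
 * OBQ entries, so callers size their arrays for both sets; full thresholds
 * exist only for the IBQs.
 */
#if 0
static void example_dump_cimq_cfg(struct adapter *adap)
{
	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	u16 thres[CIM_NUM_IBQ];

	t4_read_cimq_cfg(adap, base, size, thres);
}
#endif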
1533 
1534 /**
1535  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1536  *	@adap: the adapter
1537  *	@qid: the queue index
1538  *	@data: where to store the queue contents
1539  *	@n: capacity of @data in 32-bit words
1540  *
1541  *	Reads the contents of the selected CIM queue starting at address 0 up
1542  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1543  *	error and the number of 32-bit words actually read on success.
1544  */
1545 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1546 {
1547 	int i, err;
1548 	unsigned int addr;
1549 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1550 
1551 	if (qid > 5 || (n & 3))
1552 		return -EINVAL;
1553 
1554 	addr = qid * nwords;
1555 	if (n > nwords)
1556 		n = nwords;
1557 
1558 	for (i = 0; i < n; i++, addr++) {
1559 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1560 			     F_IBQDBGEN);
1561 		/*
1562 		 * It might take 3-10ms before the IBQ debug read access is
1563 		 * allowed.  Wait up to 1 second with a delay of 1 usec.
1564 		 */
1565 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1566 				      1000000, 1);
1567 		if (err)
1568 			return err;
1569 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1570 	}
1571 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1572 	return i;
1573 }
1574 
1575 /**
1576  *	t4_read_cim_obq - read the contents of a CIM outbound queue
1577  *	@adap: the adapter
1578  *	@qid: the queue index
1579  *	@data: where to store the queue contents
1580  *	@n: capacity of @data in 32-bit words
1581  *
1582  *	Reads the contents of the selected CIM queue starting at address 0 up
1583  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1584  *	error and the number of 32-bit words actually read on success.
1585  */
1586 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1587 {
1588 	int i, err;
1589 	unsigned int addr, v, nwords;
1590 	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1591 
1592 	if (qid >= cim_num_obq || (n & 3))
1593 		return -EINVAL;
1594 
1595 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1596 		     V_QUENUMSELECT(qid));
1597 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1598 
1599 	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
1600 	nwords = G_CIMQSIZE(v) * 64;  /* same */
1601 	if (n > nwords)
1602 		n = nwords;
1603 
1604 	for (i = 0; i < n; i++, addr++) {
1605 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1606 			     F_OBQDBGEN);
1607 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1608 				      2, 1);
1609 		if (err)
1610 			return err;
1611 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1612 	}
1613 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1614 	return i;
1615 }
1616 
1617 enum {
1618 	CIM_QCTL_BASE     = 0,
1619 	CIM_CTL_BASE      = 0x2000,
1620 	CIM_PBT_ADDR_BASE = 0x2800,
1621 	CIM_PBT_LRF_BASE  = 0x3000,
1622 	CIM_PBT_DATA_BASE = 0x3800
1623 };
1624 
1625 /**
1626  *	t4_cim_read - read a block from CIM internal address space
1627  *	@adap: the adapter
1628  *	@addr: the start address within the CIM address space
1629  *	@n: number of words to read
1630  *	@valp: where to store the result
1631  *
1632  *	Reads a block of 4-byte words from the CIM internal address space.
1633  */
1634 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1635 		unsigned int *valp)
1636 {
1637 	int ret = 0;
1638 
1639 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1640 		return -EBUSY;
1641 
1642 	for ( ; !ret && n--; addr += 4) {
1643 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1644 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1645 				      0, 5, 2);
1646 		if (!ret)
1647 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1648 	}
1649 	return ret;
1650 }
1651 
1652 /**
1653  *	t4_cim_write - write a block into CIM internal address space
1654  *	@adap: the adapter
1655  *	@addr: the start address within the CIM address space
1656  *	@n: number of words to write
1657  *	@valp: set of values to write
1658  *
1659  *	Writes a block of 4-byte words into the CIM internal address space.
1660  */
1661 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1662 		 const unsigned int *valp)
1663 {
1664 	int ret = 0;
1665 
1666 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1667 		return -EBUSY;
1668 
1669 	for ( ; !ret && n--; addr += 4) {
1670 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1671 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1672 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1673 				      0, 5, 2);
1674 	}
1675 	return ret;
1676 }
1677 
1678 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1679 {
1680 	return t4_cim_write(adap, addr, 1, &val);
1681 }
1682 
1683 /**
1684  *	t4_cim_ctl_read - read a block from CIM control region
1685  *	@adap: the adapter
1686  *	@addr: the start address within the CIM control region
1687  *	@n: number of words to read
1688  *	@valp: where to store the result
1689  *
1690  *	Reads a block of 4-byte words from the CIM control region.
1691  */
1692 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1693 		    unsigned int *valp)
1694 {
1695 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1696 }
1697 
1698 /**
1699  *	t4_cim_read_la - read CIM LA capture buffer
1700  *	@adap: the adapter
1701  *	@la_buf: where to store the LA data
1702  *	@wrptr: the HW write pointer within the capture buffer
1703  *
1704  *	Reads the contents of the CIM LA buffer with the most recent entry at
1705  *	the end	of the returned data and with the entry at @wrptr first.
1706  *	We try to leave the LA in the running state we find it in.
1707  */
1708 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1709 {
1710 	int i, ret;
1711 	unsigned int cfg, val, idx;
1712 
1713 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1714 	if (ret)
1715 		return ret;
1716 
1717 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1718 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1719 		if (ret)
1720 			return ret;
1721 	}
1722 
1723 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1724 	if (ret)
1725 		goto restart;
1726 
1727 	idx = G_UPDBGLAWRPTR(val);
1728 	if (wrptr)
1729 		*wrptr = idx;
1730 
1731 	for (i = 0; i < adap->params.cim_la_size; i++) {
1732 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1733 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1734 		if (ret)
1735 			break;
1736 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1737 		if (ret)
1738 			break;
1739 		if (val & F_UPDBGLARDEN) {
1740 			ret = -ETIMEDOUT;
1741 			break;
1742 		}
1743 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1744 		if (ret)
1745 			break;
1746 		idx = (idx + 1) & M_UPDBGLARDPTR;
1747 	}
1748 restart:
1749 	if (cfg & F_UPDBGLAEN) {
1750 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1751 				      cfg & ~F_UPDBGLARDEN);
1752 		if (!ret)
1753 			ret = r;
1754 	}
1755 	return ret;
1756 }
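
/*
 * Illustrative sketch (not part of the original source): the LA snapshot
 * is adap->params.cim_la_size 32-bit words, so a caller allocates that
 * many words and may use @wrptr to locate the oldest entry.  M_CXGBE is
 * assumed here to be the driver's malloc type; the helper name is
 * hypothetical.
 */
static __unused int
example_snapshot_cim_la(struct adapter *adap)
{
	u32 *buf;
	unsigned int wrptr;
	int ret;

	buf = malloc(adap->params.cim_la_size * sizeof(u32), M_CXGBE,
	    M_NOWAIT | M_ZERO);
	if (buf == NULL)
		return (-ENOMEM);
	ret = t4_cim_read_la(adap, buf, &wrptr);
	/* on success, buf[] ends with the most recent LA entry */
	free(buf, M_CXGBE);
	return (ret);
}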
1757 
1758 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1759 			unsigned int *pif_req_wrptr,
1760 			unsigned int *pif_rsp_wrptr)
1761 {
1762 	int i, j;
1763 	u32 cfg, val, req, rsp;
1764 
1765 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1766 	if (cfg & F_LADBGEN)
1767 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1768 
1769 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1770 	req = G_POLADBGWRPTR(val);
1771 	rsp = G_PILADBGWRPTR(val);
1772 	if (pif_req_wrptr)
1773 		*pif_req_wrptr = req;
1774 	if (pif_rsp_wrptr)
1775 		*pif_rsp_wrptr = rsp;
1776 
1777 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1778 		for (j = 0; j < 6; j++) {
1779 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1780 				     V_PILADBGRDPTR(rsp));
1781 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1782 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1783 			req++;
1784 			rsp++;
1785 		}
1786 		req = (req + 2) & M_POLADBGRDPTR;
1787 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1788 	}
1789 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1790 }
1791 
1792 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1793 {
1794 	u32 cfg;
1795 	int i, j, idx;
1796 
1797 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1798 	if (cfg & F_LADBGEN)
1799 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1800 
1801 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1802 		for (j = 0; j < 5; j++) {
1803 			idx = 8 * i + j;
1804 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1805 				     V_PILADBGRDPTR(idx));
1806 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1807 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1808 		}
1809 	}
1810 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1811 }
1812 
1813 /**
1814  *	t4_tp_read_la - read TP LA capture buffer
1815  *	@adap: the adapter
1816  *	@la_buf: where to store the LA data
1817  *	@wrptr: the HW write pointer within the capture buffer
1818  *
1819  *	Reads the contents of the TP LA buffer with the most recent entry at
1820  *	the end	of the returned data and with the entry at @wrptr first.
1821  *	We leave the LA in the running state we find it in.
1822  */
1823 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1824 {
1825 	bool last_incomplete;
1826 	unsigned int i, cfg, val, idx;
1827 
1828 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1829 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1830 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1831 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1832 
1833 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1834 	idx = G_DBGLAWPTR(val);
1835 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1836 	if (last_incomplete)
1837 		idx = (idx + 1) & M_DBGLARPTR;
1838 	if (wrptr)
1839 		*wrptr = idx;
1840 
1841 	val &= 0xffff;
1842 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1843 	val |= adap->params.tp.la_mask;
1844 
1845 	for (i = 0; i < TPLA_SIZE; i++) {
1846 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1847 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1848 		idx = (idx + 1) & M_DBGLARPTR;
1849 	}
1850 
1851 	/* Wipe out last entry if it isn't valid */
1852 	if (last_incomplete)
1853 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1854 
1855 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1856 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1857 			     cfg | adap->params.tp.la_mask);
1858 }
1859 
1860 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1861 {
1862 	unsigned int i, j;
1863 
1864 	for (i = 0; i < 8; i++) {
1865 		u32 *p = la_buf + i;
1866 
1867 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1868 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1869 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1870 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1871 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1872 	}
1873 }
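
/*
 * Illustrative note (not part of the original source): t4_ulprx_read_la()
 * interleaves its output, i.e. la_buf is ULPRX_LA_SIZE rows of 8 words
 * and the word for read-select i, entry j lands at la_buf[8 * j + i].
 * A hypothetical sketch of walking one row:
 */
static __unused void
example_print_ulprx_row(const u32 *la_buf, unsigned int row)
{
	unsigned int col;

	for (col = 0; col < 8; col++)
		printf("%08x ", la_buf[8 * row + col]);
	printf("\n");
}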
1874 
1875 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1876 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1877 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1878 
1879 /**
1880  *	t4_link_l1cfg - apply link configuration to MAC/PHY
1881  *	@adap: the adapter
1881  *	@mbox: mbox to use for the FW command
1882  *	@port: the port id
1883  *	@lc: the requested link configuration
1884  *
1885  *	Set up a port's MAC and PHY according to a desired link configuration.
1886  *	- If the PHY can auto-negotiate, first decide what to advertise, then
1887  *	  enable/disable auto-negotiation as desired, and reset.
1888  *	- If the PHY does not auto-negotiate, just reset it.
1889  *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
1890  *	  otherwise do it later based on the outcome of auto-negotiation.
1891  */
1892 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
1893 		  struct link_config *lc)
1894 {
1895 	struct fw_port_cmd c;
1896 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1897 
1898 	lc->link_ok = 0;
1899 	if (lc->requested_fc & PAUSE_RX)
1900 		fc |= FW_PORT_CAP_FC_RX;
1901 	if (lc->requested_fc & PAUSE_TX)
1902 		fc |= FW_PORT_CAP_FC_TX;
1903 
1904 	memset(&c, 0, sizeof(c));
1905 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1906 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1907 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1908 				  FW_LEN16(c));
1909 
1910 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1911 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1912 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1913 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1914 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1915 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1916 	} else
1917 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1918 
1919 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1920 }
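
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants a fixed-speed link with pause frames fills in the requested_*
 * fields of the port's link_config and issues an L1 configure.  The
 * helper name is hypothetical; the field and constant names follow the
 * usage in t4_link_l1cfg() above.
 */
static __unused int
example_force_10g_pause(struct adapter *adap, unsigned int mbox,
			unsigned int port, struct link_config *lc)
{
	lc->autoneg = AUTONEG_DISABLE;
	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	return (t4_link_l1cfg(adap, mbox, port, lc));
}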
1921 
1922 /**
1923  *	t4_restart_aneg - restart autonegotiation
1924  *	@adap: the adapter
1925  *	@mbox: mbox to use for the FW command
1926  *	@port: the port id
1927  *
1928  *	Restarts autonegotiation for the selected port.
1929  */
1930 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1931 {
1932 	struct fw_port_cmd c;
1933 
1934 	memset(&c, 0, sizeof(c));
1935 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1936 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1937 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1938 				  FW_LEN16(c));
1939 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1940 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1941 }
1942 
1943 struct intr_info {
1944 	unsigned int mask;       /* bits to check in interrupt status */
1945 	const char *msg;         /* message to print or NULL */
1946 	short stat_idx;          /* stat counter to increment or -1 */
1947 	unsigned short fatal;    /* whether the condition reported is fatal */
1948 };
1949 
1950 /**
1951  *	t4_handle_intr_status - table driven interrupt handler
1952  *	@adapter: the adapter that generated the interrupt
1953  *	@reg: the interrupt status register to process
1954  *	@acts: table of interrupt actions
1955  *
1956  *	A table driven interrupt handler that applies a set of masks to an
1957  *	interrupt status word and performs the corresponding actions if the
1958  *	interrupts described by the mask have occurred.  The actions include
1959  *	optionally emitting a warning or alert message.  The table is terminated
1960  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1961  *	conditions.
1962  */
1963 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1964 				 const struct intr_info *acts)
1965 {
1966 	int fatal = 0;
1967 	unsigned int mask = 0;
1968 	unsigned int status = t4_read_reg(adapter, reg);
1969 
1970 	for ( ; acts->mask; ++acts) {
1971 		if (!(status & acts->mask))
1972 			continue;
1973 		if (acts->fatal) {
1974 			fatal++;
1975 			CH_ALERT(adapter, "%s (0x%x)\n",
1976 				 acts->msg, status & acts->mask);
1977 		} else if (acts->msg)
1978 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1979 					  acts->msg, status & acts->mask);
1980 		mask |= acts->mask;
1981 	}
1982 	status &= mask;
1983 	if (status)                           /* clear processed interrupts */
1984 		t4_write_reg(adapter, reg, status);
1985 	return fatal;
1986 }
1987 
1988 /*
1989  * Interrupt handler for the PCIE module.
1990  */
1991 static void pcie_intr_handler(struct adapter *adapter)
1992 {
1993 	static struct intr_info sysbus_intr_info[] = {
1994 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1995 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1996 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1997 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1998 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1999 		{ 0 }
2000 	};
2001 	static struct intr_info pcie_port_intr_info[] = {
2002 		{ F_TPCP, "TXPC array parity error", -1, 1 },
2003 		{ F_TNPP, "TXNP array parity error", -1, 1 },
2004 		{ F_TFTP, "TXFT array parity error", -1, 1 },
2005 		{ F_TCAP, "TXCA array parity error", -1, 1 },
2006 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
2007 		{ F_RCAP, "RXCA array parity error", -1, 1 },
2008 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
2009 		{ F_RDPE, "Rx data parity error", -1, 1 },
2010 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
2011 		{ 0 }
2012 	};
2013 	static struct intr_info pcie_intr_info[] = {
2014 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2015 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2016 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2017 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2018 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2019 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2020 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2021 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2022 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2023 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2024 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2025 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2026 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2027 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2028 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2029 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2030 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2031 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2032 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2033 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2034 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2035 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2036 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2037 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2038 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2039 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2040 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2041 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2042 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2043 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2044 		  0 },
2045 		{ 0 }
2046 	};
2047 
2048 	static struct intr_info t5_pcie_intr_info[] = {
2049 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2050 		  -1, 1 },
2051 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2052 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2053 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2054 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2055 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2056 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2057 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2058 		  -1, 1 },
2059 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2060 		  -1, 1 },
2061 		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2062 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2063 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2064 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2065 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2066 		  -1, 1 },
2067 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2068 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2069 		{ F_HREQWRPERR, "PCI HMA channel write request parity error",
2069 		  -1, 1 },
2070 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2071 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2072 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2073 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2074 		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2075 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2076 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2077 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2078 		  -1, 1 },
2079 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2080 		  -1, 1 },
2081 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2082 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2083 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2084 		{ F_READRSPERR, "Outbound read error", -1,
2085 		  0 },
2086 		{ 0 }
2087 	};
2088 
2089 	int fat;
2090 
2091 	if (is_t4(adapter))
2092 		fat = t4_handle_intr_status(adapter,
2093 					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2094 					    sysbus_intr_info) +
2095 		      t4_handle_intr_status(adapter,
2096 					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2097 					    pcie_port_intr_info) +
2098 		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2099 					    pcie_intr_info);
2100 	else
2101 		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2102 					    t5_pcie_intr_info);
2103 	if (fat)
2104 		t4_fatal_err(adapter);
2105 }
2106 
2107 /*
2108  * TP interrupt handler.
2109  */
2110 static void tp_intr_handler(struct adapter *adapter)
2111 {
2112 	static struct intr_info tp_intr_info[] = {
2113 		{ 0x3fffffff, "TP parity error", -1, 1 },
2114 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2115 		{ 0 }
2116 	};
2117 
2118 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2119 		t4_fatal_err(adapter);
2120 }
2121 
2122 /*
2123  * SGE interrupt handler.
2124  */
2125 static void sge_intr_handler(struct adapter *adapter)
2126 {
2127 	u64 v;
2128 	u32 err;
2129 
2130 	static struct intr_info sge_intr_info[] = {
2131 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2132 		  "SGE received CPL exceeding IQE size", -1, 1 },
2133 		{ F_ERR_INVALID_CIDX_INC,
2134 		  "SGE GTS CIDX increment too large", -1, 0 },
2135 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2136 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2137 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2138 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2139 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2140 		  0 },
2141 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2142 		  0 },
2143 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2144 		  0 },
2145 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2146 		  0 },
2147 		{ F_ERR_ING_CTXT_PRIO,
2148 		  "SGE too many priority ingress contexts", -1, 0 },
2149 		{ F_ERR_EGR_CTXT_PRIO,
2150 		  "SGE too many priority egress contexts", -1, 0 },
2151 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2152 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2153 		{ 0 }
2154 	};
2155 
2156 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2157 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2158 	if (v) {
2159 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2160 			 (unsigned long long)v);
2161 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2162 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2163 	}
2164 
2165 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2166 
2167 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2168 	if (err & F_ERROR_QID_VALID) {
2169 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2170 		if (err & F_UNCAPTURED_ERROR)
2171 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2172 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2173 			     F_UNCAPTURED_ERROR);
2174 	}
2175 
2176 	if (v != 0)
2177 		t4_fatal_err(adapter);
2178 }
2179 
2180 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2181 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2182 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2183 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2184 
2185 /*
2186  * CIM interrupt handler.
2187  */
2188 static void cim_intr_handler(struct adapter *adapter)
2189 {
2190 	static struct intr_info cim_intr_info[] = {
2191 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2192 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2193 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2194 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2195 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2196 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2197 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2198 		{ 0 }
2199 	};
2200 	static struct intr_info cim_upintr_info[] = {
2201 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2202 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2203 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2204 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2205 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2206 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2207 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2208 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2209 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2210 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2211 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2212 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2213 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2214 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2215 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2216 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2217 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
2218 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
2219 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
2220 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
2221 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
2222 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
2223 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
2224 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
2225 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
2226 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
2227 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
2228 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
2229 		{ 0 }
2230 	};
2231 	int fat;
2232 
2233 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2234 		t4_report_fw_error(adapter);
2235 
2236 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2237 				    cim_intr_info) +
2238 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2239 				    cim_upintr_info);
2240 	if (fat)
2241 		t4_fatal_err(adapter);
2242 }
2243 
2244 /*
2245  * ULP RX interrupt handler.
2246  */
2247 static void ulprx_intr_handler(struct adapter *adapter)
2248 {
2249 	static struct intr_info ulprx_intr_info[] = {
2250 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2251 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2252 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2253 		{ 0 }
2254 	};
2255 
2256 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2257 		t4_fatal_err(adapter);
2258 }
2259 
2260 /*
2261  * ULP TX interrupt handler.
2262  */
2263 static void ulptx_intr_handler(struct adapter *adapter)
2264 {
2265 	static struct intr_info ulptx_intr_info[] = {
2266 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2267 		  0 },
2268 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2269 		  0 },
2270 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2271 		  0 },
2272 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2273 		  0 },
2274 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2275 		{ 0 }
2276 	};
2277 
2278 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2279 		t4_fatal_err(adapter);
2280 }
2281 
2282 /*
2283  * PM TX interrupt handler.
2284  */
2285 static void pmtx_intr_handler(struct adapter *adapter)
2286 {
2287 	static struct intr_info pmtx_intr_info[] = {
2288 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2289 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2290 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2291 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2292 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2293 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2294 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2295 		  1 },
2296 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2297 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
2298 		{ 0 }
2299 	};
2300 
2301 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2302 		t4_fatal_err(adapter);
2303 }
2304 
2305 /*
2306  * PM RX interrupt handler.
2307  */
2308 static void pmrx_intr_handler(struct adapter *adapter)
2309 {
2310 	static struct intr_info pmrx_intr_info[] = {
2311 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2312 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2313 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2314 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2315 		  1 },
2316 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2317 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
2318 		{ 0 }
2319 	};
2320 
2321 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2322 		t4_fatal_err(adapter);
2323 }
2324 
2325 /*
2326  * CPL switch interrupt handler.
2327  */
2328 static void cplsw_intr_handler(struct adapter *adapter)
2329 {
2330 	static struct intr_info cplsw_intr_info[] = {
2331 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2332 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2333 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2334 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2335 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2336 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2337 		{ 0 }
2338 	};
2339 
2340 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2341 		t4_fatal_err(adapter);
2342 }
2343 
2344 /*
2345  * LE interrupt handler.
2346  */
2347 static void le_intr_handler(struct adapter *adap)
2348 {
2349 	static struct intr_info le_intr_info[] = {
2350 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2351 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2352 		{ F_PARITYERR, "LE parity error", -1, 1 },
2353 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2354 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2355 		{ 0 }
2356 	};
2357 
2358 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2359 		t4_fatal_err(adap);
2360 }
2361 
2362 /*
2363  * MPS interrupt handler.
2364  */
2365 static void mps_intr_handler(struct adapter *adapter)
2366 {
2367 	static struct intr_info mps_rx_intr_info[] = {
2368 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2369 		{ 0 }
2370 	};
2371 	static struct intr_info mps_tx_intr_info[] = {
2372 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2373 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2374 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2375 		  -1, 1 },
2376 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2377 		  -1, 1 },
2378 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2379 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2380 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2381 		{ 0 }
2382 	};
2383 	static struct intr_info mps_trc_intr_info[] = {
2384 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2385 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2386 		  1 },
2387 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2388 		{ 0 }
2389 	};
2390 	static struct intr_info mps_stat_sram_intr_info[] = {
2391 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2392 		{ 0 }
2393 	};
2394 	static struct intr_info mps_stat_tx_intr_info[] = {
2395 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2396 		{ 0 }
2397 	};
2398 	static struct intr_info mps_stat_rx_intr_info[] = {
2399 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2400 		{ 0 }
2401 	};
2402 	static struct intr_info mps_cls_intr_info[] = {
2403 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2404 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2405 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2406 		{ 0 }
2407 	};
2408 
2409 	int fat;
2410 
2411 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2412 				    mps_rx_intr_info) +
2413 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2414 				    mps_tx_intr_info) +
2415 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2416 				    mps_trc_intr_info) +
2417 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2418 				    mps_stat_sram_intr_info) +
2419 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2420 				    mps_stat_tx_intr_info) +
2421 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2422 				    mps_stat_rx_intr_info) +
2423 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2424 				    mps_cls_intr_info);
2425 
2426 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2427 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2428 	if (fat)
2429 		t4_fatal_err(adapter);
2430 }
2431 
2432 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2433 
2434 /*
2435  * EDC/MC interrupt handler.
2436  */
2437 static void mem_intr_handler(struct adapter *adapter, int idx)
2438 {
2439 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2440 
2441 	unsigned int addr, cnt_addr, v;
2442 
2443 	if (idx <= MEM_EDC1) {
2444 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2445 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2446 	} else {
2447 		if (is_t4(adapter)) {
2448 			addr = A_MC_INT_CAUSE;
2449 			cnt_addr = A_MC_ECC_STATUS;
2450 		} else {
2451 			addr = A_MC_P_INT_CAUSE;
2452 			cnt_addr = A_MC_P_ECC_STATUS;
2453 		}
2454 	}
2455 
2456 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2457 	if (v & F_PERR_INT_CAUSE)
2458 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2459 	if (v & F_ECC_CE_INT_CAUSE) {
2460 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2461 
2462 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2463 		CH_WARN_RATELIMIT(adapter,
2464 				  "%u %s correctable ECC data error%s\n",
2465 				  cnt, name[idx], cnt > 1 ? "s" : "");
2466 	}
2467 	if (v & F_ECC_UE_INT_CAUSE)
2468 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2469 			 name[idx]);
2470 
2471 	t4_write_reg(adapter, addr, v);
2472 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2473 		t4_fatal_err(adapter);
2474 }
2475 
2476 /*
2477  * MA interrupt handler.
2478  */
2479 static void ma_intr_handler(struct adapter *adapter)
2480 {
2481 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2482 
2483 	if (status & F_MEM_PERR_INT_CAUSE) {
2484 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2485 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
2486 		if (is_t5(adapter))
2487 			CH_ALERT(adapter,
2488 				 "MA parity error, parity status %#x\n",
2489 				 t4_read_reg(adapter,
2490 				 	     A_MA_PARITY_ERROR_STATUS2));
2491 	}
2492 	if (status & F_MEM_WRAP_INT_CAUSE) {
2493 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2494 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2495 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2496 			 G_MEM_WRAP_ADDRESS(v) << 4);
2497 	}
2498 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2499 	t4_fatal_err(adapter);
2500 }
2501 
2502 /*
2503  * SMB interrupt handler.
2504  */
2505 static void smb_intr_handler(struct adapter *adap)
2506 {
2507 	static struct intr_info smb_intr_info[] = {
2508 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2509 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2510 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2511 		{ 0 }
2512 	};
2513 
2514 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2515 		t4_fatal_err(adap);
2516 }
2517 
2518 /*
2519  * NC-SI interrupt handler.
2520  */
2521 static void ncsi_intr_handler(struct adapter *adap)
2522 {
2523 	static struct intr_info ncsi_intr_info[] = {
2524 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2525 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2526 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2527 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2528 		{ 0 }
2529 	};
2530 
2531 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2532 		t4_fatal_err(adap);
2533 }
2534 
2535 /*
2536  * XGMAC interrupt handler.
2537  */
2538 static void xgmac_intr_handler(struct adapter *adap, int port)
2539 {
2540 	u32 v, int_cause_reg;
2541 
2542 	if (is_t4(adap))
2543 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2544 	else
2545 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2546 
2547 	v = t4_read_reg(adap, int_cause_reg);
2548 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2549 	if (!v)
2550 		return;
2551 
2552 	if (v & F_TXFIFO_PRTY_ERR)
2553 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2554 	if (v & F_RXFIFO_PRTY_ERR)
2555 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2556 	t4_write_reg(adap, int_cause_reg, v);
2557 	t4_fatal_err(adap);
2558 }
2559 
2560 /*
2561  * PL interrupt handler.
2562  */
2563 static void pl_intr_handler(struct adapter *adap)
2564 {
2565 	static struct intr_info pl_intr_info[] = {
2566 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2567 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2568 		{ 0 }
2569 	};
2570 
2571 	static struct intr_info t5_pl_intr_info[] = {
2572 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2573 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2574 		{ 0 }
2575 	};
2576 
2577 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2578 	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2579 		t4_fatal_err(adap);
2580 }
2581 
2582 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2583 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2584 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2585 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2586 
2587 /**
2588  *	t4_slow_intr_handler - control path interrupt handler
2589  *	@adapter: the adapter
2590  *
2591  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2592  *	The designation 'slow' is because it involves register reads, while
2593  *	data interrupts typically don't involve any MMIOs.
2594  */
2595 int t4_slow_intr_handler(struct adapter *adapter)
2596 {
2597 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2598 
2599 	if (!(cause & GLBL_INTR_MASK))
2600 		return 0;
2601 	if (cause & F_CIM)
2602 		cim_intr_handler(adapter);
2603 	if (cause & F_MPS)
2604 		mps_intr_handler(adapter);
2605 	if (cause & F_NCSI)
2606 		ncsi_intr_handler(adapter);
2607 	if (cause & F_PL)
2608 		pl_intr_handler(adapter);
2609 	if (cause & F_SMB)
2610 		smb_intr_handler(adapter);
2611 	if (cause & F_XGMAC0)
2612 		xgmac_intr_handler(adapter, 0);
2613 	if (cause & F_XGMAC1)
2614 		xgmac_intr_handler(adapter, 1);
2615 	if (cause & F_XGMAC_KR0)
2616 		xgmac_intr_handler(adapter, 2);
2617 	if (cause & F_XGMAC_KR1)
2618 		xgmac_intr_handler(adapter, 3);
2619 	if (cause & F_PCIE)
2620 		pcie_intr_handler(adapter);
2621 	if (cause & F_MC)
2622 		mem_intr_handler(adapter, MEM_MC);
2623 	if (cause & F_EDC0)
2624 		mem_intr_handler(adapter, MEM_EDC0);
2625 	if (cause & F_EDC1)
2626 		mem_intr_handler(adapter, MEM_EDC1);
2627 	if (cause & F_LE)
2628 		le_intr_handler(adapter);
2629 	if (cause & F_TP)
2630 		tp_intr_handler(adapter);
2631 	if (cause & F_MA)
2632 		ma_intr_handler(adapter);
2633 	if (cause & F_PM_TX)
2634 		pmtx_intr_handler(adapter);
2635 	if (cause & F_PM_RX)
2636 		pmrx_intr_handler(adapter);
2637 	if (cause & F_ULP_RX)
2638 		ulprx_intr_handler(adapter);
2639 	if (cause & F_CPL_SWITCH)
2640 		cplsw_intr_handler(adapter);
2641 	if (cause & F_SGE)
2642 		sge_intr_handler(adapter);
2643 	if (cause & F_ULP_TX)
2644 		ulptx_intr_handler(adapter);
2645 
2646 	/* Clear the interrupts just processed for which we are the master. */
2647 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2648 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2649 	return 1;
2650 }
2651 
2652 /**
2653  *	t4_intr_enable - enable interrupts
2654  *	@adapter: the adapter whose interrupts should be enabled
2655  *
2656  *	Enable PF-specific interrupts for the calling function and the top-level
2657  *	interrupt concentrator for global interrupts.  Interrupts are already
2658  *	enabled at each module,	here we just enable the roots of the interrupt
2659  *	hierarchies.
2660  *
2661  *	Note: this function should be called only when the driver manages
2662  *	non-PF-specific interrupts from the various HW modules.  Only one PCI
2663  *	function at a time should be doing this.
2664  */
2665 void t4_intr_enable(struct adapter *adapter)
2666 {
2667 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2668 
2669 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2670 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2671 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2672 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2673 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2674 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2675 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2676 		     F_EGRESS_SIZE_ERR);
2677 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2678 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2679 }
2680 
2681 /**
2682  *	t4_intr_disable - disable interrupts
2683  *	@adapter: the adapter whose interrupts should be disabled
2684  *
2685  *	Disable interrupts.  We only disable the top-level interrupt
2686  *	concentrators.  The caller must be a PCI function managing global
2687  *	interrupts.
2688  */
2689 void t4_intr_disable(struct adapter *adapter)
2690 {
2691 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2692 
2693 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2694 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2695 }
2696 
2697 /**
2698  *	t4_intr_clear - clear all interrupts
2699  *	@adapter: the adapter whose interrupts should be cleared
2700  *
2701  *	Clears all interrupts.  The caller must be a PCI function managing
2702  *	global interrupts.
2703  */
2704 void t4_intr_clear(struct adapter *adapter)
2705 {
2706 	static const unsigned int cause_reg[] = {
2707 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2708 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2709 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
2710 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2711 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2712 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2713 		A_TP_INT_CAUSE,
2714 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2715 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2716 		A_MPS_RX_PERR_INT_CAUSE,
2717 		A_CPL_INTR_CAUSE,
2718 		MYPF_REG(A_PL_PF_INT_CAUSE),
2719 		A_PL_PL_INT_CAUSE,
2720 		A_LE_DB_INT_CAUSE,
2721 	};
2722 
2723 	unsigned int i;
2724 
2725 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2726 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2727 
2728 	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2729 				A_MC_P_INT_CAUSE, 0xffffffff);
2730 
2731 	if (is_t4(adapter)) {
2732 		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2733 				0xffffffff);
2734 		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2735 				0xffffffff);
2736 	} else
2737 		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
2738 
2739 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2740 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2741 }
2742 
2743 /**
2744  *	hash_mac_addr - return the hash value of a MAC address
2745  *	@addr: the 48-bit Ethernet MAC address
2746  *
2747  *	Hashes a MAC address according to the hash function used by HW inexact
2748  *	(hash) address matching.
2749  */
2750 static int hash_mac_addr(const u8 *addr)
2751 {
2752 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2753 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2754 	a ^= b;
2755 	a ^= (a >> 12);
2756 	a ^= (a >> 6);
2757 	return a & 0x3f;
2758 }
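
/*
 * Illustrative sketch (not part of the original source): the 6-bit hash
 * selects one bit in a 64-bit inexact-match vector, so a set of multicast
 * addresses collapses to a bitmap like this.  The helper name is
 * hypothetical.
 */
static __unused u64
example_mcast_hash_vec(const u8 (*addrs)[6], int naddrs)
{
	u64 vec = 0;
	int i;

	for (i = 0; i < naddrs; i++)
		vec |= 1ULL << hash_mac_addr(addrs[i]);
	return (vec);
}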
2759 
2760 /**
2761  *	t4_config_rss_range - configure a portion of the RSS mapping table
2762  *	@adapter: the adapter
2763  *	@mbox: mbox to use for the FW command
2764  *	@viid: virtual interface whose RSS subtable is to be written
2765  *	@start: start entry in the table to write
2766  *	@n: how many table entries to write
2767  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2768  *	@nrspq: number of values in @rspq
2769  *
2770  *	Programs the selected part of the VI's RSS mapping table with the
2771  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2772  *	until the full table range is populated.
2773  *
2774  *	The caller must ensure the values in @rspq are in the range allowed for
2775  *	@viid.
2776  */
2777 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2778 			int start, int n, const u16 *rspq, unsigned int nrspq)
2779 {
2780 	int ret;
2781 	const u16 *rsp = rspq;
2782 	const u16 *rsp_end = rspq + nrspq;
2783 	struct fw_rss_ind_tbl_cmd cmd;
2784 
2785 	memset(&cmd, 0, sizeof(cmd));
2786 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2787 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2788 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2789 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2790 
2792 	/*
2793 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2794 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2795 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2796 	 * reserved.
2797 	 */
2798 	while (n > 0) {
2799 		int nq = min(n, 32);
2800 		int nq_packed = 0;
2801 		__be32 *qp = &cmd.iq0_to_iq2;
2802 
2803 		/*
2804 		 * Set up the firmware RSS command header to send the next
2805 		 * "nq" Ingress Queue IDs to the firmware.
2806 		 */
2807 		cmd.niqid = htons(nq);
2808 		cmd.startidx = htons(start);
2809 
2810 		/*
2811 		 * Advance past the "nq" Ingress Queue IDs in this command.
2812 		 */
2813 		start += nq;
2814 		n -= nq;
2815 
2816 		/*
2817 		 * While there are still Ingress Queue IDs to stuff into the
2818 		 * current firmware RSS command, retrieve them from the
2819 		 * Ingress Queue ID array and insert them into the command.
2820 		 */
2821 		while (nq > 0) {
2822 			/*
2823 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2824 			 * around the Ingress Queue ID array if necessary) and
2825 			 * insert them into the firmware RSS command at the
2826 			 * current 3-tuple position within the command.
2827 			 */
2828 			u16 qbuf[3];
2829 			u16 *qbp = qbuf;
2830 			int nqbuf = min(3, nq);
2831 
2832 			nq -= nqbuf;
2833 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2834 			while (nqbuf && nq_packed < 32) {
2835 				nqbuf--;
2836 				nq_packed++;
2837 				*qbp++ = *rsp++;
2838 				if (rsp >= rsp_end)
2839 					rsp = rspq;
2840 			}
2841 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2842 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2843 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2844 		}
2845 
2846 		/*
2847 		 * Send this portion of the RSS table update to the firmware;
2848 		 * bail out on any errors.
2849 		 */
2850 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2851 		if (ret)
2852 			return ret;
2853 	}
2854 
2855 	return 0;
2856 }
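
/*
 * Illustrative sketch (not part of the original source): programming an
 * entire VI indirection table by repeating a small set of ingress queue
 * IDs; t4_config_rss_range() itself wraps around @rspq whenever
 * nrspq < n.  The helper name and the rss_size parameter are
 * hypothetical.
 */
static __unused int
example_fill_rss_table(struct adapter *adap, int mbox, unsigned int viid,
		       int rss_size, const u16 *iqs, unsigned int niqs)
{
	/* spread the niqs queues round-robin over all rss_size slots */
	return (t4_config_rss_range(adap, mbox, viid, 0, rss_size, iqs, niqs));
}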
2857 
2858 /**
2859  *	t4_config_glbl_rss - configure the global RSS mode
2860  *	@adapter: the adapter
2861  *	@mbox: mbox to use for the FW command
2862  *	@mode: global RSS mode
2863  *	@flags: mode-specific flags
2864  *
2865  *	Sets the global RSS mode.
2866  */
2867 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2868 		       unsigned int flags)
2869 {
2870 	struct fw_rss_glb_config_cmd c;
2871 
2872 	memset(&c, 0, sizeof(c));
2873 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2874 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2875 	c.retval_len16 = htonl(FW_LEN16(c));
2876 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2877 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2878 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2879 		c.u.basicvirtual.mode_pkd =
2880 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2881 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2882 	} else
2883 		return -EINVAL;
2884 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2885 }
2886 
2887 /**
2888  *	t4_config_vi_rss - configure per VI RSS settings
2889  *	@adapter: the adapter
2890  *	@mbox: mbox to use for the FW command
2891  *	@viid: the VI id
2892  *	@flags: RSS flags
2893  *	@defq: id of the default RSS queue for the VI.
2894  *
2895  *	Configures VI-specific RSS properties.
2896  */
2897 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2898 		     unsigned int flags, unsigned int defq)
2899 {
2900 	struct fw_rss_vi_config_cmd c;
2901 
2902 	memset(&c, 0, sizeof(c));
2903 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2904 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2905 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2906 	c.retval_len16 = htonl(FW_LEN16(c));
2907 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2908 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2909 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2910 }
2911 
2912 /* Read an RSS table row */
2913 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2914 {
2915 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2916 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2917 				   5, 0, val);
2918 }
2919 
2920 /**
2921  *	t4_read_rss - read the contents of the RSS mapping table
2922  *	@adapter: the adapter
2923  *	@map: holds the contents of the RSS mapping table
2924  *
2925  *	Reads the contents of the RSS hash->queue mapping table.
2926  */
2927 int t4_read_rss(struct adapter *adapter, u16 *map)
2928 {
2929 	u32 val;
2930 	int i, ret;
2931 
2932 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2933 		ret = rd_rss_row(adapter, i, &val);
2934 		if (ret)
2935 			return ret;
2936 		*map++ = G_LKPTBLQUEUE0(val);
2937 		*map++ = G_LKPTBLQUEUE1(val);
2938 	}
2939 	return 0;
2940 }
2941 
2942 /**
2943  *	t4_read_rss_key - read the global RSS key
2944  *	@adap: the adapter
2945  *	@key: 10-entry array holding the 320-bit RSS key
2946  *
2947  *	Reads the global 320-bit RSS key.
2948  */
2949 void t4_read_rss_key(struct adapter *adap, u32 *key)
2950 {
2951 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2952 			 A_TP_RSS_SECRET_KEY0);
2953 }
2954 
2955 /**
2956  *	t4_write_rss_key - program one of the RSS keys
2957  *	@adap: the adapter
2958  *	@key: 10-entry array holding the 320-bit RSS key
2959  *	@idx: which RSS key to write
2960  *
2961  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2962  *	0..15 the corresponding entry in the RSS key table is written,
2963  *	otherwise the global RSS key is written.
2964  */
2965 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2966 {
2967 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2968 			  A_TP_RSS_SECRET_KEY0);
2969 	if (idx >= 0 && idx < 16)
2970 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2971 			     V_KEYWRADDR(idx) | F_KEYWREN);
2972 }
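
/*
 * Illustrative sketch (not part of the original source): the key is ten
 * 32-bit words (320 bits), and an @idx of -1 writes the global key per
 * the comment above.  arc4random() is assumed to be the stock FreeBSD
 * kernel RNG; the helper name is hypothetical.
 */
static __unused void
example_randomize_global_rss_key(struct adapter *adap)
{
	u32 key[10];
	int i;

	for (i = 0; i < 10; i++)
		key[i] = arc4random();
	t4_write_rss_key(adap, key, -1);
}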
2973 
2974 /**
2975  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2976  *	@adapter: the adapter
2977  *	@index: the entry in the PF RSS table to read
2978  *	@valp: where to store the returned value
2979  *
2980  *	Reads the PF RSS Configuration Table at the specified index and returns
2981  *	the value found there.
2982  */
2983 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2984 {
2985 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2986 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2987 }
2988 
2989 /**
2990  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2991  *	@adapter: the adapter
2992  *	@index: the entry in the PF RSS table to write
2993  *	@val: the value to store
2994  *
2995  *	Writes the PF RSS Configuration Table at the specified index with the
2996  *	specified value.
2997  */
2998 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2999 {
3000 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3001 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
3002 }
3003 
3004 /**
3005  *	t4_read_rss_vf_config - read VF RSS Configuration Table
3006  *	@adapter: the adapter
3007  *	@index: the entry in the VF RSS table to read
3008  *	@vfl: where to store the returned VFL
3009  *	@vfh: where to store the returned VFH
3010  *
3011  *	Reads the VF RSS Configuration Table at the specified index and returns
3012  *	the (VFL, VFH) values found there.
3013  */
3014 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3015 			   u32 *vfl, u32 *vfh)
3016 {
3017 	u32 vrt;
3018 
3019 	/*
3020 	 * Request that the index'th VF Table values be read into VFL/VFH.
3021 	 */
3022 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3023 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
3024 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
3025 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3026 
3027 	/*
3028 	 * Grab the VFL/VFH values ...
3029 	 */
3030 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3031 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3032 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3033 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3034 }
3035 
3036 /**
3037  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3039  *	@adapter: the adapter
3040  *	@index: the entry in the VF RSS table to write
3041  *	@vfl: the VFL to store
3042  *	@vfh: the VFH to store
3043  *
3044  *	Writes the VF RSS Configuration Table at the specified index with the
3045  *	specified (VFL, VFH) values.
3046  */
3047 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3048 			    u32 vfl, u32 vfh)
3049 {
3050 	u32 vrt;
3051 
3052 	/*
3053 	 * Load up VFL/VFH with the values to be written ...
3054 	 */
3055 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3056 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3057 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3058 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3059 
3060 	/*
3061 	 * Write the VFL/VFH into the VF Table at index'th location.
3062 	 */
3063 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3064 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3065 	vrt |= V_VFWRADDR(index) | F_VFWREN;
3066 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3067 }
3068 
3069 /**
3070  *	t4_read_rss_pf_map - read PF RSS Map
3071  *	@adapter: the adapter
3072  *
3073  *	Reads the PF RSS Map register and returns its value.
3074  */
3075 u32 t4_read_rss_pf_map(struct adapter *adapter)
3076 {
3077 	u32 pfmap;
3078 
3079 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3080 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3081 	return pfmap;
3082 }
3083 
3084 /**
3085  *	t4_write_rss_pf_map - write PF RSS Map
3086  *	@adapter: the adapter
3087  *	@pfmap: PF RSS Map value
3088  *
3089  *	Writes the specified value to the PF RSS Map register.
3090  */
3091 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3092 {
3093 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3094 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3095 }
3096 
3097 /**
3098  *	t4_read_rss_pf_mask - read PF RSS Mask
3099  *	@adapter: the adapter
3100  *
3101  *	Reads the PF RSS Mask register and returns its value.
3102  */
3103 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3104 {
3105 	u32 pfmask;
3106 
3107 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3108 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3109 	return pfmask;
3110 }
3111 
3112 /**
3113  *	t4_write_rss_pf_mask - write PF RSS Mask
3114  *	@adapter: the adapter
3115  *	@pfmask: PF RSS Mask value
3116  *
3117  *	Writes the specified value to the PF RSS Mask register.
3118  */
3119 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3120 {
3121 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3122 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3123 }
3124 
3125 static void refresh_vlan_pri_map(struct adapter *adap)
3126 {
3127 
3128 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3129 			 &adap->params.tp.vlan_pri_map, 1,
3130 			 A_TP_VLAN_PRI_MAP);
3131 
3132 	/*
3133 	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3134 	 * shift positions of several elements of the Compressed Filter Tuple
3135 	 * for this adapter which we need frequently ...
3136 	 */
3137 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3138 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3139 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3140 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
3141 
3142 	/*
3143 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3144 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
3145 	 */
3146 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3147 		adap->params.tp.vnic_shift = -1;
3148 }
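
/*
 * Illustrative sketch (not part of the original source): the cached
 * shifts give each optional field's position within the Compressed
 * Filter Tuple, with -1 meaning the field is absent from the current
 * filter mode.  A hypothetical helper placing a VLAN tag:
 */
static __unused u64
example_vlan_ntuple(const struct adapter *adap, unsigned int vlan)
{
	int shift = adap->params.tp.vlan_shift;

	if (shift < 0)
		return (0);	/* VLAN is not part of the filter mode */
	return ((u64)(vlan & 0xfff) << shift);
}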
3149 
3150 /**
3151  *	t4_set_filter_mode - configure the optional components of filter tuples
3152  *	@adap: the adapter
3153  *	@mode_map: a bitmap selecting which optional filter components to enable
3154  *
3155  *	Sets the filter mode by selecting the optional components to enable
3156  *	in filter tuples.  Returns 0 on success and a negative error if the
3157  *	requested mode needs more bits than are available for optional
3158  *	components.
3159  */
3160 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3161 {
3162 	static const u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3163 
3164 	int i, nbits = 0;
3165 
3166 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3167 		if (mode_map & (1 << i))
3168 			nbits += width[i];
3169 	if (nbits > FILTER_OPT_LEN)
3170 		return -EINVAL;
3171 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3172 			  A_TP_VLAN_PRI_MAP);
3173 	refresh_vlan_pri_map(adap);
3174 
3175 	return 0;
3176 }
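
/*
 * Illustrative sketch (not part of the original source): each bit of
 * @mode_map names one optional tuple field, S_FCOE through
 * S_FRAGMENTATION, and the widths above are summed against
 * FILTER_OPT_LEN.  Assuming F_VLAN (17 bits), F_PORT (3 bits) and
 * F_PROTOCOL (8 bits) are the single-bit mode-map masks used with
 * t4_filter_field_shift() earlier, a 28-bit mode fits comfortably:
 */
static __unused int
example_set_vlan_port_proto_mode(struct adapter *adap)
{
	return (t4_set_filter_mode(adap, F_VLAN | F_PORT | F_PROTOCOL));
}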
3177 
3178 /**
3179  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3180  *	@adap: the adapter
3181  *	@v4: holds the TCP/IP counter values
3182  *	@v6: holds the TCP/IPv6 counter values
3183  *
3184  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3185  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3186  */
3187 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3188 			 struct tp_tcp_stats *v6)
3189 {
3190 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3191 
3192 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3193 #define STAT(x)     val[STAT_IDX(x)]
3194 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3195 
3196 	if (v4) {
3197 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3198 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3199 		v4->tcp_out_rsts = STAT(OUT_RST);
3200 		v4->tcp_in_segs  = STAT64(IN_SEG);
3201 		v4->tcp_out_segs = STAT64(OUT_SEG);
3202 		v4->tcp_retrans_segs = STAT64(RXT_SEG);
3203 	}
3204 	if (v6) {
3205 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3206 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3207 		v6->tcp_out_rsts = STAT(OUT_RST);
3208 		v6->tcp_in_segs  = STAT64(IN_SEG);
3209 		v6->tcp_out_segs = STAT64(OUT_SEG);
3210 		v6->tcp_retrans_segs = STAT64(RXT_SEG);
3211 	}
3212 #undef STAT64
3213 #undef STAT
3214 #undef STAT_IDX
3215 }
3216 
3217 /**
3218  *	t4_tp_get_err_stats - read TP's error MIB counters
3219  *	@adap: the adapter
3220  *	@st: holds the counter values
3221  *
3222  *	Returns the values of TP's error counters.
3223  */
3224 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3225 {
3226 	int nchan = NCHAN;
3227 
3228 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3229 			st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
3230 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3231 			st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
3232 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3233 			st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
3234 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3235 			st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
3236 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3237 			st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
3238 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3239 			st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
3240 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3241 			st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
3242 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3243 			st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
3244 
3245 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
3246 			 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
3247 }
3248 
3249 /**
3250  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3251  *	@adap: the adapter
3252  *	@st: holds the counter values
3253  *
3254  *	Returns the values of TP's proxy counters.
3255  */
3256 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3257 {
3258 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3259 			 4, A_TP_MIB_TNL_LPBK_0);
3260 }
3261 
3262 /**
3263  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3264  *	@adap: the adapter
3265  *	@st: holds the counter values
3266  *
3267  *	Returns the values of TP's CPL counters.
3268  */
3269 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3270 {
3271 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3272 			 8, A_TP_MIB_CPL_IN_REQ_0);
3273 }
3274 
3275 /**
3276  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3277  *	@adap: the adapter
3278  *	@st: holds the counter values
3279  *
3280  *	Returns the values of TP's RDMA counters.
3281  */
3282 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3283 {
3284 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3285 			 2, A_TP_MIB_RQE_DFR_PKT);
3286 }
3287 
3288 /**
3289  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3290  *	@adap: the adapter
3291  *	@idx: the port index
3292  *	@st: holds the counter values
3293  *
3294  *	Returns the values of TP's FCoE counters for the selected port.
3295  */
3296 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3297 		       struct tp_fcoe_stats *st)
3298 {
3299 	u32 val[2];
3300 
3301 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
3302 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3303 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
3304 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3305 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3306 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3307 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
3308 }
3309 
3310 /**
3311  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3312  *	@adap: the adapter
3313  *	@st: holds the counter values
3314  *
3315  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3316  */
3317 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3318 {
3319 	u32 val[4];
3320 
3321 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3322 			 A_TP_MIB_USM_PKTS);
3323 	st->frames = val[0];
3324 	st->drops = val[1];
3325 	st->octets = ((u64)val[2] << 32) | val[3];
3326 }
3327 
3328 /**
3329  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3330  *	@adap: the adapter
3331  *	@mtus: where to store the MTU values
3332  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3333  *
3334  *	Reads the HW path MTU table.
3335  */
3336 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3337 {
3338 	u32 v;
3339 	int i;
3340 
3341 	for (i = 0; i < NMTUS; ++i) {
3342 		t4_write_reg(adap, A_TP_MTU_TABLE,
3343 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3344 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3345 		mtus[i] = G_MTUVALUE(v);
3346 		if (mtu_log)
3347 			mtu_log[i] = G_MTUWIDTH(v);
3348 	}
3349 }
3350 
3351 /**
3352  *	t4_read_cong_tbl - reads the congestion control table
3353  *	@adap: the adapter
3354  *	@incr: where to store the alpha values
3355  *
3356  *	Reads the additive increments programmed into the HW congestion
3357  *	control table.
3358  */
3359 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3360 {
3361 	unsigned int mtu, w;
3362 
3363 	for (mtu = 0; mtu < NMTUS; ++mtu)
3364 		for (w = 0; w < NCCTRL_WIN; ++w) {
3365 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3366 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3367 			incr[mtu][w] = (u16)t4_read_reg(adap,
3368 						A_TP_CCTRL_TABLE) & 0x1fff;
3369 		}
3370 }
3371 
3372 /**
3373  *	t4_read_pace_tbl - read the pace table
3374  *	@adap: the adapter
3375  *	@pace_vals: holds the returned values
3376  *
3377  *	Returns the values of TP's pace table in microseconds.
3378  */
3379 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3380 {
3381 	unsigned int i, v;
3382 
3383 	for (i = 0; i < NTX_SCHED; i++) {
3384 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3385 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3386 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3387 	}
3388 }
3389 
3390 /**
3391  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3392  *	@adap: the adapter
3393  *	@addr: the indirect TP register address
3394  *	@mask: specifies the field within the register to modify
3395  *	@val: new value for the field
3396  *
3397  *	Sets a field of an indirect TP register to the given value.
3398  */
3399 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3400 			    unsigned int mask, unsigned int val)
3401 {
3402 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3403 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3404 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3405 }
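
/*
 * Illustrative read-modify-write through TP_PIO (a sketch): clearing one
 * bit of TP_INGRESS_CONFIG, assuming the F_CSUM_HAS_PSEUDO_HDR mask from
 * t4_regs.h:
 *
 *	t4_tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG,
 *			       F_CSUM_HAS_PSEUDO_HDR, 0);
 *
 * Only the masked field changes; the rest of the register is preserved.
 */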
3406 
3407 /**
3408  *	init_cong_ctrl - initialize congestion control parameters
3409  *	@a: the alpha values for congestion control
3410  *	@b: the beta values for congestion control
3411  *
3412  *	Initialize the congestion control parameters.
3413  */
3414 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3415 {
3416 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3417 	a[9] = 2;
3418 	a[10] = 3;
3419 	a[11] = 4;
3420 	a[12] = 5;
3421 	a[13] = 6;
3422 	a[14] = 7;
3423 	a[15] = 8;
3424 	a[16] = 9;
3425 	a[17] = 10;
3426 	a[18] = 14;
3427 	a[19] = 17;
3428 	a[20] = 21;
3429 	a[21] = 25;
3430 	a[22] = 30;
3431 	a[23] = 35;
3432 	a[24] = 45;
3433 	a[25] = 60;
3434 	a[26] = 80;
3435 	a[27] = 100;
3436 	a[28] = 200;
3437 	a[29] = 300;
3438 	a[30] = 400;
3439 	a[31] = 500;
3440 
3441 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3442 	b[9] = b[10] = 1;
3443 	b[11] = b[12] = 2;
3444 	b[13] = b[14] = b[15] = b[16] = 3;
3445 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3446 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3447 	b[28] = b[29] = 6;
3448 	b[30] = b[31] = 7;
3449 }
3450 
3451 /* The minimum additive increment value for the congestion control table */
3452 #define CC_MIN_INCR 2U
3453 
3454 /**
3455  *	t4_load_mtus - write the MTU and congestion control HW tables
3456  *	@adap: the adapter
3457  *	@mtus: the values for the MTU table
3458  *	@alpha: the values for the congestion control alpha parameter
3459  *	@beta: the values for the congestion control beta parameter
3460  *
3461  *	Write the HW MTU table with the supplied MTUs and the high-speed
3462  *	congestion control table with the supplied alpha, beta, and MTUs.
3463  *	We write the two tables together because the additive increments
3464  *	depend on the MTUs.
3465  */
3466 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3467 		  const unsigned short *alpha, const unsigned short *beta)
3468 {
3469 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3470 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3471 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3472 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3473 	};
3474 
3475 	unsigned int i, w;
3476 
3477 	for (i = 0; i < NMTUS; ++i) {
3478 		unsigned int mtu = mtus[i];
3479 		unsigned int log2 = fls(mtu);
3480 
3481 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3482 			log2--;
3483 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3484 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3485 
3486 		for (w = 0; w < NCCTRL_WIN; ++w) {
3487 			unsigned int inc;
3488 
3489 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3490 				  CC_MIN_INCR);
3491 
3492 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3493 				     (w << 16) | (beta[w] << 13) | inc);
3494 		}
3495 	}
3496 }
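
/*
 * Worked example of the MTU-width rounding in t4_load_mtus(): for
 * mtu = 1500, fls(1500) = 11 and bit 9 ((1 << 11) >> 2 = 512) is clear
 * in 1500, so log2 is decremented to 10 and the entry is written with
 * MTUWIDTH = 10, i.e. 1500 rounds down to the 2^10 bucket.
 */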
3497 
3498 /**
3499  *	t4_set_pace_tbl - set the pace table
3500  *	@adap: the adapter
3501  *	@pace_vals: the pace values in microseconds
3502  *	@start: index of the first entry in the HW pace table to set
3503  *	@n: how many entries to set
3504  *
3505  *	Sets (a subset of the) HW pace table.
3506  */
3507 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3508 		     unsigned int start, unsigned int n)
3509 {
3510 	unsigned int vals[NTX_SCHED], i;
3511 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3512 
3513 	if (n > NTX_SCHED)
3514 		return -ERANGE;
3515 
3516 	/* convert values from us to dack ticks, rounding to closest value */
3517 	for (i = 0; i < n; i++, pace_vals++) {
3518 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3519 		if (vals[i] > 0x7ff)
3520 			return -ERANGE;
3521 		if (*pace_vals && vals[i] == 0)
3522 			return -ERANGE;
3523 	}
3524 	for (i = 0; i < n; i++, start++)
3525 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3526 	return 0;
3527 }
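
/*
 * Worked example of the pace conversion above (illustrative numbers):
 * with a 200ns DACK tick (tick_ns = 200), a 10us pace value becomes
 * (1000 * 10 + 100) / 200 = 50 ticks.  Values that round to 0 ticks or
 * overflow the 11-bit field (0x7ff) are rejected with -ERANGE.
 */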
3528 
3529 /**
3530  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3531  *	@adap: the adapter
3532  *	@sched: the scheduler index
3533  *	@kbps: target rate in Kbps
3534  *
3535  *	Configure a Tx HW scheduler for the target rate.
3536  */
3537 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3538 {
3539 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3540 	unsigned int clk = adap->params.vpd.cclk * 1000;
3541 	unsigned int selected_cpt = 0, selected_bpt = 0;
3542 
3543 	if (kbps > 0) {
3544 		kbps *= 125;     /* -> bytes */
3545 		for (cpt = 1; cpt <= 255; cpt++) {
3546 			tps = clk / cpt;
3547 			bpt = (kbps + tps / 2) / tps;
3548 			if (bpt > 0 && bpt <= 255) {
3549 				v = bpt * tps;
3550 				delta = v >= kbps ? v - kbps : kbps - v;
3551 				if (delta < mindelta) {
3552 					mindelta = delta;
3553 					selected_cpt = cpt;
3554 					selected_bpt = bpt;
3555 				}
3556 			} else if (selected_cpt)
3557 				break;
3558 		}
3559 		if (!selected_cpt)
3560 			return -EINVAL;
3561 	}
3562 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3563 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3564 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3565 	if (sched & 1)
3566 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3567 	else
3568 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3569 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3570 	return 0;
3571 }
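
/*
 * Worked example of the (cpt, bpt) search above (illustrative numbers):
 * for a 1 Gbps target (kbps = 1000000, i.e. 125000000 bytes/s) on a
 * 250MHz core clock, cpt = 2 yields tps = 125000000 ticks/s and
 * bpt = 1 byte/tick, which reproduces the target rate exactly, so that
 * pair is kept with delta = 0.
 */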
3572 
3573 /**
3574  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3575  *	@adap: the adapter
3576  *	@sched: the scheduler index
3577  *	@ipg: the interpacket delay in tenths of nanoseconds
3578  *
3579  *	Set the interpacket delay for a HW packet rate scheduler.
3580  */
3581 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3582 {
3583 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3584 
3585 	/* convert ipg to nearest number of core clocks */
3586 	ipg *= core_ticks_per_usec(adap);
3587 	ipg = (ipg + 5000) / 10000;
3588 	if (ipg > M_TXTIMERSEPQ0)
3589 		return -EINVAL;
3590 
3591 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3592 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3593 	if (sched & 1)
3594 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3595 	else
3596 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3597 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3598 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3599 	return 0;
3600 }
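
/*
 * Worked example of the IPG conversion above (illustrative numbers):
 * on a 250MHz core clock (250 ticks/us), a requested gap of 1us
 * (ipg = 10000 tenths of a nanosecond) becomes
 * (10000 * 250 + 5000) / 10000 = 250 core clocks.
 */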
3601 
3602 /**
3603  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3604  *	@adap: the adapter
3605  *	@sched: the scheduler index
3606  *	@kbps: where to store the rate in Kbps (0 if the scheduler is disabled)
3607  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3608  *
3609  *	Return the current configuration of a HW Tx scheduler.
3610  */
3611 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3612 		     unsigned int *ipg)
3613 {
3614 	unsigned int v, addr, bpt, cpt;
3615 
3616 	if (kbps) {
3617 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3618 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3619 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3620 		if (sched & 1)
3621 			v >>= 16;
3622 		bpt = (v >> 8) & 0xff;
3623 		cpt = v & 0xff;
3624 		if (!cpt)
3625 			*kbps = 0;        /* scheduler disabled */
3626 		else {
3627 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3628 			*kbps = (v * bpt) / 125;
3629 		}
3630 	}
3631 	if (ipg) {
3632 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3633 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3634 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3635 		if (sched & 1)
3636 			v >>= 16;
3637 		v &= 0xffff;
3638 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3639 	}
3640 }
3641 
3642 /*
3643  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3644  * clocks.  The formula is
3645  *
3646  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3647  *
3648  * which is equivalent to
3649  *
3650  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3651  */
3652 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3653 {
3654 	u64 v = bytes256 * adap->params.vpd.cclk;
3655 
3656 	return v * 62 + v / 2;
3657 }
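
/*
 * Worked example (illustrative numbers): with a 250MHz core clock
 * (adap->params.vpd.cclk = 250000, in kHz) and bytes256 = 1, v = 250000
 * and the result is v * 62 + v / 2 = 15625000 bytes/s, matching
 * 1 * 256 * 250000000 / 4096.
 */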
3658 
3659 /**
3660  *	t4_get_chan_txrate - get the current per channel Tx rates
3661  *	@adap: the adapter
3662  *	@nic_rate: rates for NIC traffic
3663  *	@ofld_rate: rates for offloaded traffic
3664  *
3665  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3666  *	for each channel.
3667  */
3668 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3669 {
3670 	u32 v;
3671 
3672 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3673 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3674 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3675 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3676 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3677 
3678 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3679 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3680 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3681 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3682 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3683 }
3684 
3685 /**
3686  *	t4_set_trace_filter - configure one of the tracing filters
3687  *	@adap: the adapter
3688  *	@tp: the desired trace filter parameters
3689  *	@idx: which filter to configure
3690  *	@enable: whether to enable or disable the filter
3691  *
3692  *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3693  *	it indicates that the filter is already written in the register and it
3694  *	just needs to be enabled or disabled.
3695  */
3696 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3697     int idx, int enable)
3698 {
3699 	int i, ofst = idx * 4;
3700 	u32 data_reg, mask_reg, cfg;
3701 	u32 multitrc = F_TRCMULTIFILTER;
3702 	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3703 
3704 	if (idx < 0 || idx >= NTRACE)
3705 		return -EINVAL;
3706 
3707 	if (tp == NULL || !enable) {
3708 		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3709 		    enable ? en : 0);
3710 		return 0;
3711 	}
3712 
3713 	/*
3714 	 * TODO - After T4 data book is updated, specify the exact
3715 	 * section below.
3716 	 *
3717 	 * See T4 data book - MPS section for a complete description
3718 	 * of the below if..else handling of A_MPS_TRC_CFG register
3719 	 * value.
3720 	 */
3721 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3722 	if (cfg & F_TRCMULTIFILTER) {
3723 		/*
3724 		 * If multiple tracers are enabled, then maximum
3725 		 * capture size is 2.5KB (FIFO size of a single channel)
3726 		 * minus 2 flits for CPL_TRACE_PKT header.
3727 		 */
3728 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3729 			return -EINVAL;
3730 	} else {
3731 		/*
3732 		 * If multiple tracers are disabled, a maximum packet capture
3733 		 * size of 9600 bytes is recommended to avoid deadlocks.
3734 		 * Also, in this mode only trace0 can be enabled and running.
3735 		 */
3736 		multitrc = 0;
3737 		if (tp->snap_len > 9600 || idx)
3738 			return -EINVAL;
3739 	}
3740 
3741 	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3742 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3743 	    tp->min_len > M_TFMINPKTSIZE)
3744 		return -EINVAL;
3745 
3746 	/* stop the tracer we'll be changing */
3747 	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3748 
3749 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3750 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3751 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3752 
3753 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3754 		t4_write_reg(adap, data_reg, tp->data[i]);
3755 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3756 	}
3757 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3758 		     V_TFCAPTUREMAX(tp->snap_len) |
3759 		     V_TFMINPKTSIZE(tp->min_len));
3760 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3761 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3762 		     (is_t4(adap) ?
3763 		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3764 		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3765 
3766 	return 0;
3767 }
3768 
3769 /**
3770  *	t4_get_trace_filter - query one of the tracing filters
3771  *	@adap: the adapter
3772  *	@tp: the current trace filter parameters
3773  *	@idx: which trace filter to query
3774  *	@enabled: non-zero if the filter is enabled
3775  *
3776  *	Returns the current settings of one of the HW tracing filters.
3777  */
3778 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3779 			 int *enabled)
3780 {
3781 	u32 ctla, ctlb;
3782 	int i, ofst = idx * 4;
3783 	u32 data_reg, mask_reg;
3784 
3785 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3786 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3787 
3788 	if (is_t4(adap)) {
3789 		*enabled = !!(ctla & F_TFEN);
3790 		tp->port =  G_TFPORT(ctla);
3791 		tp->invert = !!(ctla & F_TFINVERTMATCH);
3792 	} else {
3793 		*enabled = !!(ctla & F_T5_TFEN);
3794 		tp->port = G_T5_TFPORT(ctla);
3795 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3796 	}
3797 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3798 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3799 	tp->skip_ofst = G_TFOFFSET(ctla);
3800 	tp->skip_len = G_TFLENGTH(ctla);
3801 
3802 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3803 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3804 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3805 
3806 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3807 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3808 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3809 	}
3810 }
3811 
3812 /**
3813  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3814  *	@adap: the adapter
3815  *	@cnt: where to store the count statistics
3816  *	@cycles: where to store the cycle statistics
3817  *
3818  *	Returns performance statistics from PMTX.
3819  */
3820 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3821 {
3822 	int i;
3823 	u32 data[2];
3824 
3825 	for (i = 0; i < PM_NSTATS; i++) {
3826 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3827 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3828 		if (is_t4(adap))
3829 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3830 		else {
3831 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3832 					 A_PM_TX_DBG_DATA, data, 2,
3833 					 A_PM_TX_DBG_STAT_MSB);
3834 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3835 		}
3836 	}
3837 }
3838 
3839 /**
3840  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3841  *	@adap: the adapter
3842  *	@cnt: where to store the count statistics
3843  *	@cycles: where to store the cycle statistics
3844  *
3845  *	Returns performance statistics from PMRX.
3846  */
3847 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3848 {
3849 	int i;
3850 	u32 data[2];
3851 
3852 	for (i = 0; i < PM_NSTATS; i++) {
3853 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3854 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3855 		if (is_t4(adap))
3856 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3857 		else {
3858 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3859 					 A_PM_RX_DBG_DATA, data, 2,
3860 					 A_PM_RX_DBG_STAT_MSB);
3861 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3862 		}
3863 	}
3864 }
3865 
3866 /**
3867  *	get_mps_bg_map - return the buffer groups associated with a port
3868  *	@adap: the adapter
3869  *	@idx: the port index
3870  *
3871  *	Returns a bitmap indicating which MPS buffer groups are associated
3872  *	with the given port.  Bit i is set if buffer group i is used by the
3873  *	port.
3874  */
3875 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3876 {
3877 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3878 
3879 	if (n == 0)
3880 		return idx == 0 ? 0xf : 0;
3881 	if (n == 1)
3882 		return idx < 2 ? (3 << (2 * idx)) : 0;
3883 	return 1 << idx;
3884 }
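
/*
 * Concretely: with one port (n == 0) port 0 owns all four buffer groups
 * (0xf); with two ports (n == 1) port 0 maps to groups 0-1 (0x3) and
 * port 1 to groups 2-3 (0xc); otherwise each port owns the single group
 * matching its index (1 << idx).
 */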
3885 
3886 /**
3887  *	t4_get_port_type_description - return Port Type string description
3888  *	@port_type: firmware Port Type enumeration
3889  */
3890 const char *t4_get_port_type_description(enum fw_port_type port_type)
3891 {
3892 	static const char *port_type_description[] = {
3893 		"Fiber_XFI",
3894 		"Fiber_XAUI",
3895 		"BT_SGMII",
3896 		"BT_XFI",
3897 		"BT_XAUI",
3898 		"KX4",
3899 		"CX4",
3900 		"KX",
3901 		"KR",
3902 		"SFP",
3903 		"BP_AP",
3904 		"BP4_AP",
3905 		"QSFP_10G",
3906 		"",
3907 		"QSFP",
3908 		"BP40_BA",
3909 	};
3910 
3911 	if (port_type < ARRAY_SIZE(port_type_description))
3912 		return port_type_description[port_type];
3913 	return "UNKNOWN";
3914 }
3915 
3916 /**
3917  *	t4_get_port_stats_offset - collect port stats relative to a
3918  *				   previous snapshot
3919  *	@adap: the adapter
3920  *	@idx: the port index
3921  *	@stats: current stats to fill
3922  *	@offset: previous stats snapshot
3923  */
3924 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3925 		struct port_stats *stats,
3926 		struct port_stats *offset)
3927 {
3928 	u64 *s, *o;
3929 	int i;
3930 
3931 	t4_get_port_stats(adap, idx, stats);
3932 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3933 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
3934 			i++, s++, o++)
3935 		*s -= *o;
3936 }
3937 
3938 /**
3939  *	t4_get_port_stats - collect port statistics
3940  *	@adap: the adapter
3941  *	@idx: the port index
3942  *	@p: the stats structure to fill
3943  *
3944  *	Collect statistics related to the given port from HW.
3945  */
3946 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3947 {
3948 	u32 bgmap = get_mps_bg_map(adap, idx);
3949 
3950 #define GET_STAT(name) \
3951 	t4_read_reg64(adap, \
3952 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3953 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3954 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3955 
3956 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3957 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3958 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3959 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3960 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3961 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3962 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3963 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3964 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3965 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3966 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3967 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3968 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3969 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3970 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3971 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3972 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3973 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3974 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3975 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3976 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3977 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3978 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3979 
3980 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3981 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3982 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3983 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3984 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3985 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3986 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3987 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3988 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3989 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3990 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3991 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3992 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3993 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3994 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3995 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3996 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3997 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3998 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3999 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
4000 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
4001 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
4002 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
4003 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
4004 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
4005 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
4006 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
4007 
4008 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
4009 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
4010 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
4011 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
4012 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
4013 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
4014 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
4015 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
4016 
4017 #undef GET_STAT
4018 #undef GET_STAT_COM
4019 }
4020 
4021 /**
4022  *	t4_clr_port_stats - clear port statistics
4023  *	@adap: the adapter
4024  *	@idx: the port index
4025  *
4026  *	Clear HW statistics for the given port.
4027  */
4028 void t4_clr_port_stats(struct adapter *adap, int idx)
4029 {
4030 	unsigned int i;
4031 	u32 bgmap = get_mps_bg_map(adap, idx);
4032 	u32 port_base_addr;
4033 
4034 	if (is_t4(adap))
4035 		port_base_addr = PORT_BASE(idx);
4036 	else
4037 		port_base_addr = T5_PORT_BASE(idx);
4038 
4039 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
4040 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
4041 		t4_write_reg(adap, port_base_addr + i, 0);
4042 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
4043 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
4044 		t4_write_reg(adap, port_base_addr + i, 0);
4045 	for (i = 0; i < 4; i++)
4046 		if (bgmap & (1 << i)) {
4047 			t4_write_reg(adap,
4048 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
4049 			t4_write_reg(adap,
4050 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
4051 		}
4052 }
4053 
4054 /**
4055  *	t4_get_lb_stats - collect loopback port statistics
4056  *	@adap: the adapter
4057  *	@idx: the loopback port index
4058  *	@p: the stats structure to fill
4059  *
4060  *	Return HW statistics for the given loopback port.
4061  */
4062 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4063 {
4064 	u32 bgmap = get_mps_bg_map(adap, idx);
4065 
4066 #define GET_STAT(name) \
4067 	t4_read_reg64(adap, \
4068 	(is_t4(adap) ? \
4069 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
4070 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
4071 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
4072 
4073 	p->octets           = GET_STAT(BYTES);
4074 	p->frames           = GET_STAT(FRAMES);
4075 	p->bcast_frames     = GET_STAT(BCAST);
4076 	p->mcast_frames     = GET_STAT(MCAST);
4077 	p->ucast_frames     = GET_STAT(UCAST);
4078 	p->error_frames     = GET_STAT(ERROR);
4079 
4080 	p->frames_64        = GET_STAT(64B);
4081 	p->frames_65_127    = GET_STAT(65B_127B);
4082 	p->frames_128_255   = GET_STAT(128B_255B);
4083 	p->frames_256_511   = GET_STAT(256B_511B);
4084 	p->frames_512_1023  = GET_STAT(512B_1023B);
4085 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
4086 	p->frames_1519_max  = GET_STAT(1519B_MAX);
4087 	p->drop             = GET_STAT(DROP_FRAMES);
4088 
4089 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4090 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4091 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4092 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4093 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4094 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4095 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4096 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4097 
4098 #undef GET_STAT
4099 #undef GET_STAT_COM
4100 }
4101 
4102 /**
4103  *	t4_wol_magic_enable - enable/disable magic packet WoL
4104  *	@adap: the adapter
4105  *	@port: the physical port index
4106  *	@addr: MAC address expected in magic packets, %NULL to disable
4107  *
4108  *	Enables/disables magic packet wake-on-LAN for the selected port.
4109  */
4110 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4111 			 const u8 *addr)
4112 {
4113 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4114 
4115 	if (is_t4(adap)) {
4116 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4117 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4118 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4119 	} else {
4120 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4121 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4122 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4123 	}
4124 
4125 	if (addr) {
4126 		t4_write_reg(adap, mag_id_reg_l,
4127 			     (addr[2] << 24) | (addr[3] << 16) |
4128 			     (addr[4] << 8) | addr[5]);
4129 		t4_write_reg(adap, mag_id_reg_h,
4130 			     (addr[0] << 8) | addr[1]);
4131 	}
4132 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4133 			 V_MAGICEN(addr != NULL));
4134 }
4135 
4136 /**
4137  *	t4_wol_pat_enable - enable/disable pattern-based WoL
4138  *	@adap: the adapter
4139  *	@port: the physical port index
4140  *	@map: bitmap of which HW pattern filters to set
4141  *	@mask0: byte mask for bytes 0-63 of a packet
4142  *	@mask1: byte mask for bytes 64-127 of a packet
4143  *	@crc: Ethernet CRC for selected bytes
4144  *	@enable: enable/disable switch
4145  *
4146  *	Sets the pattern filters indicated in @map to mask out the bytes
4147  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4148  *	the resulting packet against @crc.  If @enable is %true pattern-based
4149  *	WoL is enabled, otherwise disabled.
4150  */
4151 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4152 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4153 {
4154 	int i;
4155 	u32 port_cfg_reg;
4156 
4157 	if (is_t4(adap))
4158 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4159 	else
4160 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4161 
4162 	if (!enable) {
4163 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4164 		return 0;
4165 	}
4166 	if (map > 0xff)
4167 		return -EINVAL;
4168 
4169 #define EPIO_REG(name) \
4170 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4171 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4172 
4173 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4174 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4175 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4176 
4177 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4178 		if (!(map & 1))
4179 			continue;
4180 
4181 		/* write byte masks */
4182 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4183 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4184 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4185 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4186 			return -ETIMEDOUT;
4187 
4188 		/* write CRC */
4189 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4190 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4191 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4192 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4193 			return -ETIMEDOUT;
4194 	}
4195 #undef EPIO_REG
4196 
4197 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4198 	return 0;
4199 }
4200 
4201 /**
4202  *	t4_mk_filtdelwr - create a delete filter WR
4203  *	@ftid: the filter ID
4204  *	@wr: the filter work request to populate
4205  *	@qid: ingress queue to receive the delete notification
4206  *
4207  *	Creates a filter work request to delete the supplied filter.  If @qid is
4208  *	negative the delete notification is suppressed.
4209  */
4210 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4211 {
4212 	memset(wr, 0, sizeof(*wr));
4213 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4214 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4215 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4216 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4217 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4218 	if (qid >= 0)
4219 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4220 }
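
/*
 * Illustrative use of t4_mk_filtdelwr() (a sketch): delete filter 5 and
 * direct the delete notification to ingress queue 0:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(5, &wr, 0);
 *
 * The populated work request must then be submitted through the normal
 * WR path; passing a negative qid would suppress the notification.
 */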
4221 
4222 #define INIT_CMD(var, cmd, rd_wr) do { \
4223 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4224 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4225 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4226 } while (0)
4227 
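/**
 *	t4_fwaddrspace_write - write a value within the FW address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues an LDST command through the given mailbox to write @val at
 *	@addr in the firmware's address space.
 */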
4228 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4229 {
4230 	struct fw_ldst_cmd c;
4231 
4232 	memset(&c, 0, sizeof(c));
4233 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4234 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4235 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4236 	c.u.addrval.addr = htonl(addr);
4237 	c.u.addrval.val = htonl(val);
4238 
4239 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4240 }
4241 
4242 /**
4243  *	t4_mdio_rd - read a PHY register through MDIO
4244  *	@adap: the adapter
4245  *	@mbox: mailbox to use for the FW command
4246  *	@phy_addr: the PHY address
4247  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4248  *	@reg: the register to read
4249  *	@valp: where to store the value
4250  *
4251  *	Issues a FW command through the given mailbox to read a PHY register.
4252  */
4253 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4254 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4255 {
4256 	int ret;
4257 	struct fw_ldst_cmd c;
4258 
4259 	memset(&c, 0, sizeof(c));
4260 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4261 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4262 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4263 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4264 				   V_FW_LDST_CMD_MMD(mmd));
4265 	c.u.mdio.raddr = htons(reg);
4266 
4267 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4268 	if (ret == 0)
4269 		*valp = ntohs(c.u.mdio.rval);
4270 	return ret;
4271 }
4272 
4273 /**
4274  *	t4_mdio_wr - write a PHY register through MDIO
4275  *	@adap: the adapter
4276  *	@mbox: mailbox to use for the FW command
4277  *	@phy_addr: the PHY address
4278  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4279  *	@reg: the register to write
4280  *	@valp: value to write
4281  *
4282  *	Issues a FW command through the given mailbox to write a PHY register.
4283  */
4284 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4285 	       unsigned int mmd, unsigned int reg, unsigned int val)
4286 {
4287 	struct fw_ldst_cmd c;
4288 
4289 	memset(&c, 0, sizeof(c));
4290 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4291 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4292 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4293 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4294 				   V_FW_LDST_CMD_MMD(mmd));
4295 	c.u.mdio.raddr = htons(reg);
4296 	c.u.mdio.rval = htons(val);
4297 
4298 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4299 }
4300 
4301 /**
4302  *	t4_i2c_rd - read I2C data from adapter
4303  *	@adap: the adapter
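 *	@mbox: mailbox to use for the FW command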
4304  *	@port: Port number if per-port device; <0 if not
4305  *	@devid: per-port device ID or absolute device ID
4306  *	@offset: byte offset into device I2C space
4307  *	@len: byte length of I2C space data
4308  *	@buf: buffer in which to return I2C data
4309  *
4310  *	Reads the I2C data from the indicated device and location.
4311  */
4312 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4313 	      int port, unsigned int devid,
4314 	      unsigned int offset, unsigned int len,
4315 	      u8 *buf)
4316 {
4317 	struct fw_ldst_cmd ldst;
4318 	int ret;
4319 
4320 	if (port >= 4 ||
4321 	    devid >= 256 ||
4322 	    offset >= 256 ||
4323 	    len > sizeof ldst.u.i2c.data)
4324 		return -EINVAL;
4325 
4326 	memset(&ldst, 0, sizeof ldst);
4327 	ldst.op_to_addrspace =
4328 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4329 			    F_FW_CMD_REQUEST |
4330 			    F_FW_CMD_READ |
4331 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4332 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4333 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4334 	ldst.u.i2c.did = devid;
4335 	ldst.u.i2c.boffset = offset;
4336 	ldst.u.i2c.blen = len;
4337 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4338 	if (!ret)
4339 		memcpy(buf, ldst.u.i2c.data, len);
4340 	return ret;
4341 }
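
/*
 * Illustrative use of t4_i2c_rd() (a sketch): read the first 8 bytes of
 * a port 0 transceiver EEPROM, which conventionally answers at I2C
 * address 0xa0:
 *
 *	u8 id[8];
 *	int ret = t4_i2c_rd(adap, mbox, 0, 0xa0, 0, 8, id);
 *
 * A negative return means the firmware rejected or failed the access.
 */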
4342 
4343 /**
4344  *	t4_i2c_wr - write I2C data to adapter
4345  *	@adap: the adapter
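 *	@mbox: mailbox to use for the FW command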
4346  *	@port: Port number if per-port device; <0 if not
4347  *	@devid: per-port device ID or absolute device ID
4348  *	@offset: byte offset into device I2C space
4349  *	@len: byte length of I2C space data
4350  *	@buf: buffer containing new I2C data
4351  *
4352  *	Write the I2C data to the indicated device and location.
4353  */
4354 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
4355 	      int port, unsigned int devid,
4356 	      unsigned int offset, unsigned int len,
4357 	      u8 *buf)
4358 {
4359 	struct fw_ldst_cmd ldst;
4360 
4361 	if (port >= 4 ||
4362 	    devid >= 256 ||
4363 	    offset >= 256 ||
4364 	    len > sizeof ldst.u.i2c.data)
4365 		return -EINVAL;
4366 
4367 	memset(&ldst, 0, sizeof ldst);
4368 	ldst.op_to_addrspace =
4369 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4370 			    F_FW_CMD_REQUEST |
4371 			    F_FW_CMD_WRITE |
4372 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4373 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4374 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4375 	ldst.u.i2c.did = devid;
4376 	ldst.u.i2c.boffset = offset;
4377 	ldst.u.i2c.blen = len;
4378 	memcpy(ldst.u.i2c.data, buf, len);
4379 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4380 }
4381 
4382 /**
4383  *	t4_sge_ctxt_flush - flush the SGE context cache
4384  *	@adap: the adapter
4385  *	@mbox: mailbox to use for the FW command
4386  *
4387  *	Issues a FW command through the given mailbox to flush the
4388  *	SGE context cache.
4389  */
4390 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4391 {
4392 	int ret;
4393 	struct fw_ldst_cmd c;
4394 
4395 	memset(&c, 0, sizeof(c));
4396 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4397 			F_FW_CMD_READ |
4398 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4399 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4400 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4401 
4402 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4403 	return ret;
4404 }
4405 
4406 /**
4407  *	t4_sge_ctxt_rd - read an SGE context through FW
4408  *	@adap: the adapter
4409  *	@mbox: mailbox to use for the FW command
4410  *	@cid: the context id
4411  *	@ctype: the context type
4412  *	@data: where to store the context data
4413  *
4414  *	Issues a FW command through the given mailbox to read an SGE context.
4415  */
4416 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4417 		   enum ctxt_type ctype, u32 *data)
4418 {
4419 	int ret;
4420 	struct fw_ldst_cmd c;
4421 
4422 	if (ctype == CTXT_EGRESS)
4423 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4424 	else if (ctype == CTXT_INGRESS)
4425 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4426 	else if (ctype == CTXT_FLM)
4427 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4428 	else
4429 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4430 
4431 	memset(&c, 0, sizeof(c));
4432 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4433 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4434 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4435 	c.u.idctxt.physid = htonl(cid);
4436 
4437 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4438 	if (ret == 0) {
4439 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4440 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4441 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4442 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4443 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4444 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4445 	}
4446 	return ret;
4447 }
4448 
4449 /**
4450  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4451  *	@adap: the adapter
4452  *	@cid: the context id
4453  *	@ctype: the context type
4454  *	@data: where to store the context data
4455  *
4456  *	Reads an SGE context directly, bypassing FW.  This is only for
4457  *	debugging when FW is unavailable.
4458  */
4459 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4460 		      u32 *data)
4461 {
4462 	int i, ret;
4463 
4464 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4465 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4466 	if (!ret)
4467 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4468 			*data++ = t4_read_reg(adap, i);
4469 	return ret;
4470 }
4471 
4472 /**
4473  *	t4_fw_hello - establish communication with FW
4474  *	@adap: the adapter
4475  *	@mbox: mailbox to use for the FW command
4476  *	@evt_mbox: mailbox to receive async FW events
4477  *	@master: specifies the caller's willingness to be the device master
4478  *	@state: returns the current device state (if non-NULL)
4479  *
4480  *	Issues a command to establish communication with FW.  Returns either
4481  *	an error (negative integer) or the mailbox of the Master PF.
4482  */
4483 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4484 		enum dev_master master, enum dev_state *state)
4485 {
4486 	int ret;
4487 	struct fw_hello_cmd c;
4488 	u32 v;
4489 	unsigned int master_mbox;
4490 	int retries = FW_CMD_HELLO_RETRIES;
4491 
4492 retry:
4493 	memset(&c, 0, sizeof(c));
4494 	INIT_CMD(c, HELLO, WRITE);
4495 	c.err_to_clearinit = htonl(
4496 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4497 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4498 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4499 			M_FW_HELLO_CMD_MBMASTER) |
4500 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4501 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4502 		F_FW_HELLO_CMD_CLEARINIT);
4503 
4504 	/*
4505 	 * Issue the HELLO command to the firmware.  If it's not successful
4506 	 * but indicates that we got a "busy" or "timeout" condition, retry
4507 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4508 	 * retry limit, check to see if the firmware left us any error
4509 	 * information and report that if so ...
4510 	 */
4511 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4512 	if (ret != FW_SUCCESS) {
4513 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4514 			goto retry;
4515 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4516 			t4_report_fw_error(adap);
4517 		return ret;
4518 	}
4519 
4520 	v = ntohl(c.err_to_clearinit);
4521 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4522 	if (state) {
4523 		if (v & F_FW_HELLO_CMD_ERR)
4524 			*state = DEV_STATE_ERR;
4525 		else if (v & F_FW_HELLO_CMD_INIT)
4526 			*state = DEV_STATE_INIT;
4527 		else
4528 			*state = DEV_STATE_UNINIT;
4529 	}
4530 
4531 	/*
4532 	 * If we're not the Master PF then we need to wait around for the
4533 	 * Master PF Driver to finish setting up the adapter.
4534 	 *
4535 	 * Note that we also do this wait if we're a non-Master-capable PF and
4536 	 * there is no current Master PF; a Master PF may show up momentarily
4537 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4538 	 * OS loads lots of different drivers rapidly at the same time).  In
4539 	 * this case, the Master PF returned by the firmware will be
4540 	 * M_PCIE_FW_MASTER so the test below will work ...
4541 	 */
4542 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4543 	    master_mbox != mbox) {
4544 		int waiting = FW_CMD_HELLO_TIMEOUT;
4545 
4546 		/*
4547 		 * Wait for the firmware to either indicate an error or
4548 		 * initialized state.  If we see either of these we bail out
4549 		 * and report the issue to the caller.  If we exhaust the
4550 		 * "hello timeout" and we haven't exhausted our retries, try
4551 		 * again.  Otherwise bail with a timeout error.
4552 		 */
4553 		for (;;) {
4554 			u32 pcie_fw;
4555 
4556 			msleep(50);
4557 			waiting -= 50;
4558 
4559 			/*
4560 			 * If neither Error nor Initialized is indicated
4561 			 * by the firmware keep waiting till we exhaust our
4562 			 * timeout ... and then retry if we haven't exhausted
4563 			 * our retries ...
4564 			 */
4565 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4566 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4567 				if (waiting <= 0) {
4568 					if (retries-- > 0)
4569 						goto retry;
4570 
4571 					return -ETIMEDOUT;
4572 				}
4573 				continue;
4574 			}
4575 
4576 			/*
4577 			 * We have either an Error or an Initialized condition;
4578 			 * report errors preferentially.
4579 			 */
4580 			if (state) {
4581 				if (pcie_fw & F_PCIE_FW_ERR)
4582 					*state = DEV_STATE_ERR;
4583 				else if (pcie_fw & F_PCIE_FW_INIT)
4584 					*state = DEV_STATE_INIT;
4585 			}
4586 
4587 			/*
4588 			 * If we arrived before a Master PF was selected and
4589 			 * there's now a valid Master PF, grab its identity
4590 			 * for our caller.
4591 			 */
4592 			if (master_mbox == M_PCIE_FW_MASTER &&
4593 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4594 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4595 			break;
4596 		}
4597 	}
4598 
4599 	return master_mbox;
4600 }
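
/*
 * Illustrative use of t4_fw_hello() (a sketch): a PF that is willing,
 * but not required, to become the master, using its own mailbox for
 * async events:
 *
 *	enum dev_state state;
 *	int master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *
 * A non-negative return is the master PF's mailbox; @state then tells
 * the caller whether the device still needs to be initialized.
 */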
4601 
4602 /**
4603  *	t4_fw_bye - end communication with FW
4604  *	@adap: the adapter
4605  *	@mbox: mailbox to use for the FW command
4606  *
4607  *	Issues a command to terminate communication with FW.
4608  */
4609 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4610 {
4611 	struct fw_bye_cmd c;
4612 
4613 	memset(&c, 0, sizeof(c));
4614 	INIT_CMD(c, BYE, WRITE);
4615 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4616 }
4617 
4618 /**
4619  *	t4_fw_reset - issue a reset to FW
4620  *	@adap: the adapter
4621  *	@mbox: mailbox to use for the FW command
4622  *	@reset: specifies the type of reset to perform
4623  *
4624  *	Issues a reset command of the specified type to FW.
4625  */
4626 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4627 {
4628 	struct fw_reset_cmd c;
4629 
4630 	memset(&c, 0, sizeof(c));
4631 	INIT_CMD(c, RESET, WRITE);
4632 	c.val = htonl(reset);
4633 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4634 }
4635 
4636 /**
4637  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4638  *	@adap: the adapter
4639  *	@mbox: mailbox to use for the FW RESET command (if desired)
4640  *	@force: force uP into RESET even if FW RESET command fails
4641  *
4642  *	Issues a RESET command to firmware (if desired) with a HALT indication
4643  *	and then puts the microprocessor into RESET state.  The RESET command
4644  *	will only be issued if a legitimate mailbox is provided (mbox <=
4645  *	M_PCIE_FW_MASTER).
4646  *
4647  *	This is generally used in order for the host to safely manipulate the
4648  *	adapter without fear of conflicting with whatever the firmware might
4649  *	be doing.  The only way out of this state is to RESTART the firmware
4650  *	...
4651  */
4652 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4653 {
4654 	int ret = 0;
4655 
4656 	/*
4657 	 * If a legitimate mailbox is provided, issue a RESET command
4658 	 * with a HALT indication.
4659 	 */
4660 	if (mbox <= M_PCIE_FW_MASTER) {
4661 		struct fw_reset_cmd c;
4662 
4663 		memset(&c, 0, sizeof(c));
4664 		INIT_CMD(c, RESET, WRITE);
4665 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4666 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4667 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4668 	}
4669 
4670 	/*
4671 	 * Normally we won't complete the operation if the firmware RESET
4672 	 * command fails but if our caller insists we'll go ahead and put the
4673 	 * uP into RESET.  This can be useful if the firmware is hung or even
4674 	 * missing ...  We'll have to take the risk of putting the uP into
4675 	 * RESET without the cooperation of firmware in that case.
4676 	 *
4677 	 * We also force the firmware's HALT flag to be on in case we bypassed
4678 	 * the firmware RESET command above or we're dealing with old firmware
4679 	 * which doesn't have the HALT capability.  This will serve as a flag
4680 	 * for the incoming firmware to know that it's coming out of a HALT
4681 	 * rather than a RESET ... if it's new enough to understand that ...
4682 	 */
4683 	if (ret == 0 || force) {
4684 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4685 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4686 	}
4687 
4688 	/*
4689 	 * And we always return the result of the firmware RESET command
4690 	 * even when we force the uP into RESET ...
4691 	 */
4692 	return ret;
4693 }
4694 
4695 /**
4696  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4697  *	@adap: the adapter
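 *	@mbox: mailbox to use for the FW RESET command (if desired)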
4698  *	@reset: if we want to do a RESET to restart things
4699  *
4700  *	Restart firmware previously halted by t4_fw_halt().  On successful
4701  *	return the previous PF Master remains as the new PF Master and there
4702  *	is no need to issue a new HELLO command, etc.
4703  *
4704  *	We do this in two ways:
4705  *
4706  *	 1. If we're dealing with newer firmware we'll simply want to take
4707  *	    the chip's microprocessor out of RESET.  This will cause the
4708  *	    firmware to start up from its start vector.  And then we'll loop
4709  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4710  *	    reset to 0) or we timeout.
4711  *
4712  *	 2. If we're dealing with older firmware then we'll need to RESET
4713  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4714  *	    flag and automatically RESET itself on startup.
4715  */
4716 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4717 {
4718 	if (reset) {
4719 		/*
4720 		 * Since we're directing the RESET instead of the firmware
4721 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4722 		 * bit.
4723 		 */
4724 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4725 
4726 		/*
4727 		 * If we've been given a valid mailbox, first try to get the
4728 		 * firmware to do the RESET.  If that works, great and we can
4729 		 * return success.  Otherwise, if we haven't been given a
4730 		 * valid mailbox or the RESET command failed, fall back to
4731 		 * hitting the chip with a hammer.
4732 		 */
4733 		if (mbox <= M_PCIE_FW_MASTER) {
4734 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4735 			msleep(100);
4736 			if (t4_fw_reset(adap, mbox,
4737 					F_PIORST | F_PIORSTMODE) == 0)
4738 				return 0;
4739 		}
4740 
4741 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4742 		msleep(2000);
4743 	} else {
4744 		int ms;
4745 
4746 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4747 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4748 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4749 				return FW_SUCCESS;
4750 			msleep(100);
4751 			ms += 100;
4752 		}
4753 		return -ETIMEDOUT;
4754 	}
4755 	return 0;
4756 }
4757 
4758 /**
4759  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4760  *	@adap: the adapter
4761  *	@mbox: mailbox to use for the FW RESET command (if desired)
4762  *	@fw_data: the firmware image to write
4763  *	@size: image size
4764  *	@force: force upgrade even if firmware doesn't cooperate
4765  *
4766  *	Perform all of the steps necessary for upgrading an adapter's
4767  *	firmware image.  Normally this requires the cooperation of the
4768  *	existing firmware in order to halt all existing activities
4769  *	but if an invalid mailbox token is passed in we skip that step
4770  *	(though we'll still put the adapter microprocessor into RESET in
4771  *	that case).
4772  *
4773  *	On successful return the new firmware will have been loaded and
4774  *	the adapter will have been fully RESET losing all previous setup
4775  *	state.  On unsuccessful return the adapter may be completely hosed ...
4776  *	positive errno indicates that the adapter is ~probably~ intact, a
4777  *	negative errno indicates that things are looking bad ...
4778  */
4779 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4780 		  const u8 *fw_data, unsigned int size, int force)
4781 {
4782 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4783 	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4784 	int reset, ret;
4785 
4786 	if (!bootstrap) {
4787 		ret = t4_fw_halt(adap, mbox, force);
4788 		if (ret < 0 && !force)
4789 			return ret;
4790 	}
4791 
4792 	ret = t4_load_fw(adap, fw_data, size);
4793 	if (ret < 0 || bootstrap)
4794 		return ret;
4795 
4796 	/*
4797 	 * Older versions of the firmware don't understand the new
4798 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4799 	 * restart.  So for newly loaded older firmware we'll have to do the
4800 	 * RESET for it so it starts up on a clean slate.  We can tell if
4801 	 * the newly loaded firmware will handle this right by checking
4802 	 * its header flags to see if it advertises the capability.
4803 	 */
4804 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4805 	return t4_fw_restart(adap, mbox, reset);
4806 }
4807 
4808 /**
4809  *	t4_fw_initialize - ask FW to initialize the device
4810  *	@adap: the adapter
4811  *	@mbox: mailbox to use for the FW command
4812  *
4813  *	Issues a command to FW to partially initialize the device.  This
4814  *	performs initialization that generally doesn't depend on user input.
4815  */
4816 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4817 {
4818 	struct fw_initialize_cmd c;
4819 
4820 	memset(&c, 0, sizeof(c));
4821 	INIT_CMD(c, INITIALIZE, WRITE);
4822 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4823 }
4824 
4825 /**
4826  *	t4_query_params - query FW or device parameters
4827  *	@adap: the adapter
4828  *	@mbox: mailbox to use for the FW command
4829  *	@pf: the PF
4830  *	@vf: the VF
4831  *	@nparams: the number of parameters
4832  *	@params: the parameter names
4833  *	@val: the parameter values
4834  *
4835  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4836  *	queried at once.
4837  */
4838 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4839 		    unsigned int vf, unsigned int nparams, const u32 *params,
4840 		    u32 *val)
4841 {
4842 	int i, ret;
4843 	struct fw_params_cmd c;
4844 	__be32 *p = &c.param[0].mnem;
4845 
4846 	if (nparams > 7)
4847 		return -EINVAL;
4848 
4849 	memset(&c, 0, sizeof(c));
4850 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4851 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4852 			    V_FW_PARAMS_CMD_VFN(vf));
4853 	c.retval_len16 = htonl(FW_LEN16(c));
4854 
4855 	for (i = 0; i < nparams; i++, p += 2, params++)
4856 		*p = htonl(*params);
4857 
4858 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4859 	if (ret == 0)
4860 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4861 			*val++ = ntohl(*p);
4862 	return ret;
4863 }
4864 
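/*
 * Illustrative sketch: querying a single device parameter.  Assumes the
 * FW_PARAMS_PARAM_DEV_CCLK mnemonic from t4fw_interface.h and the
 * adapter's cached mbox/pf; a production caller would store the result
 * in adapter->params.vpd.cclk.
 */
static inline int example_query_cclk(struct adapter *adap, u32 *cclk_khz)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);

	/* One name in, one value out, well under the 7-parameter limit. */
	return t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
	    cclk_khz);
}
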
4865 /**
4866  *	t4_set_params - sets FW or device parameters
4867  *	@adap: the adapter
4868  *	@mbox: mailbox to use for the FW command
4869  *	@pf: the PF
4870  *	@vf: the VF
4871  *	@nparams: the number of parameters
4872  *	@params: the parameter names
4873  *	@val: the parameter values
4874  *
4875  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4876  *	specified at once.
4877  */
4878 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4879 		  unsigned int vf, unsigned int nparams, const u32 *params,
4880 		  const u32 *val)
4881 {
4882 	struct fw_params_cmd c;
4883 	__be32 *p = &c.param[0].mnem;
4884 
4885 	if (nparams > 7)
4886 		return -EINVAL;
4887 
4888 	memset(&c, 0, sizeof(c));
4889 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4890 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4891 			    V_FW_PARAMS_CMD_VFN(vf));
4892 	c.retval_len16 = htonl(FW_LEN16(c));
4893 
4894 	while (nparams--) {
4895 		*p++ = htonl(*params);
4896 		params++;
4897 		*p++ = htonl(*val);
4898 		val++;
4899 	}
4900 
4901 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4902 }
4903 
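/*
 * Illustrative sketch: t4_set_params is the write-side twin of
 * t4_query_params.  "param" and "val" stand in for any valid
 * mnemonic/value pair; batching up to 7 pairs per call amortizes the
 * mailbox round trip.
 */
static inline int example_set_param(struct adapter *adap, u32 param, u32 val)
{
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
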
4904 /**
4905  *	t4_cfg_pfvf - configure PF/VF resource limits
4906  *	@adap: the adapter
4907  *	@mbox: mailbox to use for the FW command
4908  *	@pf: the PF being configured
4909  *	@vf: the VF being configured
4910  *	@txq: the max number of egress queues
4911  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4912  *	@rxqi: the max number of interrupt-capable ingress queues
4913  *	@rxq: the max number of interruptless ingress queues
4914  *	@tc: the PCI traffic class
4915  *	@vi: the max number of virtual interfaces
4916  *	@cmask: the channel access rights mask for the PF/VF
4917  *	@pmask: the port access rights mask for the PF/VF
4918  *	@nexact: the maximum number of exact MPS filters
4919  *	@rcaps: read capabilities
4920  *	@wxcaps: write/execute capabilities
4921  *
4922  *	Configures resource limits and capabilities for a physical or virtual
4923  *	function.
4924  */
4925 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4926 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4927 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4928 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4929 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4930 {
4931 	struct fw_pfvf_cmd c;
4932 
4933 	memset(&c, 0, sizeof(c));
4934 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4935 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4936 			    V_FW_PFVF_CMD_VFN(vf));
4937 	c.retval_len16 = htonl(FW_LEN16(c));
4938 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4939 			       V_FW_PFVF_CMD_NIQ(rxq));
4940 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4941 			      V_FW_PFVF_CMD_PMASK(pmask) |
4942 			      V_FW_PFVF_CMD_NEQ(txq));
4943 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4944 				V_FW_PFVF_CMD_NEXACTF(nexact));
4945 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4946 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4947 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4948 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4949 }
4950 
4951 /**
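/*
 * Illustrative sketch: granting a VF a modest resource slice.  Every
 * limit below is hypothetical; a real driver derives them from its
 * overall resource budget and passes real capability masks instead of 0.
 */
static inline int example_cfg_vf(struct adapter *adap, unsigned int vf)
{
	return t4_cfg_pfvf(adap, adap->mbox, adap->pf, vf,
	    16, 8,		/* txq, txq_eth_ctrl */
	    8, 4,		/* rxqi, rxq */
	    0, 1,		/* traffic class, nvi */
	    0xf, 0xf,		/* cmask, pmask: all channels/ports */
	    16,			/* exact-match MPS filters */
	    0, 0);		/* rcaps, wxcaps */
}
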
4952  *	t4_alloc_vi_func - allocate a virtual interface
4953  *	@adap: the adapter
4954  *	@mbox: mailbox to use for the FW command
4955  *	@port: physical port associated with the VI
4956  *	@pf: the PF owning the VI
4957  *	@vf: the VF owning the VI
4958  *	@nmac: number of MAC addresses needed (1 to 5)
4959  *	@mac: the MAC addresses of the VI
4960  *	@rss_size: size of RSS table slice associated with this VI
4961  *	@portfunc: which Port Application Function MAC Address is desired
4962  *	@idstype: Intrusion Detection Type
4963  *
4964  *	Allocates a virtual interface for the given physical port.  If @mac is
4965  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4966  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4967  *	stored consecutively so the space needed is @nmac * 6 bytes.
4968  *	Returns a negative error number or the non-negative VI id.
4969  */
4970 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4971 		     unsigned int port, unsigned int pf, unsigned int vf,
4972 		     unsigned int nmac, u8 *mac, u16 *rss_size,
4973 		     unsigned int portfunc, unsigned int idstype)
4974 {
4975 	int ret;
4976 	struct fw_vi_cmd c;
4977 
4978 	memset(&c, 0, sizeof(c));
4979 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4980 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4981 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4982 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4983 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4984 			       V_FW_VI_CMD_FUNC(portfunc));
4985 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4986 	c.nmac = nmac - 1;
4987 
4988 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4989 	if (ret)
4990 		return ret;
4991 
4992 	if (mac) {
4993 		memcpy(mac, c.mac, sizeof(c.mac));
4994 		switch (nmac) {
4995 		case 5:
4996 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* FALLTHROUGH */
4997 		case 4:
4998 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* FALLTHROUGH */
4999 		case 3:
5000 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* FALLTHROUGH */
5001 		case 2:
5002 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
5003 		}
5004 	}
5005 	if (rss_size)
5006 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
5007 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
5008 }
5009 
5010 /**
5011  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
5012  *	@adap: the adapter
5013  *	@mbox: mailbox to use for the FW command
5014  *	@port: physical port associated with the VI
5015  *	@pf: the PF owning the VI
5016  *	@vf: the VF owning the VI
5017  *	@nmac: number of MAC addresses needed (1 to 5)
5018  *	@mac: the MAC addresses of the VI
5019  *	@rss_size: size of RSS table slice associated with this VI
5020  *
5021  *	Backwards-compatible convenience routine to allocate a Virtual
5022  *	Interface with an Ethernet Port Application Function and Intrusion
5023  *	Detection System disabled.
5024  */
5025 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
5026 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
5027 		u16 *rss_size)
5028 {
5029 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
5030 				FW_VI_FUNC_ETH, 0);
5031 }
5032 
5033 /**
5034  *	t4_free_vi - free a virtual interface
5035  *	@adap: the adapter
5036  *	@mbox: mailbox to use for the FW command
5037  *	@pf: the PF owning the VI
5038  *	@vf: the VF owning the VI
5039  *	@viid: virtual interface identifier
5040  *
5041  *	Free a previously allocated virtual interface.
5042  */
5043 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
5044 	       unsigned int vf, unsigned int viid)
5045 {
5046 	struct fw_vi_cmd c;
5047 
5048 	memset(&c, 0, sizeof(c));
5049 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
5050 			    F_FW_CMD_REQUEST |
5051 			    F_FW_CMD_EXEC |
5052 			    V_FW_VI_CMD_PFN(pf) |
5053 			    V_FW_VI_CMD_VFN(vf));
5054 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
5055 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
5056 
5057 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5058 }
5059 
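/*
 * Illustrative sketch: a VI allocate/free round trip on hypothetical
 * port 0.  A real caller keeps the returned VI id, MAC, and RSS slice
 * size for the life of the interface.
 */
static inline int example_vi_roundtrip(struct adapter *adap)
{
	u8 mac[6];
	u16 rss_size;
	int viid;

	viid = t4_alloc_vi(adap, adap->mbox, 0, adap->pf, 0, 1, mac,
	    &rss_size);
	if (viid < 0)
		return viid;	/* negative errno from the FW command */
	return t4_free_vi(adap, adap->mbox, adap->pf, 0, viid);
}
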
5060 /**
5061  *	t4_set_rxmode - set Rx properties of a virtual interface
5062  *	@adap: the adapter
5063  *	@mbox: mailbox to use for the FW command
5064  *	@viid: the VI id
5065  *	@mtu: the new MTU or -1
5066  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5067  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5068  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5069  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
5070  *	@sleep_ok: if true we may sleep while awaiting command completion
5071  *
5072  *	Sets Rx properties of a virtual interface.
5073  */
5074 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5075 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
5076 		  bool sleep_ok)
5077 {
5078 	struct fw_vi_rxmode_cmd c;
5079 
5080 	/* convert to FW values */
5081 	if (mtu < 0)
5082 		mtu = M_FW_VI_RXMODE_CMD_MTU;
5083 	if (promisc < 0)
5084 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
5085 	if (all_multi < 0)
5086 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
5087 	if (bcast < 0)
5088 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
5089 	if (vlanex < 0)
5090 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
5091 
5092 	memset(&c, 0, sizeof(c));
5093 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
5094 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
5095 	c.retval_len16 = htonl(FW_LEN16(c));
5096 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
5097 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
5098 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
5099 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
5100 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
5101 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5102 }
5103 
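/*
 * Illustrative sketch: the -1 "no change" convention of t4_set_rxmode
 * lets a caller toggle promiscuous mode while leaving MTU, multicast,
 * broadcast, and VLAN-extraction settings untouched.
 */
static inline int example_set_promisc(struct adapter *adap, unsigned int viid,
				      bool on)
{
	return t4_set_rxmode(adap, adap->mbox, viid, -1, on ? 1 : 0, -1, -1,
	    -1, true);
}
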
5104 /**
5105  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5106  *	@adap: the adapter
5107  *	@mbox: mailbox to use for the FW command
5108  *	@viid: the VI id
5109  *	@free: if true any existing filters for this VI id are first removed
5110  *	@naddr: the number of MAC addresses to allocate filters for (up to
5110  *	the size of the hardware's exact-match table; sent in chunks of 7)
5111  *	@addr: the MAC address(es)
5112  *	@idx: where to store the index of each allocated filter
5113  *	@hash: pointer to hash address filter bitmap
5114  *	@sleep_ok: call is allowed to sleep
5115  *
5116  *	Allocates an exact-match filter for each of the supplied addresses and
5117  *	sets it to the corresponding address.  If @idx is not %NULL it should
5118  *	have at least @naddr entries, each of which will be set to the index of
5119  *	the filter allocated for the corresponding MAC address.  If a filter
5120  *	could not be allocated for an address its index is set to 0xffff.
5121  *	If @hash is not %NULL addresses that fail to allocate an exact filter
5122  *	are hashed and used to update the hash filter bitmap pointed at by @hash.
5123  *
5124  *	Returns a negative error number or the number of filters allocated.
5125  */
5126 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5127 		      unsigned int viid, bool free, unsigned int naddr,
5128 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5129 {
5130 	int offset, ret = 0;
5131 	struct fw_vi_mac_cmd c;
5132 	unsigned int nfilters = 0;
5133 	unsigned int max_naddr = is_t4(adap) ?
5134 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
5135 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5136 	unsigned int rem = naddr;
5137 
5138 	if (naddr > max_naddr)
5139 		return -EINVAL;
5140 
5141 	for (offset = 0; offset < naddr; /**/) {
5142 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5143 					 ? rem
5144 					 : ARRAY_SIZE(c.u.exact));
5145 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5146 						     u.exact[fw_naddr]), 16);
5147 		struct fw_vi_mac_exact *p;
5148 		int i;
5149 
5150 		memset(&c, 0, sizeof(c));
5151 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5152 				     F_FW_CMD_REQUEST |
5153 				     F_FW_CMD_WRITE |
5154 				     V_FW_CMD_EXEC(free) |
5155 				     V_FW_VI_MAC_CMD_VIID(viid));
5156 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5157 					    V_FW_CMD_LEN16(len16));
5158 
5159 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5160 			p->valid_to_idx = htons(
5161 				F_FW_VI_MAC_CMD_VALID |
5162 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5163 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5164 		}
5165 
5166 		/*
5167 		 * It's okay if we run out of space in our MAC address arena.
5168 		 * Some of the addresses we submit may get stored so we need
5169 		 * to run through the reply to see what the results were ...
5170 		 */
5171 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5172 		if (ret && ret != -FW_ENOMEM)
5173 			break;
5174 
5175 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5176 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5177 
5178 			if (idx)
5179 				idx[offset+i] = (index >= max_naddr
5180 						 ? 0xffff
5181 						 : index);
5182 			if (index < max_naddr)
5183 				nfilters++;
5184 			else if (hash)
5185 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5186 		}
5187 
5188 		free = false;
5189 		offset += fw_naddr;
5190 		rem -= fw_naddr;
5191 	}
5192 
5193 	if (ret == 0 || ret == -FW_ENOMEM)
5194 		ret = nfilters;
5195 	return ret;
5196 }
5197 
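/*
 * Illustrative sketch: loading a multicast list.  Addresses that don't
 * fit in the exact-match arena are accumulated into the 64-bit hash by
 * t4_alloc_mac_filt, which we then program with t4_set_addr_hash.
 */
static inline int example_load_mcast(struct adapter *adap, unsigned int viid,
				     const u8 **mcaddr, unsigned int naddr)
{
	u64 hash = 0;
	int ret;

	/* free = true first drops the filters previously tied to this VI. */
	ret = t4_alloc_mac_filt(adap, adap->mbox, viid, true, naddr, mcaddr,
	    NULL, &hash, true);
	if (ret < 0)
		return ret;
	return t4_set_addr_hash(adap, adap->mbox, viid, false, hash, true);
}
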
5198 /**
5199  *	t4_change_mac - modifies the exact-match filter for a MAC address
5200  *	@adap: the adapter
5201  *	@mbox: mailbox to use for the FW command
5202  *	@viid: the VI id
5203  *	@idx: index of existing filter for old value of MAC address, or -1
5204  *	@addr: the new MAC address value
5205  *	@persist: whether a new MAC allocation should be persistent
5206  *	@add_smt: if true also add the address to the HW SMT
5207  *
5208  *	Modifies an exact-match filter and sets it to the new MAC address if
5209  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5210  *	latter case the address is added persistently if @persist is %true.
5211  *
5212  *	Note that in general it is not possible to modify the value of a given
5213  *	filter, so the generic way to modify an address filter is to free the one
5214  *	being used by the old address value and allocate a new filter for the
5215  *	new address value.
5216  *
5217  *	Returns a negative error number or the index of the filter with the new
5218  *	MAC value.  Note that this index may differ from @idx.
5219  */
5220 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5221 		  int idx, const u8 *addr, bool persist, bool add_smt)
5222 {
5223 	int ret, mode;
5224 	struct fw_vi_mac_cmd c;
5225 	struct fw_vi_mac_exact *p = c.u.exact;
5226 	unsigned int max_mac_addr = is_t4(adap) ?
5227 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5228 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5229 
5230 	if (idx < 0)                             /* new allocation */
5231 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5232 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5233 
5234 	memset(&c, 0, sizeof(c));
5235 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5236 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5237 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5238 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5239 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5240 				V_FW_VI_MAC_CMD_IDX(idx));
5241 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5242 
5243 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5244 	if (ret == 0) {
5245 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5246 		if (ret >= max_mac_addr)
5247 			ret = -ENOMEM;
5248 	}
5249 	return ret;
5250 }
5251 
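/*
 * Illustrative sketch: the usual t4_change_mac usage.  The caller caches
 * the returned filter index (the hypothetical *cached_idx below, -1 on
 * first use) and passes it back on the next change so the old filter is
 * reused rather than leaked.
 */
static inline int example_change_mac(struct adapter *adap, unsigned int viid,
				     int *cached_idx, const u8 *new_mac)
{
	int ret = t4_change_mac(adap, adap->mbox, viid, *cached_idx, new_mac,
	    true, true);

	if (ret >= 0)
		*cached_idx = ret;	/* may differ from the old index */
	return ret;
}
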
5252 /**
5253  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5254  *	@adap: the adapter
5255  *	@mbox: mailbox to use for the FW command
5256  *	@viid: the VI id
5257  *	@ucast: whether the hash filter should also match unicast addresses
5258  *	@vec: the value to be written to the hash filter
5259  *	@sleep_ok: call is allowed to sleep
5260  *
5261  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5262  */
5263 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5264 		     bool ucast, u64 vec, bool sleep_ok)
5265 {
5266 	struct fw_vi_mac_cmd c;
5267 	u32 val;
5268 
5269 	memset(&c, 0, sizeof(c));
5270 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5271 			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
5272 	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
5273 	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
5274 	c.freemacs_to_len16 = cpu_to_be32(val);
5275 	c.u.hash.hashvec = cpu_to_be64(vec);
5276 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5277 }
5278 
5279 /**
5280  *	t4_enable_vi - enable/disable a virtual interface
5281  *	@adap: the adapter
5282  *	@mbox: mailbox to use for the FW command
5283  *	@viid: the VI id
5284  *	@rx_en: 1=enable Rx, 0=disable Rx
5285  *	@tx_en: 1=enable Tx, 0=disable Tx
5286  *
5287  *	Enables/disables a virtual interface.
5288  */
5289 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5290 		 bool rx_en, bool tx_en)
5291 {
5292 	struct fw_vi_enable_cmd c;
5293 
5294 	memset(&c, 0, sizeof(c));
5295 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5296 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5297 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5298 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5299 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5300 }
5301 
5302 /**
5303  *	t4_identify_port - identify a VI's port by blinking its LED
5304  *	@adap: the adapter
5305  *	@mbox: mailbox to use for the FW command
5306  *	@viid: the VI id
5307  *	@nblinks: how many times to blink LED at 2.5 Hz
5308  *
5309  *	Identifies a VI's port by blinking its LED.
5310  */
5311 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5312 		     unsigned int nblinks)
5313 {
5314 	struct fw_vi_enable_cmd c;
5315 
5316 	memset(&c, 0, sizeof(c));
5317 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5318 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5319 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5320 	c.blinkdur = htons(nblinks);
5321 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5322 }
5323 
5324 /**
5325  *	t4_iq_free - free an ingress queue and its FLs
5326  *	@adap: the adapter
5327  *	@mbox: mailbox to use for the FW command
5328  *	@pf: the PF owning the queues
5329  *	@vf: the VF owning the queues
5330  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5331  *	@iqid: ingress queue id
5332  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5333  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5334  *
5335  *	Frees an ingress queue and its associated FLs, if any.
5336  */
5337 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5338 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5339 	       unsigned int fl0id, unsigned int fl1id)
5340 {
5341 	struct fw_iq_cmd c;
5342 
5343 	memset(&c, 0, sizeof(c));
5344 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5345 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5346 			    V_FW_IQ_CMD_VFN(vf));
5347 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5348 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5349 	c.iqid = htons(iqid);
5350 	c.fl0id = htons(fl0id);
5351 	c.fl1id = htons(fl1id);
5352 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5353 }
5354 
5355 /**
5356  *	t4_eth_eq_free - free an Ethernet egress queue
5357  *	@adap: the adapter
5358  *	@mbox: mailbox to use for the FW command
5359  *	@pf: the PF owning the queue
5360  *	@vf: the VF owning the queue
5361  *	@eqid: egress queue id
5362  *
5363  *	Frees an Ethernet egress queue.
5364  */
5365 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5366 		   unsigned int vf, unsigned int eqid)
5367 {
5368 	struct fw_eq_eth_cmd c;
5369 
5370 	memset(&c, 0, sizeof(c));
5371 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5372 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5373 			    V_FW_EQ_ETH_CMD_VFN(vf));
5374 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5375 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5376 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5377 }
5378 
5379 /**
5380  *	t4_ctrl_eq_free - free a control egress queue
5381  *	@adap: the adapter
5382  *	@mbox: mailbox to use for the FW command
5383  *	@pf: the PF owning the queue
5384  *	@vf: the VF owning the queue
5385  *	@eqid: egress queue id
5386  *
5387  *	Frees a control egress queue.
5388  */
5389 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5390 		    unsigned int vf, unsigned int eqid)
5391 {
5392 	struct fw_eq_ctrl_cmd c;
5393 
5394 	memset(&c, 0, sizeof(c));
5395 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5396 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5397 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5398 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5399 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5400 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5401 }
5402 
5403 /**
5404  *	t4_ofld_eq_free - free an offload egress queue
5405  *	@adap: the adapter
5406  *	@mbox: mailbox to use for the FW command
5407  *	@pf: the PF owning the queue
5408  *	@vf: the VF owning the queue
5409  *	@eqid: egress queue id
5410  *
5411  *	Frees an offload egress queue.
5412  */
5413 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5414 		    unsigned int vf, unsigned int eqid)
5415 {
5416 	struct fw_eq_ofld_cmd c;
5417 
5418 	memset(&c, 0, sizeof(c));
5419 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5420 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5421 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5422 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5423 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5424 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5425 }
5426 
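/*
 * Illustrative sketch: tearing down a typical Ethernet queue set.  The
 * ingress queue is freed together with its free list (no FL1, hence
 * 0xffff) and the egress queue separately; all ids are hypothetical.
 */
static inline int example_free_queue_set(struct adapter *adap,
					 unsigned int iqid, unsigned int fl0id,
					 unsigned int eqid)
{
	int ret = t4_iq_free(adap, adap->mbox, adap->pf, 0,
	    FW_IQ_TYPE_FL_INT_CAP, iqid, fl0id, 0xffff);

	if (ret)
		return ret;
	return t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, eqid);
}
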
5427 /**
5428  *	t4_handle_fw_rpl - process a FW reply message
5429  *	@adap: the adapter
5430  *	@rpl: start of the FW message
5431  *
5432  *	Processes a FW message, such as link state change messages.
5433  */
5434 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5435 {
5436 	u8 opcode = *(const u8 *)rpl;
5437 	const struct fw_port_cmd *p = (const void *)rpl;
5438 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5439 
5440 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5441 		/* link/module state change message */
5442 		int speed = 0, fc = 0, i;
5443 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5444 		struct port_info *pi = NULL;
5445 		struct link_config *lc;
5446 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5447 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5448 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5449 
5450 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5451 			fc |= PAUSE_RX;
5452 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5453 			fc |= PAUSE_TX;
5454 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5455 			speed = SPEED_100;
5456 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5457 			speed = SPEED_1000;
5458 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5459 			speed = SPEED_10000;
5460 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5461 			speed = SPEED_40000;
5462 
5463 		for_each_port(adap, i) {
5464 			pi = adap2pinfo(adap, i);
5465 			if (pi->tx_chan == chan)
5466 				break;
5467 		}
5468 		lc = &pi->link_cfg;
5469 
5470 		if (mod != pi->mod_type) {
5471 			pi->mod_type = mod;
5472 			t4_os_portmod_changed(adap, i);
5473 		}
5474 		if (link_ok != lc->link_ok || speed != lc->speed ||
5475 		    fc != lc->fc) {                    /* something changed */
5476 			int reason;
5477 
5478 			if (!link_ok && lc->link_ok)
5479 				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5480 			else
5481 				reason = -1;
5482 
5483 			lc->link_ok = link_ok;
5484 			lc->speed = speed;
5485 			lc->fc = fc;
5486 			lc->supported = ntohs(p->u.info.pcap);
5487 			t4_os_link_changed(adap, i, link_ok, reason);
5488 		}
5489 	} else {
5490 		CH_WARN_RATELIMIT(adap,
5491 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5492 		return -EINVAL;
5493 	}
5494 	return 0;
5495 }
5496 
5497 /**
5498  *	get_pci_mode - determine a card's PCI mode
5499  *	@adapter: the adapter
5500  *	@p: where to store the PCI settings
5501  *
5502  *	Determines a card's PCI mode and associated parameters, such as speed
5503  *	and width.
5504  */
5505 static void __devinit get_pci_mode(struct adapter *adapter,
5506 				   struct pci_params *p)
5507 {
5508 	u16 val;
5509 	u32 pcie_cap;
5510 
5511 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5512 	if (pcie_cap) {
5513 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5514 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5515 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5516 	}
5517 }
5518 
5519 /**
5520  *	init_link_config - initialize a link's SW state
5521  *	@lc: structure holding the link state
5522  *	@caps: link capabilities
5523  *
5524  *	Initializes the SW state maintained for each link, including the link's
5525  *	capabilities and default speed/flow-control/autonegotiation settings.
5526  */
5527 static void __devinit init_link_config(struct link_config *lc,
5528 				       unsigned int caps)
5529 {
5530 	lc->supported = caps;
5531 	lc->requested_speed = 0;
5532 	lc->speed = 0;
5533 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5534 	if (lc->supported & FW_PORT_CAP_ANEG) {
5535 		lc->advertising = lc->supported & ADVERT_MASK;
5536 		lc->autoneg = AUTONEG_ENABLE;
5537 		lc->requested_fc |= PAUSE_AUTONEG;
5538 	} else {
5539 		lc->advertising = 0;
5540 		lc->autoneg = AUTONEG_DISABLE;
5541 	}
5542 }
5543 
5544 static int __devinit get_flash_params(struct adapter *adapter)
5545 {
5546 	int ret;
5547 	u32 info = 0;
5548 
5549 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5550 	if (!ret)
5551 		ret = sf1_read(adapter, 3, 0, 1, &info);
5552 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5553 	if (ret < 0)
5554 		return ret;
5555 
5556 	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
5557 		return -EINVAL;
5558 	info >>= 16;                           /* log2 of size */
5559 	if (info >= 0x14 && info < 0x18)
5560 		adapter->params.sf_nsec = 1 << (info - 16);
5561 	else if (info == 0x18)
5562 		adapter->params.sf_nsec = 64;
5563 	else
5564 		return -EINVAL;
5565 	adapter->params.sf_size = 1 << info;
5566 	return 0;
5567 }
5568 
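/*
 * Worked example of the decode above: a 4 MB part returns a density code
 * of 0x16 (log2 of 4 MB is 22), so sf_nsec = 1 << (0x16 - 16) = 64
 * sectors of 64 KB each, and sf_size = 1 << 0x16 = 4 MB.
 */
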
5569 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5570 						  u8 range)
5571 {
5572 	u16 val;
5573 	u32 pcie_cap;
5574 
5575 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5576 	if (pcie_cap) {
5577 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5578 		val &= 0xfff0;
5579 		val |= range ;
5580 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5581 	}
5582 }
5583 
5584 /**
5585  *	t4_prep_adapter - prepare SW and HW for operation
5586  *	@adapter: the adapter
5588  *
5589  *	Initialize adapter SW state for the various HW modules, set initial
5590  *	values for some adapter tunables, take PHYs out of reset, and
5591  *	initialize the MDIO interface.
5592  */
5593 int __devinit t4_prep_adapter(struct adapter *adapter)
5594 {
5595 	int ret;
5596 	uint16_t device_id;
5597 	uint32_t pl_rev;
5598 
5599 	get_pci_mode(adapter, &adapter->params.pci);
5600 
5601 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5602 	adapter->params.chipid = G_CHIPID(pl_rev);
5603 	adapter->params.rev = G_REV(pl_rev);
5604 	if (adapter->params.chipid == 0) {
5605 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5606 		adapter->params.chipid = CHELSIO_T4;
5607 
5608 		/* T4A1 chip is not supported */
5609 		if (adapter->params.rev == 1) {
5610 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5611 			return -EINVAL;
5612 		}
5613 	}
5614 	adapter->params.pci.vpd_cap_addr =
5615 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5616 
5617 	ret = get_flash_params(adapter);
5618 	if (ret < 0)
5619 		return ret;
5620 
5621 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5622 	if (ret < 0)
5623 		return ret;
5624 
5625 	/* Cards with real ASICs have the chipid in the PCIe device id */
5626 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5627 	if (device_id >> 12 == adapter->params.chipid)
5628 		adapter->params.cim_la_size = CIMLA_SIZE;
5629 	else {
5630 		/* FPGA */
5631 		adapter->params.fpga = 1;
5632 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5633 	}
5634 
5635 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5636 
5637 	/*
5638 	 * Default port and clock for debugging in case we can't reach FW.
5639 	 */
5640 	adapter->params.nports = 1;
5641 	adapter->params.portvec = 1;
5642 	adapter->params.vpd.cclk = 50000;
5643 
5644 	/* Set pci completion timeout value to 4 seconds. */
5645 	set_pcie_completion_timeout(adapter, 0xd);
5646 	return 0;
5647 }
5648 
5649 /**
5650  *	t4_init_tp_params - initialize adap->params.tp
5651  *	@adap: the adapter
5652  *
5653  *	Initialize various fields of the adapter's TP Parameters structure.
5654  */
5655 int __devinit t4_init_tp_params(struct adapter *adap)
5656 {
5657 	int chan;
5658 	u32 v;
5659 
5660 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5661 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5662 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5663 
5664 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5665 	for (chan = 0; chan < NCHAN; chan++)
5666 		adap->params.tp.tx_modq[chan] = chan;
5667 
5668 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5669 			 &adap->params.tp.ingress_config, 1,
5670 			 A_TP_INGRESS_CONFIG);
5671 	refresh_vlan_pri_map(adap);
5672 
5673 	return 0;
5674 }
5675 
5676 /**
5677  *	t4_filter_field_shift - calculate filter field shift
5678  *	@adap: the adapter
5679  *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5680  *
5681  *	Return the shift position of a filter field within the Compressed
5682  *	Filter Tuple.  The filter field is specified via its selection bit
5683  *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
5684  */
5685 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5686 {
5687 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5688 	unsigned int sel;
5689 	int field_shift;
5690 
5691 	if ((filter_mode & filter_sel) == 0)
5692 		return -1;
5693 
5694 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5695 		switch (filter_mode & sel) {
5696 		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5697 		case F_PORT:          field_shift += W_FT_PORT;          break;
5698 		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5699 		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5700 		case F_TOS:           field_shift += W_FT_TOS;           break;
5701 		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5702 		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5703 		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5704 		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5705 		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5706 		}
5707 	}
5708 	return field_shift;
5709 }
5710 
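/*
 * Illustrative sketch: positioning a raw field value inside the
 * Compressed Filter Tuple using the shift computed above.  The VLAN tag
 * value here is hypothetical.
 */
static inline u64 example_vlan_tuple_field(const struct adapter *adap,
					   u16 vlan)
{
	int shift = t4_filter_field_shift(adap, F_VLAN);

	if (shift < 0)
		return 0;	/* VLAN is not part of the filter mode */
	return (u64)vlan << shift;
}
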
5711 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5712 {
5713 	u8 addr[6];
5714 	int ret, i, j;
5715 	struct fw_port_cmd c;
5716 	u16 rss_size;
5717 	adapter_t *adap = p->adapter;
5718 	u32 param, val;
5719 
5720 	memset(&c, 0, sizeof(c));
5721 
5722 	for (i = 0, j = -1; i <= p->port_id; i++) {
5723 		do {
5724 			j++;
5725 		} while ((adap->params.portvec & (1 << j)) == 0);
5726 	}
5727 
5728 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5729 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5730 			       V_FW_PORT_CMD_PORTID(j));
5731 	c.action_to_len16 = htonl(
5732 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5733 		FW_LEN16(c));
5734 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5735 	if (ret)
5736 		return ret;
5737 
5738 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5739 	if (ret < 0)
5740 		return ret;
5741 
5742 	p->vi[0].viid = ret;
5743 	p->tx_chan = j;
5744 	p->rx_chan_map = get_mps_bg_map(adap, j);
5745 	p->lport = j;
5746 	p->vi[0].rss_size = rss_size;
5747 	t4_os_set_hw_addr(adap, p->port_id, addr);
5748 
5749 	ret = ntohl(c.u.info.lstatus_to_modtype);
5750 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5751 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5752 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5753 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5754 
5755 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5756 
5757 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5758 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
5759 	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
5760 	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
5761 	if (ret)
5762 		p->vi[0].rss_base = 0xffff;
5763 	else {
5764 		/* MPASS((val >> 16) == rss_size); */
5765 		p->vi[0].rss_base = val & 0xffff;
5766 	}
5767 
5768 	return 0;
5769 }
5770 
5771 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
5772 		    int sleep_ok)
5773 {
5774 	struct fw_sched_cmd cmd;
5775 
5776 	memset(&cmd, 0, sizeof(cmd));
5777 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5778 				      F_FW_CMD_REQUEST |
5779 				      F_FW_CMD_WRITE);
5780 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5781 
5782 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5783 	cmd.u.config.type = type;
5784 	cmd.u.config.minmaxen = minmaxen;
5785 
5786 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5787 			       NULL, sleep_ok);
5788 }
5789 
5790 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5791 		    int rateunit, int ratemode, int channel, int cl,
5792 		    int minrate, int maxrate, int weight, int pktsize,
5793 		    int sleep_ok)
5794 {
5795 	struct fw_sched_cmd cmd;
5796 
5797 	memset(&cmd, 0, sizeof(cmd));
5798 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5799 				      F_FW_CMD_REQUEST |
5800 				      F_FW_CMD_WRITE);
5801 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5802 
5803 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5804 	cmd.u.params.type = type;
5805 	cmd.u.params.level = level;
5806 	cmd.u.params.mode = mode;
5807 	cmd.u.params.ch = channel;
5808 	cmd.u.params.cl = cl;
5809 	cmd.u.params.unit = rateunit;
5810 	cmd.u.params.rate = ratemode;
5811 	cmd.u.params.min = cpu_to_be32(minrate);
5812 	cmd.u.params.max = cpu_to_be32(maxrate);
5813 	cmd.u.params.weight = cpu_to_be16(weight);
5814 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
5815 
5816 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5817 			       NULL, sleep_ok);
5818 }
5819
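/*
 * Illustrative sketch: capping a scheduling class with an absolute
 * bitrate.  The FW_SCHED_* enum values are assumed to follow
 * t4fw_interface.h; min rate, weight, and packet size are left at
 * typical defaults for a simple per-class rate limit.
 */
static inline int example_rate_limit_class(struct adapter *adap, int channel,
					   int cl, int max_kbps)
{
	return t4_sched_params(adap, FW_SCHED_TYPE_PKTSCHED,
	    FW_SCHED_PARAMS_LEVEL_CL_RL, FW_SCHED_PARAMS_MODE_CLASS,
	    FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
	    channel, cl, 0, max_kbps, 0, 0, 1);
}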