xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
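
/*
 * Note: during early boot (cold != 0) the msleep() above busy-waits with
 * DELAY() because the scheduler isn't running yet; once the system is up
 * it yields the CPU via pause(9) instead.  For example, msleep(10) spins
 * for ~10 ms at boot but sleeps for ~10 ms thereafter.
 */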

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: index of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
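
/*
 * Illustrative sketch (not part of the driver): a typical caller builds a
 * firmware command structure and hands it to the mailbox.  This mirrors
 * what t4_fw_reset() elsewhere in this file does; the exact field and
 * macro names (struct fw_reset_cmd, F_FW_CMD_REQUEST, FW_LEN16, F_PIORST)
 * come from t4fw_interface.h and should be checked against that header.
 *
 *	struct fw_reset_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *	    F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	c.val = htonl(F_PIORST);
 *	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), NULL, true);
 */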

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
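
/*
 * Illustrative sketch (not part of the driver): reading 256 bytes from the
 * start of EDC0 into a local buffer.  Address, length and buffer are all
 * 32-bit aligned as required above; the data arrives as raw big-endian
 * bytes from the chip's memory.
 *
 *	__be32 buf[64];
 *	int ret;
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0, sizeof(buf), buf);
 *	if (ret)
 *		return ret;
 */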

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
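
/*
 * Worked example of the mapping above (assuming EEPROMSIZE is 17408 as
 * defined in common.h), with fn = 2 and sz = 1024, i.e. A = 2048:
 *
 *	t4_eeprom_ptov(512, 2, 1024)  -> 512 + 31744 = 32256           [0..1K)
 *	t4_eeprom_ptov(2048, 2, 1024) -> 17408 - 2048 + 2048 - 1024
 *	                                 = 16384                       [1K..3K)
 *	t4_eeprom_ptov(4096, 2, 1024) -> 4096 - 1024 - 2048 = 1024     [3K..ES)
 */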

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}
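
/*
 * For reference, each VPD-R information field walked above uses the
 * standard PCI VPD layout: a 3-byte header followed by the value.
 *
 *	byte 0-1: keyword, e.g. 'S','N'
 *	byte 2:   length of the value in bytes
 *	byte 3..: value
 *
 * So get_vpd_keyword_val() returns the offset of byte 3, and the value's
 * length sits at (offset - VPD_INFO_FLD_HDR_SIZE + 2), which is how
 * get_vpd_params() below recovers the field lengths.
 */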

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., matches what's on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
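
/*
 * Illustrative sketch (not part of the driver): interpreting the return
 * value of t4_check_fw_version().  CH_WARN is assumed to be the warning
 * macro from common.h.
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		return ret;	// unreadable version or major mismatch
 *	if (ret > 0)
 *		CH_WARN(adap, "minor/micro FW version mismatch, continuing\n");
 */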

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		    "FW image (%d) is not suitable for this adapter (%d)\n",
		    hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max size: 1024 * 512B = 512KB */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Legacy image.  Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x02: HP reserved.  Do not modify
		 * 0x03: EFI image.  Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID of boot image does not match Chelsio's\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Write the deferred first page, using the saved start pointer. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10 ms before the IBQ debug read access is
		 * allowed.  Wait up to 1 second, polling every 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
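
/*
 * Illustrative sketch (not part of the driver): dumping IBQ 0 in full.
 * Each IBQ entry is 16 bytes, so the whole queue is CIM_IBQ_SIZE * 4
 * 32-bit words, matching the nwords computation above.
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int n;
 *
 *	n = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
 *	if (n < 0)
 *		return n;
 *	// buf[0..n-1] now holds the queue contents
 */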

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1640 
1641 /**
1642  *	t4_cim_write - write a block into CIM internal address space
1643  *	@adap: the adapter
1644  *	@addr: the start address within the CIM address space
1645  *	@n: number of words to write
1646  *	@valp: set of values to write
1647  *
1648  *	Writes a block of 4-byte words into the CIM intenal address space.
1649  */
1650 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1651 		 const unsigned int *valp)
1652 {
1653 	int ret = 0;
1654 
1655 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1656 		return -EBUSY;
1657 
1658 	for ( ; !ret && n--; addr += 4) {
1659 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1660 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1661 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1662 				      0, 5, 2);
1663 	}
1664 	return ret;
1665 }
1666 
1667 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1668 {
1669 	return t4_cim_write(adap, addr, 1, &val);
1670 }
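
/*
 * Illustrative sketch (not part of the driver): a read-modify-write of a
 * single word in the CIM address space built from t4_cim_read() and
 * t4_cim_write().  The address and bit position are arbitrary assumptions
 * for the example.
 */
#if 0
static int example_cim_set_bit(struct adapter *adap, unsigned int addr,
			       unsigned int bit)
{
	unsigned int v;
	int ret;

	ret = t4_cim_read(adap, addr, 1, &v);	/* fetch the current word */
	if (ret)
		return ret;
	v |= 1U << bit;				/* set the requested bit */
	return t4_cim_write(adap, addr, 1, &v);	/* write it back */
}
#endif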
1671 
1672 /**
1673  *	t4_cim_ctl_read - read a block from CIM control region
1674  *	@adap: the adapter
1675  *	@addr: the start address within the CIM control region
1676  *	@n: number of words to read
1677  *	@valp: where to store the result
1678  *
1679  *	Reads a block of 4-byte words from the CIM control region.
1680  */
1681 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1682 		    unsigned int *valp)
1683 {
1684 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1685 }
1686 
1687 /**
1688  *	t4_cim_read_la - read CIM LA capture buffer
1689  *	@adap: the adapter
1690  *	@la_buf: where to store the LA data
1691  *	@wrptr: the HW write pointer within the capture buffer
1692  *
1693  *	Reads the contents of the CIM LA buffer with the most recent entry at
1694  *	the end	of the returned data and with the entry at @wrptr first.
1695  *	We try to leave the LA in the running state we find it in.
1696  */
1697 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1698 {
1699 	int i, ret;
1700 	unsigned int cfg, val, idx;
1701 
1702 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1703 	if (ret)
1704 		return ret;
1705 
1706 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1707 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1708 		if (ret)
1709 			return ret;
1710 	}
1711 
1712 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1713 	if (ret)
1714 		goto restart;
1715 
1716 	idx = G_UPDBGLAWRPTR(val);
1717 	if (wrptr)
1718 		*wrptr = idx;
1719 
1720 	for (i = 0; i < adap->params.cim_la_size; i++) {
1721 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1722 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1723 		if (ret)
1724 			break;
1725 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1726 		if (ret)
1727 			break;
1728 		if (val & F_UPDBGLARDEN) {
1729 			ret = -ETIMEDOUT;
1730 			break;
1731 		}
1732 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1733 		if (ret)
1734 			break;
1735 		idx = (idx + 1) & M_UPDBGLARDPTR;
1736 	}
1737 restart:
1738 	if (cfg & F_UPDBGLAEN) {
1739 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1740 				      cfg & ~F_UPDBGLARDEN);
1741 		if (!ret)
1742 			ret = r;
1743 	}
1744 	return ret;
1745 }
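
/*
 * Illustrative usage sketch (not part of the driver): capture the CIM LA
 * into a temporary buffer sized from adap->params.cim_la_size.  The
 * allocation details (M_DEVBUF, M_NOWAIT) are assumptions for the example.
 */
#if 0
static void example_dump_cim_la(struct adapter *adap)
{
	unsigned int wrptr;
	u32 *buf;

	buf = malloc(adap->params.cim_la_size * sizeof(u32), M_DEVBUF,
		     M_NOWAIT);
	if (buf == NULL)
		return;
	if (t4_cim_read_la(adap, buf, &wrptr) == 0) {
		/* buf[0] is the entry at wrptr; the newest entry is last */
	}
	free(buf, M_DEVBUF);
}
#endif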
1746 
1747 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1748 			unsigned int *pif_req_wrptr,
1749 			unsigned int *pif_rsp_wrptr)
1750 {
1751 	int i, j;
1752 	u32 cfg, val, req, rsp;
1753 
1754 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1755 	if (cfg & F_LADBGEN)
1756 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1757 
1758 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1759 	req = G_POLADBGWRPTR(val);
1760 	rsp = G_PILADBGWRPTR(val);
1761 	if (pif_req_wrptr)
1762 		*pif_req_wrptr = req;
1763 	if (pif_rsp_wrptr)
1764 		*pif_rsp_wrptr = rsp;
1765 
1766 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1767 		for (j = 0; j < 6; j++) {
1768 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1769 				     V_PILADBGRDPTR(rsp));
1770 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1771 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1772 			req++;
1773 			rsp++;
1774 		}
1775 		req = (req + 2) & M_POLADBGRDPTR;
1776 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1777 	}
1778 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1779 }
1780 
1781 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1782 {
1783 	u32 cfg;
1784 	int i, j, idx;
1785 
1786 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1787 	if (cfg & F_LADBGEN)
1788 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1789 
1790 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1791 		for (j = 0; j < 5; j++) {
1792 			idx = 8 * i + j;
1793 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1794 				     V_PILADBGRDPTR(idx));
1795 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1796 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1797 		}
1798 	}
1799 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1800 }
1801 
1802 /**
1803  *	t4_tp_read_la - read TP LA capture buffer
1804  *	@adap: the adapter
1805  *	@la_buf: where to store the LA data
1806  *	@wrptr: the HW write pointer within the capture buffer
1807  *
1808  *	Reads the contents of the TP LA buffer with the most recent entry at
1809  *	the end	of the returned data and with the entry at @wrptr first.
1810  *	We leave the LA in the running state we find it in.
1811  */
1812 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1813 {
1814 	bool last_incomplete;
1815 	unsigned int i, cfg, val, idx;
1816 
1817 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1818 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1819 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1820 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1821 
1822 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1823 	idx = G_DBGLAWPTR(val);
1824 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1825 	if (last_incomplete)
1826 		idx = (idx + 1) & M_DBGLARPTR;
1827 	if (wrptr)
1828 		*wrptr = idx;
1829 
1830 	val &= 0xffff;
1831 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1832 	val |= adap->params.tp.la_mask;
1833 
1834 	for (i = 0; i < TPLA_SIZE; i++) {
1835 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1836 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1837 		idx = (idx + 1) & M_DBGLARPTR;
1838 	}
1839 
1840 	/* Wipe out last entry if it isn't valid */
1841 	if (last_incomplete)
1842 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1843 
1844 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1845 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1846 			     cfg | adap->params.tp.la_mask);
1847 }
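
/*
 * Illustrative usage sketch (not part of the driver): snapshot the TP LA.
 * A static buffer is used only to keep this example off the kernel stack;
 * TPLA_SIZE 64-bit entries come back in oldest-first order.
 */
#if 0
static void example_dump_tp_la(struct adapter *adap)
{
	static u64 buf[TPLA_SIZE];
	unsigned int wrptr;

	t4_tp_read_la(adap, buf, &wrptr);
	/* buf[0] is the oldest entry (at wrptr), buf[TPLA_SIZE - 1] the newest */
}
#endif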
1848 
1849 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1850 {
1851 	unsigned int i, j;
1852 
1853 	for (i = 0; i < 8; i++) {
1854 		u32 *p = la_buf + i;
1855 
1856 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1857 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1858 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1859 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1860 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1861 	}
1862 }
1863 
1864 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1865 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1866 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1867 
1868 /**
1869  *	t4_link_start - apply link configuration to MAC/PHY
1870  *	@adap: the adapter
1871  *	@mbox: mbox to use for the FW command
1872  *	@port: the port id
1873  *	@lc: the requested link configuration
1874  *	Set up a port's MAC and PHY according to a desired link configuration.
1875  *	- If the PHY can auto-negotiate first decide what to advertise, then
1876  *	  enable/disable auto-negotiation as desired, and reset.
1877  *	- If the PHY does not auto-negotiate just reset it.
1878  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1879  *	  otherwise do it later based on the outcome of auto-negotiation.
1880  */
1881 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1882 		  struct link_config *lc)
1883 {
1884 	struct fw_port_cmd c;
1885 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1886 
1887 	lc->link_ok = 0;
1888 	if (lc->requested_fc & PAUSE_RX)
1889 		fc |= FW_PORT_CAP_FC_RX;
1890 	if (lc->requested_fc & PAUSE_TX)
1891 		fc |= FW_PORT_CAP_FC_TX;
1892 
1893 	memset(&c, 0, sizeof(c));
1894 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1895 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1896 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1897 				  FW_LEN16(c));
1898 
1899 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1900 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1901 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1902 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1903 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1904 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1905 	} else
1906 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1907 
1908 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1909 }
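
/*
 * Illustrative usage sketch (not part of the driver): request symmetric
 * pause with autonegotiation via t4_link_start().  The port_info fields
 * (link_cfg, tx_chan) and adap->mbox are assumptions about the surrounding
 * driver state.
 */
#if 0
static int example_bring_up_link(struct adapter *adap, struct port_info *pi)
{
	struct link_config *lc = &pi->link_cfg;

	lc->requested_fc = PAUSE_RX | PAUSE_TX;	/* symmetric pause */
	lc->autoneg = AUTONEG_ENABLE;		/* advertise and negotiate */
	return t4_link_start(adap, adap->mbox, pi->tx_chan, lc);
}
#endif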
1910 
1911 /**
1912  *	t4_restart_aneg - restart autonegotiation
1913  *	@adap: the adapter
1914  *	@mbox: mbox to use for the FW command
1915  *	@port: the port id
1916  *
1917  *	Restarts autonegotiation for the selected port.
1918  */
1919 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1920 {
1921 	struct fw_port_cmd c;
1922 
1923 	memset(&c, 0, sizeof(c));
1924 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1925 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1926 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1927 				  FW_LEN16(c));
1928 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1929 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1930 }
1931 
1932 struct intr_info {
1933 	unsigned int mask;       /* bits to check in interrupt status */
1934 	const char *msg;         /* message to print or NULL */
1935 	short stat_idx;          /* stat counter to increment or -1 */
1936 	unsigned short fatal;    /* whether the condition reported is fatal */
1937 };
1938 
1939 /**
1940  *	t4_handle_intr_status - table-driven interrupt handler
1941  *	@adapter: the adapter that generated the interrupt
1942  *	@reg: the interrupt status register to process
1943  *	@acts: table of interrupt actions
1944  *
1945  *	A table-driven interrupt handler that applies a set of masks to an
1946  *	interrupt status word and performs the corresponding actions if the
1947  *	interrupts described by the mask have occurred.  The actions include
1948  *	optionally emitting a warning or alert message.  The table is terminated
1949  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1950  *	conditions.
1951  */
1952 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1953 				 const struct intr_info *acts)
1954 {
1955 	int fatal = 0;
1956 	unsigned int mask = 0;
1957 	unsigned int status = t4_read_reg(adapter, reg);
1958 
1959 	for ( ; acts->mask; ++acts) {
1960 		if (!(status & acts->mask))
1961 			continue;
1962 		if (acts->fatal) {
1963 			fatal++;
1964 			CH_ALERT(adapter, "%s (0x%x)\n",
1965 				 acts->msg, status & acts->mask);
1966 		} else if (acts->msg)
1967 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1968 					  acts->msg, status & acts->mask);
1969 		mask |= acts->mask;
1970 	}
1971 	status &= mask;
1972 	if (status)                           /* clear processed interrupts */
1973 		t4_write_reg(adapter, reg, status);
1974 	return fatal;
1975 }
1976 
1977 /*
1978  * Interrupt handler for the PCIE module.
1979  */
1980 static void pcie_intr_handler(struct adapter *adapter)
1981 {
1982 	static struct intr_info sysbus_intr_info[] = {
1983 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1984 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1985 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1986 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1987 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1988 		{ 0 }
1989 	};
1990 	static struct intr_info pcie_port_intr_info[] = {
1991 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1992 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1993 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1994 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1995 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1996 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1997 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1998 		{ F_RDPE, "Rx data parity error", -1, 1 },
1999 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
2000 		{ 0 }
2001 	};
2002 	static struct intr_info pcie_intr_info[] = {
2003 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2004 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2005 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2006 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2007 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2008 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2009 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2010 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2011 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2012 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2013 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2014 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2015 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2016 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2017 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2018 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2019 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2020 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2021 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2022 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2023 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2024 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2025 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2026 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2027 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2028 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2029 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2030 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2031 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2032 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2033 		  0 },
2034 		{ 0 }
2035 	};
2036 
2037 	static struct intr_info t5_pcie_intr_info[] = {
2038 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2039 		  -1, 1 },
2040 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2041 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2042 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2043 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2044 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2045 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2046 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2047 		  -1, 1 },
2048 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2049 		  -1, 1 },
2050 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2051 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2052 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2053 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2054 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2055 		  -1, 1 },
2056 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2057 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2058 		{ F_HREQWRPERR, "PCI HMA channel write request parity error", -1, 1 },
2059 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2060 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2061 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2062 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2063 		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2064 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2065 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2066 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2067 		  -1, 1 },
2068 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2069 		  -1, 1 },
2070 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2071 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2072 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2073 		{ F_READRSPERR, "Outbound read error", -1,
2074 		  0 },
2075 		{ 0 }
2076 	};
2077 
2078 	int fat;
2079 
2080 	if (is_t4(adapter))
2081 		fat = t4_handle_intr_status(adapter,
2082 					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2083 					    sysbus_intr_info) +
2084 		      t4_handle_intr_status(adapter,
2085 					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2086 					    pcie_port_intr_info) +
2087 		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2088 					    pcie_intr_info);
2089 	else
2090 		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2091 					    t5_pcie_intr_info);
2092 	if (fat)
2093 		t4_fatal_err(adapter);
2094 }
2095 
2096 /*
2097  * TP interrupt handler.
2098  */
2099 static void tp_intr_handler(struct adapter *adapter)
2100 {
2101 	static struct intr_info tp_intr_info[] = {
2102 		{ 0x3fffffff, "TP parity error", -1, 1 },
2103 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2104 		{ 0 }
2105 	};
2106 
2107 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2108 		t4_fatal_err(adapter);
2109 }
2110 
2111 /*
2112  * SGE interrupt handler.
2113  */
2114 static void sge_intr_handler(struct adapter *adapter)
2115 {
2116 	u64 v;
2117 	u32 err;
2118 
2119 	static struct intr_info sge_intr_info[] = {
2120 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2121 		  "SGE received CPL exceeding IQE size", -1, 1 },
2122 		{ F_ERR_INVALID_CIDX_INC,
2123 		  "SGE GTS CIDX increment too large", -1, 0 },
2124 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2125 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2126 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2127 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2128 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2129 		  0 },
2130 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2131 		  0 },
2132 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2133 		  0 },
2134 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2135 		  0 },
2136 		{ F_ERR_ING_CTXT_PRIO,
2137 		  "SGE too many priority ingress contexts", -1, 0 },
2138 		{ F_ERR_EGR_CTXT_PRIO,
2139 		  "SGE too many priority egress contexts", -1, 0 },
2140 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2141 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2142 		{ 0 }
2143 	};
2144 
2145 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2146 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2147 	if (v) {
2148 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2149 			 (unsigned long long)v);
2150 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2151 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2152 	}
2153 
2154 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2155 
2156 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2157 	if (err & F_ERROR_QID_VALID) {
2158 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2159 		if (err & F_UNCAPTURED_ERROR)
2160 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2161 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2162 			     F_UNCAPTURED_ERROR);
2163 	}
2164 
2165 	if (v != 0)
2166 		t4_fatal_err(adapter);
2167 }
2168 
2169 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2170 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2171 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2172 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2173 
2174 /*
2175  * CIM interrupt handler.
2176  */
2177 static void cim_intr_handler(struct adapter *adapter)
2178 {
2179 	static struct intr_info cim_intr_info[] = {
2180 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2181 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2182 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2183 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2184 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2185 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2186 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2187 		{ 0 }
2188 	};
2189 	static struct intr_info cim_upintr_info[] = {
2190 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2191 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2192 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2193 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2194 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2195 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2196 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2197 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2198 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2199 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2200 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2201 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2202 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2203 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2204 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2205 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2206 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
2207 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
2208 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
2209 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
2210 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
2211 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
2212 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
2213 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
2214 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
2215 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
2216 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
2217 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
2218 		{ 0 }
2219 	};
2220 	int fat;
2221 
2222 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2223 		t4_report_fw_error(adapter);
2224 
2225 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2226 				    cim_intr_info) +
2227 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2228 				    cim_upintr_info);
2229 	if (fat)
2230 		t4_fatal_err(adapter);
2231 }
2232 
2233 /*
2234  * ULP RX interrupt handler.
2235  */
2236 static void ulprx_intr_handler(struct adapter *adapter)
2237 {
2238 	static struct intr_info ulprx_intr_info[] = {
2239 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2240 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2241 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2242 		{ 0 }
2243 	};
2244 
2245 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2246 		t4_fatal_err(adapter);
2247 }
2248 
2249 /*
2250  * ULP TX interrupt handler.
2251  */
2252 static void ulptx_intr_handler(struct adapter *adapter)
2253 {
2254 	static struct intr_info ulptx_intr_info[] = {
2255 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2256 		  0 },
2257 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2258 		  0 },
2259 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2260 		  0 },
2261 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2262 		  0 },
2263 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2264 		{ 0 }
2265 	};
2266 
2267 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2268 		t4_fatal_err(adapter);
2269 }
2270 
2271 /*
2272  * PM TX interrupt handler.
2273  */
2274 static void pmtx_intr_handler(struct adapter *adapter)
2275 {
2276 	static struct intr_info pmtx_intr_info[] = {
2277 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2278 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2279 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2280 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2281 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2282 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2283 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2284 		  1 },
2285 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2286 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2287 		{ 0 }
2288 	};
2289 
2290 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2291 		t4_fatal_err(adapter);
2292 }
2293 
2294 /*
2295  * PM RX interrupt handler.
2296  */
2297 static void pmrx_intr_handler(struct adapter *adapter)
2298 {
2299 	static struct intr_info pmrx_intr_info[] = {
2300 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2301 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2302 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2303 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2304 		  1 },
2305 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2306 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2307 		{ 0 }
2308 	};
2309 
2310 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2311 		t4_fatal_err(adapter);
2312 }
2313 
2314 /*
2315  * CPL switch interrupt handler.
2316  */
2317 static void cplsw_intr_handler(struct adapter *adapter)
2318 {
2319 	static struct intr_info cplsw_intr_info[] = {
2320 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2321 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2322 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2323 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2324 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2325 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2326 		{ 0 }
2327 	};
2328 
2329 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2330 		t4_fatal_err(adapter);
2331 }
2332 
2333 /*
2334  * LE interrupt handler.
2335  */
2336 static void le_intr_handler(struct adapter *adap)
2337 {
2338 	static struct intr_info le_intr_info[] = {
2339 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2340 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2341 		{ F_PARITYERR, "LE parity error", -1, 1 },
2342 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2343 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2344 		{ 0 }
2345 	};
2346 
2347 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2348 		t4_fatal_err(adap);
2349 }
2350 
2351 /*
2352  * MPS interrupt handler.
2353  */
2354 static void mps_intr_handler(struct adapter *adapter)
2355 {
2356 	static struct intr_info mps_rx_intr_info[] = {
2357 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2358 		{ 0 }
2359 	};
2360 	static struct intr_info mps_tx_intr_info[] = {
2361 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2362 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2363 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2364 		  -1, 1 },
2365 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2366 		  -1, 1 },
2367 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2368 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2369 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2370 		{ 0 }
2371 	};
2372 	static struct intr_info mps_trc_intr_info[] = {
2373 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2374 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2375 		  1 },
2376 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2377 		{ 0 }
2378 	};
2379 	static struct intr_info mps_stat_sram_intr_info[] = {
2380 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2381 		{ 0 }
2382 	};
2383 	static struct intr_info mps_stat_tx_intr_info[] = {
2384 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2385 		{ 0 }
2386 	};
2387 	static struct intr_info mps_stat_rx_intr_info[] = {
2388 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2389 		{ 0 }
2390 	};
2391 	static struct intr_info mps_cls_intr_info[] = {
2392 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2393 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2394 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2395 		{ 0 }
2396 	};
2397 
2398 	int fat;
2399 
2400 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2401 				    mps_rx_intr_info) +
2402 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2403 				    mps_tx_intr_info) +
2404 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2405 				    mps_trc_intr_info) +
2406 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2407 				    mps_stat_sram_intr_info) +
2408 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2409 				    mps_stat_tx_intr_info) +
2410 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2411 				    mps_stat_rx_intr_info) +
2412 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2413 				    mps_cls_intr_info);
2414 
2415 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2416 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2417 	if (fat)
2418 		t4_fatal_err(adapter);
2419 }
2420 
2421 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2422 
2423 /*
2424  * EDC/MC interrupt handler.
2425  */
2426 static void mem_intr_handler(struct adapter *adapter, int idx)
2427 {
2428 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2429 
2430 	unsigned int addr, cnt_addr, v;
2431 
2432 	if (idx <= MEM_EDC1) {
2433 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2434 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2435 	} else {
2436 		if (is_t4(adapter)) {
2437 			addr = A_MC_INT_CAUSE;
2438 			cnt_addr = A_MC_ECC_STATUS;
2439 		} else {
2440 			addr = A_MC_P_INT_CAUSE;
2441 			cnt_addr = A_MC_P_ECC_STATUS;
2442 		}
2443 	}
2444 
2445 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2446 	if (v & F_PERR_INT_CAUSE)
2447 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2448 	if (v & F_ECC_CE_INT_CAUSE) {
2449 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2450 
2451 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2452 		CH_WARN_RATELIMIT(adapter,
2453 				  "%u %s correctable ECC data error%s\n",
2454 				  cnt, name[idx], cnt > 1 ? "s" : "");
2455 	}
2456 	if (v & F_ECC_UE_INT_CAUSE)
2457 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2458 			 name[idx]);
2459 
2460 	t4_write_reg(adapter, addr, v);
2461 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2462 		t4_fatal_err(adapter);
2463 }
2464 
2465 /*
2466  * MA interrupt handler.
2467  */
2468 static void ma_intr_handler(struct adapter *adapter)
2469 {
2470 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2471 
2472 	if (status & F_MEM_PERR_INT_CAUSE) {
2473 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2474 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
2475 		if (is_t5(adapter))
2476 			CH_ALERT(adapter,
2477 				 "MA parity error, parity status %#x\n",
2478 				 t4_read_reg(adapter,
2479 				 	     A_MA_PARITY_ERROR_STATUS2));
2480 	}
2481 	if (status & F_MEM_WRAP_INT_CAUSE) {
2482 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2483 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2484 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2485 			 G_MEM_WRAP_ADDRESS(v) << 4);
2486 	}
2487 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2488 	t4_fatal_err(adapter);
2489 }
2490 
2491 /*
2492  * SMB interrupt handler.
2493  */
2494 static void smb_intr_handler(struct adapter *adap)
2495 {
2496 	static struct intr_info smb_intr_info[] = {
2497 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2498 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2499 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2500 		{ 0 }
2501 	};
2502 
2503 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2504 		t4_fatal_err(adap);
2505 }
2506 
2507 /*
2508  * NC-SI interrupt handler.
2509  */
2510 static void ncsi_intr_handler(struct adapter *adap)
2511 {
2512 	static struct intr_info ncsi_intr_info[] = {
2513 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2514 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2515 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2516 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2517 		{ 0 }
2518 	};
2519 
2520 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2521 		t4_fatal_err(adap);
2522 }
2523 
2524 /*
2525  * XGMAC interrupt handler.
2526  */
2527 static void xgmac_intr_handler(struct adapter *adap, int port)
2528 {
2529 	u32 v, int_cause_reg;
2530 
2531 	if (is_t4(adap))
2532 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2533 	else
2534 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2535 
2536 	v = t4_read_reg(adap, int_cause_reg);
2537 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2538 	if (!v)
2539 		return;
2540 
2541 	if (v & F_TXFIFO_PRTY_ERR)
2542 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2543 	if (v & F_RXFIFO_PRTY_ERR)
2544 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2545 	t4_write_reg(adap, int_cause_reg, v);
2546 	t4_fatal_err(adap);
2547 }
2548 
2549 /*
2550  * PL interrupt handler.
2551  */
2552 static void pl_intr_handler(struct adapter *adap)
2553 {
2554 	static struct intr_info pl_intr_info[] = {
2555 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2556 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2557 		{ 0 }
2558 	};
2559 
2560 	static struct intr_info t5_pl_intr_info[] = {
2561 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2562 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2563 		{ 0 }
2564 	};
2565 
2566 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2567 	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2568 		t4_fatal_err(adap);
2569 }
2570 
2571 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2572 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2573 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2574 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2575 
2576 /**
2577  *	t4_slow_intr_handler - control path interrupt handler
2578  *	@adapter: the adapter
2579  *
2580  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2581  *	The designation 'slow' is because it involves register reads, while
2582  *	data interrupts typically don't involve any MMIOs.
2583  */
2584 int t4_slow_intr_handler(struct adapter *adapter)
2585 {
2586 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2587 
2588 	if (!(cause & GLBL_INTR_MASK))
2589 		return 0;
2590 	if (cause & F_CIM)
2591 		cim_intr_handler(adapter);
2592 	if (cause & F_MPS)
2593 		mps_intr_handler(adapter);
2594 	if (cause & F_NCSI)
2595 		ncsi_intr_handler(adapter);
2596 	if (cause & F_PL)
2597 		pl_intr_handler(adapter);
2598 	if (cause & F_SMB)
2599 		smb_intr_handler(adapter);
2600 	if (cause & F_XGMAC0)
2601 		xgmac_intr_handler(adapter, 0);
2602 	if (cause & F_XGMAC1)
2603 		xgmac_intr_handler(adapter, 1);
2604 	if (cause & F_XGMAC_KR0)
2605 		xgmac_intr_handler(adapter, 2);
2606 	if (cause & F_XGMAC_KR1)
2607 		xgmac_intr_handler(adapter, 3);
2608 	if (cause & F_PCIE)
2609 		pcie_intr_handler(adapter);
2610 	if (cause & F_MC)
2611 		mem_intr_handler(adapter, MEM_MC);
2612 	if (cause & F_EDC0)
2613 		mem_intr_handler(adapter, MEM_EDC0);
2614 	if (cause & F_EDC1)
2615 		mem_intr_handler(adapter, MEM_EDC1);
2616 	if (cause & F_LE)
2617 		le_intr_handler(adapter);
2618 	if (cause & F_TP)
2619 		tp_intr_handler(adapter);
2620 	if (cause & F_MA)
2621 		ma_intr_handler(adapter);
2622 	if (cause & F_PM_TX)
2623 		pmtx_intr_handler(adapter);
2624 	if (cause & F_PM_RX)
2625 		pmrx_intr_handler(adapter);
2626 	if (cause & F_ULP_RX)
2627 		ulprx_intr_handler(adapter);
2628 	if (cause & F_CPL_SWITCH)
2629 		cplsw_intr_handler(adapter);
2630 	if (cause & F_SGE)
2631 		sge_intr_handler(adapter);
2632 	if (cause & F_ULP_TX)
2633 		ulptx_intr_handler(adapter);
2634 
2635 	/* Clear the interrupts just processed for which we are the master. */
2636 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2637 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2638 	return 1;
2639 }
2640 
2641 /**
2642  *	t4_intr_enable - enable interrupts
2643  *	@adapter: the adapter whose interrupts should be enabled
2644  *
2645  *	Enable PF-specific interrupts for the calling function and the top-level
2646  *	interrupt concentrator for global interrupts.  Interrupts are already
2647  *	enabled at each module; here we just enable the roots of the interrupt
2648  *	hierarchies.
2649  *
2650  *	Note: this function should be called only when the driver manages
2651  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2652  *	function at a time should be doing this.
2653  */
2654 void t4_intr_enable(struct adapter *adapter)
2655 {
2656 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2657 
2658 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2659 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2660 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2661 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2662 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2663 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2664 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2665 		     F_EGRESS_SIZE_ERR);
2666 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2667 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2668 }
2669 
2670 /**
2671  *	t4_intr_disable - disable interrupts
2672  *	@adapter: the adapter whose interrupts should be disabled
2673  *
2674  *	Disable interrupts.  We only disable the top-level interrupt
2675  *	concentrators.  The caller must be a PCI function managing global
2676  *	interrupts.
2677  */
2678 void t4_intr_disable(struct adapter *adapter)
2679 {
2680 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2681 
2682 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2683 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2684 }
2685 
2686 /**
2687  *	t4_intr_clear - clear all interrupts
2688  *	@adapter: the adapter whose interrupts should be cleared
2689  *
2690  *	Clears all interrupts.  The caller must be a PCI function managing
2691  *	global interrupts.
2692  */
2693 void t4_intr_clear(struct adapter *adapter)
2694 {
2695 	static const unsigned int cause_reg[] = {
2696 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2697 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2698 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
2699 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2700 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2701 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2702 		A_TP_INT_CAUSE,
2703 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2704 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2705 		A_MPS_RX_PERR_INT_CAUSE,
2706 		A_CPL_INTR_CAUSE,
2707 		MYPF_REG(A_PL_PF_INT_CAUSE),
2708 		A_PL_PL_INT_CAUSE,
2709 		A_LE_DB_INT_CAUSE,
2710 	};
2711 
2712 	unsigned int i;
2713 
2714 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2715 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2716 
2717 	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2718 				A_MC_P_INT_CAUSE, 0xffffffff);
2719 
2720 	if (is_t4(adapter)) {
2721 		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2722 				0xffffffff);
2723 		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2724 				0xffffffff);
2725 	} else
2726 		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
2727 
2728 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2729 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2730 }
2731 
2732 /**
2733  *	hash_mac_addr - return the hash value of a MAC address
2734  *	@addr: the 48-bit Ethernet MAC address
2735  *
2736  *	Hashes a MAC address according to the hash function used by HW inexact
2737  *	(hash) address matching.
2738  */
2739 static int hash_mac_addr(const u8 *addr)
2740 {
2741 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2742 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2743 	a ^= b;
2744 	a ^= (a >> 12);
2745 	a ^= (a >> 6);
2746 	return a & 0x3f;
2747 }
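
/*
 * Worked example (hand-computed): for MAC 00:07:43:12:34:56,
 * a = 0x000743, b = 0x123456, and a ^ b = 0x123315; folding with
 * a ^= a >> 12 gives 0x123236, a ^= a >> 6 gives 0x127afe, and
 * masking with 0x3f yields hash bucket 0x3e.
 */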
2748 
2749 /**
2750  *	t4_config_rss_range - configure a portion of the RSS mapping table
2751  *	@adapter: the adapter
2752  *	@mbox: mbox to use for the FW command
2753  *	@viid: virtual interface whose RSS subtable is to be written
2754  *	@start: start entry in the table to write
2755  *	@n: how many table entries to write
2756  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2757  *	@nrspq: number of values in @rspq
2758  *
2759  *	Programs the selected part of the VI's RSS mapping table with the
2760  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2761  *	until the full table range is populated.
2762  *
2763  *	The caller must ensure the values in @rspq are in the range allowed for
2764  *	@viid.
2765  */
2766 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2767 			int start, int n, const u16 *rspq, unsigned int nrspq)
2768 {
2769 	int ret;
2770 	const u16 *rsp = rspq;
2771 	const u16 *rsp_end = rspq + nrspq;
2772 	struct fw_rss_ind_tbl_cmd cmd;
2773 
2774 	memset(&cmd, 0, sizeof(cmd));
2775 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2776 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2777 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2778 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2779 
2781 	/*
2782 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2783 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2784 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2785 	 * reserved.
2786 	 */
2787 	while (n > 0) {
2788 		int nq = min(n, 32);
2789 		int nq_packed = 0;
2790 		__be32 *qp = &cmd.iq0_to_iq2;
2791 
2792 		/*
2793 		 * Set up the firmware RSS command header to send the next
2794 		 * "nq" Ingress Queue IDs to the firmware.
2795 		 */
2796 		cmd.niqid = htons(nq);
2797 		cmd.startidx = htons(start);
2798 
2799 		/*
2800 		 * "nq" more done for the start of the next loop.
2801 		 */
2802 		start += nq;
2803 		n -= nq;
2804 
2805 		/*
2806 		 * While there are still Ingress Queue IDs to stuff into the
2807 		 * current firmware RSS command, retrieve them from the
2808 		 * Ingress Queue ID array and insert them into the command.
2809 		 */
2810 		while (nq > 0) {
2811 			/*
2812 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2813 			 * around the Ingress Queue ID array if necessary) and
2814 			 * insert them into the firmware RSS command at the
2815 			 * current 3-tuple position within the command.
2816 			 */
2817 			u16 qbuf[3];
2818 			u16 *qbp = qbuf;
2819 			int nqbuf = min(3, nq);
2820 
2821 			nq -= nqbuf;
2822 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2823 			while (nqbuf && nq_packed < 32) {
2824 				nqbuf--;
2825 				nq_packed++;
2826 				*qbp++ = *rsp++;
2827 				if (rsp >= rsp_end)
2828 					rsp = rspq;
2829 			}
2830 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2831 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2832 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2833 		}
2834 
2835 		/*
2836 		 * Send this portion of the RSS table update to the firmware;
2837 		 * bail out on any errors.
2838 		 */
2839 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2840 		if (ret)
2841 			return ret;
2842 	}
2843 
2844 	return 0;
2845 }
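
/*
 * Illustrative usage sketch (not part of the driver): spread four ingress
 * queues across a 128-entry RSS subtable.  The queue IDs, table size and
 * start index are arbitrary assumptions for the example; the four IDs are
 * repeated until all 128 entries are written.
 */
#if 0
static int example_setup_rss(struct adapter *adap, int mbox, unsigned int viid)
{
	static const u16 rspq[] = { 100, 101, 102, 103 };	/* assumed IQ IDs */

	return t4_config_rss_range(adap, mbox, viid, 0, 128, rspq,
				   ARRAY_SIZE(rspq));
}
#endif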
2846 
2847 /**
2848  *	t4_config_glbl_rss - configure the global RSS mode
2849  *	@adapter: the adapter
2850  *	@mbox: mbox to use for the FW command
2851  *	@mode: global RSS mode
2852  *	@flags: mode-specific flags
2853  *
2854  *	Sets the global RSS mode.
2855  */
2856 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2857 		       unsigned int flags)
2858 {
2859 	struct fw_rss_glb_config_cmd c;
2860 
2861 	memset(&c, 0, sizeof(c));
2862 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2863 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2864 	c.retval_len16 = htonl(FW_LEN16(c));
2865 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2866 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2867 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2868 		c.u.basicvirtual.mode_pkd =
2869 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2870 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2871 	} else
2872 		return -EINVAL;
2873 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2874 }
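
/*
 * Illustrative usage sketch (not part of the driver): select Basic Virtual
 * global RSS mode.  The flag name below is an assumption about the
 * t4fw_interface.h definitions for the synmapen_to_hashtoeplitz field.
 */
#if 0
static int example_glbl_rss(struct adapter *adap, int mbox)
{
	return t4_config_glbl_rss(adap, mbox,
				  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				  F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN);
}
#endif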
2875 
2876 /**
2877  *	t4_config_vi_rss - configure per VI RSS settings
2878  *	@adapter: the adapter
2879  *	@mbox: mbox to use for the FW command
2880  *	@viid: the VI id
2881  *	@flags: RSS flags
2882  *	@defq: id of the default RSS queue for the VI.
2883  *
2884  *	Configures VI-specific RSS properties.
2885  */
2886 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2887 		     unsigned int flags, unsigned int defq)
2888 {
2889 	struct fw_rss_vi_config_cmd c;
2890 
2891 	memset(&c, 0, sizeof(c));
2892 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2893 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2894 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2895 	c.retval_len16 = htonl(FW_LEN16(c));
2896 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2897 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2898 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2899 }
2900 
2901 /* Read an RSS table row */
2902 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2903 {
2904 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2905 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2906 				   5, 0, val);
2907 }
2908 
2909 /**
2910  *	t4_read_rss - read the contents of the RSS mapping table
2911  *	@adapter: the adapter
2912  *	@map: holds the contents of the RSS mapping table
2913  *
2914  *	Reads the contents of the RSS hash->queue mapping table.
2915  */
2916 int t4_read_rss(struct adapter *adapter, u16 *map)
2917 {
2918 	u32 val;
2919 	int i, ret;
2920 
2921 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2922 		ret = rd_rss_row(adapter, i, &val);
2923 		if (ret)
2924 			return ret;
2925 		*map++ = G_LKPTBLQUEUE0(val);
2926 		*map++ = G_LKPTBLQUEUE1(val);
2927 	}
2928 	return 0;
2929 }
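
/*
 * Illustrative usage sketch (not part of the driver): read the full
 * hash->queue map.  RSS_NENTRIES u16 entries are filled in, two per
 * table row as decoded above.
 */
#if 0
static void example_dump_rss_map(struct adapter *adap)
{
	static u16 map[RSS_NENTRIES];

	if (t4_read_rss(adap, map) == 0) {
		/* map[i] is the ingress queue for RSS hash bucket i */
	}
}
#endif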
2930 
2931 /**
2932  *	t4_read_rss_key - read the global RSS key
2933  *	@adap: the adapter
2934  *	@key: 10-entry array holding the 320-bit RSS key
2935  *
2936  *	Reads the global 320-bit RSS key.
2937  */
2938 void t4_read_rss_key(struct adapter *adap, u32 *key)
2939 {
2940 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2941 			 A_TP_RSS_SECRET_KEY0);
2942 }
2943 
2944 /**
2945  *	t4_write_rss_key - program one of the RSS keys
2946  *	@adap: the adapter
2947  *	@key: 10-entry array holding the 320-bit RSS key
2948  *	@idx: which RSS key to write
2949  *
2950  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2951  *	0..15 the corresponding entry in the RSS key table is written,
2952  *	otherwise the global RSS key is written.
2953  */
2954 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2955 {
2956 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2957 			  A_TP_RSS_SECRET_KEY0);
2958 	if (idx >= 0 && idx < 16)
2959 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2960 			     V_KEYWRADDR(idx) | F_KEYWREN);
2961 }
2962 
2963 /**
2964  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2965  *	@adapter: the adapter
2966  *	@index: the entry in the PF RSS table to read
2967  *	@valp: where to store the returned value
2968  *
2969  *	Reads the PF RSS Configuration Table at the specified index and returns
2970  *	the value found there.
2971  */
2972 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2973 {
2974 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2975 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2976 }
2977 
2978 /**
2979  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2980  *	@adapter: the adapter
2981  *	@index: the entry in the PF RSS table to write
2982  *	@val: the value to store
2983  *
2984  *	Writes the PF RSS Configuration Table at the specified index with the
2985  *	specified value.
2986  */
2987 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2988 {
2989 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2990 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2991 }
2992 
2993 /**
2994  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2995  *	@adapter: the adapter
2996  *	@index: the entry in the VF RSS table to read
2997  *	@vfl: where to store the returned VFL
2998  *	@vfh: where to store the returned VFH
2999  *
3000  *	Reads the VF RSS Configuration Table at the specified index and returns
3001  *	the (VFL, VFH) values found there.
3002  */
3003 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3004 			   u32 *vfl, u32 *vfh)
3005 {
3006 	u32 vrt;
3007 
3008 	/*
3009 	 * Request that the index'th VF Table values be read into VFL/VFH.
3010 	 */
3011 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3012 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
3013 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
3014 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3015 
3016 	/*
3017 	 * Grab the VFL/VFH values ...
3018 	 */
3019 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3020 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3021 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3022 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3023 }
3024 
3025 /**
3026  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3028  *	@adapter: the adapter
3029  *	@index: the entry in the VF RSS table to write
3030  *	@vfl: the VFL to store
3031  *	@vfh: the VFH to store
3032  *
3033  *	Writes the VF RSS Configuration Table at the specified index with the
3034  *	specified (VFL, VFH) values.
3035  */
3036 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3037 			    u32 vfl, u32 vfh)
3038 {
3039 	u32 vrt;
3040 
3041 	/*
3042 	 * Load up VFL/VFH with the values to be written ...
3043 	 */
3044 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3045 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3046 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3047 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3048 
3049 	/*
3050 	 * Write the VFL/VFH into the VF Table at index'th location.
3051 	 */
3052 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3053 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3054 	vrt |= V_VFWRADDR(index) | F_VFWREN;
3055 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3056 }
3057 
3058 /**
3059  *	t4_read_rss_pf_map - read PF RSS Map
3060  *	@adapter: the adapter
3061  *
3062  *	Reads the PF RSS Map register and returns its value.
3063  */
3064 u32 t4_read_rss_pf_map(struct adapter *adapter)
3065 {
3066 	u32 pfmap;
3067 
3068 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3069 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3070 	return pfmap;
3071 }
3072 
3073 /**
3074  *	t4_write_rss_pf_map - write PF RSS Map
3075  *	@adapter: the adapter
3076  *	@pfmap: PF RSS Map value
3077  *
3078  *	Writes the specified value to the PF RSS Map register.
3079  */
3080 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3081 {
3082 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3083 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3084 }
3085 
3086 /**
3087  *	t4_read_rss_pf_mask - read PF RSS Mask
3088  *	@adapter: the adapter
3089  *
3090  *	Reads the PF RSS Mask register and returns its value.
3091  */
3092 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3093 {
3094 	u32 pfmask;
3095 
3096 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3097 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3098 	return pfmask;
3099 }
3100 
3101 /**
3102  *	t4_write_rss_pf_mask - write PF RSS Mask
3103  *	@adapter: the adapter
3104  *	@pfmask: PF RSS Mask value
3105  *
3106  *	Writes the specified value to the PF RSS Mask register.
3107  */
3108 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3109 {
3110 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3111 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3112 }
3113 
3114 static void refresh_vlan_pri_map(struct adapter *adap)
3115 {
3116 
3117 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3118 			 &adap->params.tp.vlan_pri_map, 1,
3119 			 A_TP_VLAN_PRI_MAP);
3120 
3121 	/*
3122 	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3123 	 * shift positions of several elements of the Compressed Filter Tuple
3124 	 * for this adapter which we need frequently ...
3125 	 */
3126 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3127 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3128 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3129 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
3130 
3131 	/*
3132 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3133 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
3134 	 */
3135 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3136 		adap->params.tp.vnic_shift = -1;
3137 }
3138 
3139 /**
3140  *	t4_set_filter_mode - configure the optional components of filter tuples
3141  *	@adap: the adapter
3142  *	@mode_map: a bitmap selecting which optional filter components to enable
3143  *
3144  *	Sets the filter mode by selecting the optional components to enable
3145  *	in filter tuples.  Returns 0 on success and a negative error if the
3146  *	requested mode needs more bits than are available for optional
3147  *	components.
3148  */
3149 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3150 {
3151 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3152 
3153 	int i, nbits = 0;
3154 
3155 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3156 		if (mode_map & (1 << i))
3157 			nbits += width[i];
3158 	if (nbits > FILTER_OPT_LEN)
3159 		return -EINVAL;
3160 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3161 			  A_TP_VLAN_PRI_MAP);
3162 	refresh_vlan_pri_map(adap);
3163 
3164 	return 0;
3165 }
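
/*
 * Worked example (assuming FILTER_OPT_LEN is 36 and the width[] entries map
 * the S_FCOE..S_FRAGMENTATION fields in register bit order): enabling VLAN
 * (17 bits), PORT (3), TOS (8) and PROTOCOL (8) uses exactly 36 bits and
 * succeeds, while additionally enabling FRAGMENTATION (1 more bit) would
 * exceed the budget and make t4_set_filter_mode() return -EINVAL.
 */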
3166 
3167 /**
3168  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3169  *	@adap: the adapter
3170  *	@v4: holds the TCP/IP counter values
3171  *	@v6: holds the TCP/IPv6 counter values
3172  *
3173  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3174  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3175  */
3176 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3177 			 struct tp_tcp_stats *v6)
3178 {
3179 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3180 
3181 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3182 #define STAT(x)     val[STAT_IDX(x)]
3183 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3184 
3185 	if (v4) {
3186 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3187 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3188 		v4->tcpOutRsts = STAT(OUT_RST);
3189 		v4->tcpInSegs  = STAT64(IN_SEG);
3190 		v4->tcpOutSegs = STAT64(OUT_SEG);
3191 		v4->tcpRetransSegs = STAT64(RXT_SEG);
3192 	}
3193 	if (v6) {
3194 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3195 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3196 		v6->tcpOutRsts = STAT(OUT_RST);
3197 		v6->tcpInSegs  = STAT64(IN_SEG);
3198 		v6->tcpOutSegs = STAT64(OUT_SEG);
3199 		v6->tcpRetransSegs = STAT64(RXT_SEG);
3200 	}
3201 #undef STAT64
3202 #undef STAT
3203 #undef STAT_IDX
3204 }
3205 
3206 /**
3207  *	t4_tp_get_err_stats - read TP's error MIB counters
3208  *	@adap: the adapter
3209  *	@st: holds the counter values
3210  *
3211  *	Returns the values of TP's error counters.
3212  */
3213 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3214 {
3215 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3216 			 12, A_TP_MIB_MAC_IN_ERR_0);
3217 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3218 			 8, A_TP_MIB_TNL_CNG_DROP_0);
3219 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3220 			 4, A_TP_MIB_TNL_DROP_0);
3221 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3222 			 4, A_TP_MIB_OFD_VLN_DROP_0);
3223 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3224 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3225 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3226 			 2, A_TP_MIB_OFD_ARP_DROP);
3227 }
3228 
3229 /**
3230  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3231  *	@adap: the adapter
3232  *	@st: holds the counter values
3233  *
3234  *	Returns the values of TP's proxy counters.
3235  */
3236 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3237 {
3238 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3239 			 4, A_TP_MIB_TNL_LPBK_0);
3240 }
3241 
3242 /**
3243  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3244  *	@adap: the adapter
3245  *	@st: holds the counter values
3246  *
3247  *	Returns the values of TP's CPL counters.
3248  */
3249 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3250 {
3251 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3252 			 8, A_TP_MIB_CPL_IN_REQ_0);
3253 }
3254 
3255 /**
3256  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3257  *	@adap: the adapter
3258  *	@st: holds the counter values
3259  *
3260  *	Returns the values of TP's RDMA counters.
3261  */
3262 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3263 {
3264 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3265 			 2, A_TP_MIB_RQE_DFR_MOD);
3266 }
3267 
3268 /**
3269  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3270  *	@adap: the adapter
3271  *	@idx: the port index
3272  *	@st: holds the counter values
3273  *
3274  *	Returns the values of TP's FCoE counters for the selected port.
3275  */
3276 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3277 		       struct tp_fcoe_stats *st)
3278 {
3279 	u32 val[2];
3280 
3281 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3282 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3283 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3284 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3285 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3286 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3287 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3288 }
3289 
3290 /**
3291  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3292  *	@adap: the adapter
3293  *	@st: holds the counter values
3294  *
3295  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3296  */
3297 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3298 {
3299 	u32 val[4];
3300 
3301 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3302 			 A_TP_MIB_USM_PKTS);
3303 	st->frames = val[0];
3304 	st->drops = val[1];
3305 	st->octets = ((u64)val[2] << 32) | val[3];
3306 }
3307 
3308 /**
3309  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3310  *	@adap: the adapter
3311  *	@mtus: where to store the MTU values
3312  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3313  *
3314  *	Reads the HW path MTU table.
3315  */
3316 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3317 {
3318 	u32 v;
3319 	int i;
3320 
3321 	for (i = 0; i < NMTUS; ++i) {
3322 		t4_write_reg(adap, A_TP_MTU_TABLE,
3323 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3324 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3325 		mtus[i] = G_MTUVALUE(v);
3326 		if (mtu_log)
3327 			mtu_log[i] = G_MTUWIDTH(v);
3328 	}
3329 }
3330 
3331 /**
3332  *	t4_read_cong_tbl - reads the congestion control table
3333  *	@adap: the adapter
3334  *	@incr: where to store the additive increment values
3335  *
3336  *	Reads the additive increments programmed into the HW congestion
3337  *	control table.
3338  */
3339 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3340 {
3341 	unsigned int mtu, w;
3342 
3343 	for (mtu = 0; mtu < NMTUS; ++mtu)
3344 		for (w = 0; w < NCCTRL_WIN; ++w) {
3345 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3346 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3347 			incr[mtu][w] = (u16)t4_read_reg(adap,
3348 						A_TP_CCTRL_TABLE) & 0x1fff;
3349 		}
3350 }
3351 
3352 /**
3353  *	t4_read_pace_tbl - read the pace table
3354  *	@adap: the adapter
3355  *	@pace_vals: holds the returned values
3356  *
3357  *	Returns the values of TP's pace table in microseconds.
3358  */
3359 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3360 {
3361 	unsigned int i, v;
3362 
3363 	for (i = 0; i < NTX_SCHED; i++) {
3364 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3365 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3366 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3367 	}
3368 }
3369 
3370 /**
3371  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3372  *	@adap: the adapter
3373  *	@addr: the indirect TP register address
3374  *	@mask: specifies the field within the register to modify
3375  *	@val: new value for the field
3376  *
3377  *	Sets a field of an indirect TP register to the given value.
3378  */
3379 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3380 			    unsigned int mask, unsigned int val)
3381 {
3382 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3383 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3384 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3385 }
3386 
3387 /**
3388  *	init_cong_ctrl - initialize congestion control parameters
3389  *	@a: the alpha values for congestion control
3390  *	@b: the beta values for congestion control
3391  *
3392  *	Initialize the congestion control parameters.
3393  */
3394 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3395 {
3396 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3397 	a[9] = 2;
3398 	a[10] = 3;
3399 	a[11] = 4;
3400 	a[12] = 5;
3401 	a[13] = 6;
3402 	a[14] = 7;
3403 	a[15] = 8;
3404 	a[16] = 9;
3405 	a[17] = 10;
3406 	a[18] = 14;
3407 	a[19] = 17;
3408 	a[20] = 21;
3409 	a[21] = 25;
3410 	a[22] = 30;
3411 	a[23] = 35;
3412 	a[24] = 45;
3413 	a[25] = 60;
3414 	a[26] = 80;
3415 	a[27] = 100;
3416 	a[28] = 200;
3417 	a[29] = 300;
3418 	a[30] = 400;
3419 	a[31] = 500;
3420 
3421 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3422 	b[9] = b[10] = 1;
3423 	b[11] = b[12] = 2;
3424 	b[13] = b[14] = b[15] = b[16] = 3;
3425 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3426 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3427 	b[28] = b[29] = 6;
3428 	b[30] = b[31] = 7;
3429 }
3430 
3431 /* The minimum additive increment value for the congestion control table */
3432 #define CC_MIN_INCR 2U
3433 
3434 /**
3435  *	t4_load_mtus - write the MTU and congestion control HW tables
3436  *	@adap: the adapter
3437  *	@mtus: the values for the MTU table
3438  *	@alpha: the values for the congestion control alpha parameter
3439  *	@beta: the values for the congestion control beta parameter
3440  *
3441  *	Write the HW MTU table with the supplied MTUs and the high-speed
3442  *	congestion control table with the supplied alpha, beta, and MTUs.
3443  *	We write the two tables together because the additive increments
3444  *	depend on the MTUs.
3445  */
3446 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3447 		  const unsigned short *alpha, const unsigned short *beta)
3448 {
3449 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3450 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3451 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3452 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3453 	};
3454 
3455 	unsigned int i, w;
3456 
3457 	for (i = 0; i < NMTUS; ++i) {
3458 		unsigned int mtu = mtus[i];
3459 		unsigned int log2 = fls(mtu);
3460 
3461 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3462 			log2--;
3463 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3464 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3465 
3466 		for (w = 0; w < NCCTRL_WIN; ++w) {
3467 			unsigned int inc;
3468 
3469 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3470 				  CC_MIN_INCR);
3471 
3472 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3473 				     (w << 16) | (beta[w] << 13) | inc);
3474 		}
3475 	}
3476 }
3477 
3478 /**
3479  *	t4_set_pace_tbl - set the pace table
3480  *	@adap: the adapter
3481  *	@pace_vals: the pace values in microseconds
3482  *	@start: index of the first entry in the HW pace table to set
3483  *	@n: how many entries to set
3484  *
3485  *	Sets (a subset of the) HW pace table.
3486  */
3487 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3488 		     unsigned int start, unsigned int n)
3489 {
3490 	unsigned int vals[NTX_SCHED], i;
3491 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3492 
3493 	if (n > NTX_SCHED)
3494 		return -ERANGE;
3495 
3496 	/* convert values from us to dack ticks, rounding to closest value */
3497 	for (i = 0; i < n; i++, pace_vals++) {
3498 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3499 		if (vals[i] > 0x7ff)
3500 			return -ERANGE;
3501 		if (*pace_vals && vals[i] == 0)
3502 			return -ERANGE;
3503 	}
3504 	for (i = 0; i < n; i++, start++)
3505 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3506 	return 0;
3507 }
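
/*
 * Example (editorial sketch): program the first two pace table entries
 * to 100us and 200us.  "sc" stands in for a fully initialized adapter:
 *
 *	static const unsigned int pace_us[2] = { 100, 200 };
 *
 *	if (t4_set_pace_tbl(sc, pace_us, 0, 2) != 0)
 *		... a delay was out of range for the 11-bit HW field
 */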
3508 
3509 /**
3510  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3511  *	@adap: the adapter
3512  *	@sched: the scheduler index
3513  *	@kbps: target rate in Kbps
3514  *
3515  *	Configure a Tx HW scheduler for the target rate.
3516  */
3517 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3518 {
3519 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3520 	unsigned int clk = adap->params.vpd.cclk * 1000;
3521 	unsigned int selected_cpt = 0, selected_bpt = 0;
3522 
3523 	if (kbps > 0) {
3524 		kbps *= 125;     /* -> bytes */
3525 		for (cpt = 1; cpt <= 255; cpt++) {
3526 			tps = clk / cpt;
3527 			bpt = (kbps + tps / 2) / tps;
3528 			if (bpt > 0 && bpt <= 255) {
3529 				v = bpt * tps;
3530 				delta = v >= kbps ? v - kbps : kbps - v;
3531 				if (delta < mindelta) {
3532 					mindelta = delta;
3533 					selected_cpt = cpt;
3534 					selected_bpt = bpt;
3535 				}
3536 			} else if (selected_cpt)
3537 				break;
3538 		}
3539 		if (!selected_cpt)
3540 			return -EINVAL;
3541 	}
3542 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3543 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3544 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3545 	if (sched & 1)
3546 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3547 	else
3548 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3549 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3550 	return 0;
3551 }
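
/*
 * Worked example (editorial) for the search above: with a 250MHz core
 * clock (cclk = 250000) a 1Gbps target becomes 125,000,000 bytes/s
 * after the kbps *= 125 conversion.  At cpt = 2, tps = 125,000,000
 * ticks/s and bpt rounds to 1, reproducing the target exactly
 * (delta = 0), so the search settles on that <cpt, bpt> pair.
 */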
3552 
3553 /**
3554  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3555  *	@adap: the adapter
3556  *	@sched: the scheduler index
3557  *	@ipg: the interpacket delay in tenths of nanoseconds
3558  *
3559  *	Set the interpacket delay for a HW packet rate scheduler.
3560  */
3561 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3562 {
3563 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3564 
3565 	/* convert ipg to nearest number of core clocks */
3566 	ipg *= core_ticks_per_usec(adap);
3567 	ipg = (ipg + 5000) / 10000;
3568 	if (ipg > M_TXTIMERSEPQ0)
3569 		return -EINVAL;
3570 
3571 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3572 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3573 	if (sched & 1)
3574 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3575 	else
3576 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3577 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3578 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3579 	return 0;
3580 }
3581 
3582 /**
3583  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3584  *	@adap: the adapter
3585  *	@sched: the scheduler index
3586  *	@kbps: where to store the rate in Kbps (0 if the scheduler is disabled)
3587  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3588  *
3589  *	Return the current configuration of a HW Tx scheduler.
3590  */
3591 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3592 		     unsigned int *ipg)
3593 {
3594 	unsigned int v, addr, bpt, cpt;
3595 
3596 	if (kbps) {
3597 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3598 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3599 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3600 		if (sched & 1)
3601 			v >>= 16;
3602 		bpt = (v >> 8) & 0xff;
3603 		cpt = v & 0xff;
3604 		if (!cpt)
3605 			*kbps = 0;        /* scheduler disabled */
3606 		else {
3607 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3608 			*kbps = (v * bpt) / 125;
3609 		}
3610 	}
3611 	if (ipg) {
3612 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3613 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3614 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3615 		if (sched & 1)
3616 			v >>= 16;
3617 		v &= 0xffff;
3618 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3619 	}
3620 }
3621 
3622 /*
3623  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3624  * clocks.  The formula is
3625  *
3626  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3627  *
3628  * which is equivalent to
3629  *
3630  * bytes/s = 62.5 * bytes256 * ClkFreq_kHz  (cclk is stored in kHz)
3631  */
3632 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3633 {
3634 	u64 v = bytes256 * adap->params.vpd.cclk;
3635 
3636 	return v * 62 + v / 2;
3637 }
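
/*
 * Worked example (editorial): bytes256 = 16 with a 250MHz core clock
 * (cclk = 250000, in kHz) gives v = 4,000,000, so the function returns
 * 4,000,000 * 62 + 4,000,000 / 2 = 250,000,000 bytes/s, i.e. 62.5 * v.
 */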
3638 
3639 /**
3640  *	t4_get_chan_txrate - get the current per channel Tx rates
3641  *	@adap: the adapter
3642  *	@nic_rate: rates for NIC traffic
3643  *	@ofld_rate: rates for offloaded traffic
3644  *
3645  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3646  *	for each channel.
3647  */
3648 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3649 {
3650 	u32 v;
3651 
3652 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3653 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3654 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3655 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3656 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3657 
3658 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3659 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3660 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3661 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3662 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3663 }
3664 
3665 /**
3666  *	t4_set_trace_filter - configure one of the tracing filters
3667  *	@adap: the adapter
3668  *	@tp: the desired trace filter parameters
3669  *	@idx: which filter to configure
3670  *	@enable: whether to enable or disable the filter
3671  *
3672  *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3673  *	it indicates that the filter is already written in the register and it
3674  *	just needs to be enabled or disabled.
3675  */
3676 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3677     int idx, int enable)
3678 {
3679 	int i, ofst = idx * 4;
3680 	u32 data_reg, mask_reg, cfg;
3681 	u32 multitrc = F_TRCMULTIFILTER;
3682 	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3683 
3684 	if (idx < 0 || idx >= NTRACE)
3685 		return -EINVAL;
3686 
3687 	if (tp == NULL || !enable) {
3688 		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3689 		    enable ? en : 0);
3690 		return 0;
3691 	}
3692 
3693 	/*
3694 	 * TODO - After T4 data book is updated, specify the exact
3695 	 * section below.
3696 	 *
3697 	 * See T4 data book - MPS section for a complete description
3698 	 * of the below if..else handling of A_MPS_TRC_CFG register
3699 	 * value.
3700 	 */
3701 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3702 	if (cfg & F_TRCMULTIFILTER) {
3703 		/*
3704 		 * If multiple tracers are enabled, then maximum
3705 		 * capture size is 2.5KB (FIFO size of a single channel)
3706 		 * minus 2 flits for CPL_TRACE_PKT header.
3707 		 */
3708 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3709 			return -EINVAL;
3710 	} else {
3711 		/*
3712 		 * If multiple tracers are disabled, a maximum packet capture
3713 		 * size of 9600 bytes is recommended to avoid deadlocks.
3714 		 * Also, in this mode only trace0 can be enabled and running.
3715 		 */
3716 		multitrc = 0;
3717 		if (tp->snap_len > 9600 || idx)
3718 			return -EINVAL;
3719 	}
3720 
3721 	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3722 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3723 	    tp->min_len > M_TFMINPKTSIZE)
3724 		return -EINVAL;
3725 
3726 	/* stop the tracer we'll be changing */
3727 	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3728 
3729 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3730 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3731 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3732 
3733 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3734 		t4_write_reg(adap, data_reg, tp->data[i]);
3735 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3736 	}
3737 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3738 		     V_TFCAPTUREMAX(tp->snap_len) |
3739 		     V_TFMINPKTSIZE(tp->min_len));
3740 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3741 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3742 		     (is_t4(adap) ?
3743 		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3744 		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3745 
3746 	return 0;
3747 }
3748 
3749 /**
3750  *	t4_get_trace_filter - query one of the tracing filters
3751  *	@adap: the adapter
3752  *	@tp: the current trace filter parameters
3753  *	@idx: which trace filter to query
3754  *	@enabled: non-zero if the filter is enabled
3755  *
3756  *	Returns the current settings of one of the HW tracing filters.
3757  */
3758 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3759 			 int *enabled)
3760 {
3761 	u32 ctla, ctlb;
3762 	int i, ofst = idx * 4;
3763 	u32 data_reg, mask_reg;
3764 
3765 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3766 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3767 
3768 	if (is_t4(adap)) {
3769 		*enabled = !!(ctla & F_TFEN);
3770 		tp->port =  G_TFPORT(ctla);
3771 		tp->invert = !!(ctla & F_TFINVERTMATCH);
3772 	} else {
3773 		*enabled = !!(ctla & F_T5_TFEN);
3774 		tp->port = G_T5_TFPORT(ctla);
3775 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3776 	}
3777 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3778 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3779 	tp->skip_ofst = G_TFOFFSET(ctla);
3780 	tp->skip_len = G_TFLENGTH(ctla);
3781 
3782 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3783 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3784 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3785 
3786 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3787 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3788 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3789 	}
3790 }
3791 
3792 /**
3793  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3794  *	@adap: the adapter
3795  *	@cnt: where to store the count statistics
3796  *	@cycles: where to store the cycle statistics
3797  *
3798  *	Returns performance statistics from PMTX.
3799  */
3800 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3801 {
3802 	int i;
3803 	u32 data[2];
3804 
3805 	for (i = 0; i < PM_NSTATS; i++) {
3806 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3807 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3808 		if (is_t4(adap))
3809 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3810 		else {
3811 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3812 					 A_PM_TX_DBG_DATA, data, 2,
3813 					 A_PM_TX_DBG_STAT_MSB);
3814 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3815 		}
3816 	}
3817 }
3818 
3819 /**
3820  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3821  *	@adap: the adapter
3822  *	@cnt: where to store the count statistics
3823  *	@cycles: where to store the cycle statistics
3824  *
3825  *	Returns performance statistics from PMRX.
3826  */
3827 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3828 {
3829 	int i;
3830 	u32 data[2];
3831 
3832 	for (i = 0; i < PM_NSTATS; i++) {
3833 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3834 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3835 		if (is_t4(adap))
3836 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3837 		else {
3838 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3839 					 A_PM_RX_DBG_DATA, data, 2,
3840 					 A_PM_RX_DBG_STAT_MSB);
3841 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3842 		}
3843 	}
3844 }
3845 
3846 /**
3847  *	get_mps_bg_map - return the buffer groups associated with a port
3848  *	@adap: the adapter
3849  *	@idx: the port index
3850  *
3851  *	Returns a bitmap indicating which MPS buffer groups are associated
3852  *	with the given port.  Bit i is set if buffer group i is used by the
3853  *	port.
3854  */
3855 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3856 {
3857 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3858 
3859 	if (n == 0)
3860 		return idx == 0 ? 0xf : 0;
3861 	if (n == 1)
3862 		return idx < 2 ? (3 << (2 * idx)) : 0;
3863 	return 1 << idx;
3864 }
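
/*
 * Editorial note on the mapping above: when NUMPORTS reads 0 (a
 * single-port configuration) port 0 owns all four buffer groups (0xf);
 * when it reads 1 (dual-port) each port owns a pair of groups (0x3 or
 * 0xc); otherwise (quad-port) each port owns exactly one group.
 */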
3865 
3866 /**
3867  *	t4_get_port_stats_offset - collect port stats relative to a
3868  *				   previous snapshot
3869  *	@adap: the adapter
3870  *	@idx: the port index
3871  *	@stats: current stats to fill
3872  *	@offset: previous stats snapshot
3873  */
3874 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3875 		struct port_stats *stats,
3876 		struct port_stats *offset)
3877 {
3878 	u64 *s, *o;
3879 	int i;
3880 
3881 	t4_get_port_stats(adap, idx, stats);
3882 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3883 	    i < (sizeof(struct port_stats) / sizeof(u64));
3884 	    i++, s++, o++)
3885 		*s -= *o;
3886 }
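
/*
 * Example (editorial sketch): measure traffic accumulated since a
 * baseline snapshot.  "sc" stands in for a fully initialized adapter:
 *
 *	struct port_stats base, delta;
 *
 *	t4_get_port_stats(sc, 0, &base);	... take the baseline
 *	... later ...
 *	t4_get_port_stats_offset(sc, 0, &delta, &base);
 *	... delta now holds the counters accumulated since the baseline
 */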
3887 
3888 /**
3889  *	t4_get_port_stats - collect port statistics
3890  *	@adap: the adapter
3891  *	@idx: the port index
3892  *	@p: the stats structure to fill
3893  *
3894  *	Collect statistics related to the given port from HW.
3895  */
3896 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3897 {
3898 	u32 bgmap = get_mps_bg_map(adap, idx);
3899 
3900 #define GET_STAT(name) \
3901 	t4_read_reg64(adap, \
3902 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3903 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3904 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3905 
3906 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3907 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3908 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3909 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3910 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3911 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3912 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3913 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3914 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3915 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3916 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3917 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3918 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3919 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3920 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3921 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3922 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3923 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3924 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3925 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3926 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3927 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3928 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3929 
3930 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3931 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3932 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3933 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3934 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3935 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3936 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3937 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3938 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3939 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3940 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3941 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3942 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3943 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3944 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3945 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3946 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3947 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3948 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3949 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3950 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3951 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3952 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3953 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3954 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3955 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3956 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3957 
3958 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3959 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3960 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3961 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3962 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3963 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3964 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3965 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3966 
3967 #undef GET_STAT
3968 #undef GET_STAT_COM
3969 }
3970 
3971 /**
3972  *	t4_clr_port_stats - clear port statistics
3973  *	@adap: the adapter
3974  *	@idx: the port index
3975  *
3976  *	Clear HW statistics for the given port.
3977  */
3978 void t4_clr_port_stats(struct adapter *adap, int idx)
3979 {
3980 	unsigned int i;
3981 	u32 bgmap = get_mps_bg_map(adap, idx);
3982 	u32 port_base_addr;
3983 
3984 	if (is_t4(adap))
3985 		port_base_addr = PORT_BASE(idx);
3986 	else
3987 		port_base_addr = T5_PORT_BASE(idx);
3988 
3989 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3990 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3991 		t4_write_reg(adap, port_base_addr + i, 0);
3992 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3993 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3994 		t4_write_reg(adap, port_base_addr + i, 0);
3995 	for (i = 0; i < 4; i++)
3996 		if (bgmap & (1 << i)) {
3997 			t4_write_reg(adap,
3998 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3999 			t4_write_reg(adap,
4000 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
4001 		}
4002 }
4003 
4004 /**
4005  *	t4_get_lb_stats - collect loopback port statistics
4006  *	@adap: the adapter
4007  *	@idx: the loopback port index
4008  *	@p: the stats structure to fill
4009  *
4010  *	Return HW statistics for the given loopback port.
4011  */
4012 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4013 {
4014 	u32 bgmap = get_mps_bg_map(adap, idx);
4015 
4016 #define GET_STAT(name) \
4017 	t4_read_reg64(adap, \
4018 	(is_t4(adap) ? \
4019 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
4020 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
4021 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
4022 
4023 	p->octets           = GET_STAT(BYTES);
4024 	p->frames           = GET_STAT(FRAMES);
4025 	p->bcast_frames     = GET_STAT(BCAST);
4026 	p->mcast_frames     = GET_STAT(MCAST);
4027 	p->ucast_frames     = GET_STAT(UCAST);
4028 	p->error_frames     = GET_STAT(ERROR);
4029 
4030 	p->frames_64        = GET_STAT(64B);
4031 	p->frames_65_127    = GET_STAT(65B_127B);
4032 	p->frames_128_255   = GET_STAT(128B_255B);
4033 	p->frames_256_511   = GET_STAT(256B_511B);
4034 	p->frames_512_1023  = GET_STAT(512B_1023B);
4035 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
4036 	p->frames_1519_max  = GET_STAT(1519B_MAX);
4037 	p->drop             = GET_STAT(DROP_FRAMES);
4038 
4039 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4040 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4041 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4042 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4043 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4044 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4045 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4046 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4047 
4048 #undef GET_STAT
4049 #undef GET_STAT_COM
4050 }
4051 
4052 /**
4053  *	t4_wol_magic_enable - enable/disable magic packet WoL
4054  *	@adap: the adapter
4055  *	@port: the physical port index
4056  *	@addr: MAC address expected in magic packets, %NULL to disable
4057  *
4058  *	Enables/disables magic packet wake-on-LAN for the selected port.
4059  */
4060 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4061 			 const u8 *addr)
4062 {
4063 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4064 
4065 	if (is_t4(adap)) {
4066 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4067 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4068 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4069 	} else {
4070 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4071 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4072 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4073 	}
4074 
4075 	if (addr) {
4076 		t4_write_reg(adap, mag_id_reg_l,
4077 			     (addr[2] << 24) | (addr[3] << 16) |
4078 			     (addr[4] << 8) | addr[5]);
4079 		t4_write_reg(adap, mag_id_reg_h,
4080 			     (addr[0] << 8) | addr[1]);
4081 	}
4082 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4083 			 V_MAGICEN(addr != NULL));
4084 }
4085 
4086 /**
4087  *	t4_wol_pat_enable - enable/disable pattern-based WoL
4088  *	@adap: the adapter
4089  *	@port: the physical port index
4090  *	@map: bitmap of which HW pattern filters to set
4091  *	@mask0: byte mask for bytes 0-63 of a packet
4092  *	@mask1: byte mask for bytes 64-127 of a packet
4093  *	@crc: Ethernet CRC for selected bytes
4094  *	@enable: enable/disable switch
4095  *
4096  *	Sets the pattern filters indicated in @map to mask out the bytes
4097  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4098  *	the resulting packet against @crc.  If @enable is %true pattern-based
4099  *	WoL is enabled, otherwise disabled.
4100  */
4101 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4102 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4103 {
4104 	int i;
4105 	u32 port_cfg_reg;
4106 
4107 	if (is_t4(adap))
4108 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4109 	else
4110 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4111 
4112 	if (!enable) {
4113 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4114 		return 0;
4115 	}
4116 	if (map > 0xff)
4117 		return -EINVAL;
4118 
4119 #define EPIO_REG(name) \
4120 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4121 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4122 
4123 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4124 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4125 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4126 
4127 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4128 		if (!(map & 1))
4129 			continue;
4130 
4131 		/* write byte masks */
4132 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4133 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4134 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4135 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4136 			return -ETIMEDOUT;
4137 
4138 		/* write CRC */
4139 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4140 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4141 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4142 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4143 			return -ETIMEDOUT;
4144 	}
4145 #undef EPIO_REG
4146 
4147 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4148 	return 0;
4149 }
4150 
4151 /**
4152  *	t4_mk_filtdelwr - create a delete filter WR
4153  *	@ftid: the filter ID
4154  *	@wr: the filter work request to populate
4155  *	@qid: ingress queue to receive the delete notification
4156  *
4157  *	Creates a filter work request to delete the supplied filter.  If @qid is
4158  *	negative the delete notification is suppressed.
4159  */
4160 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4161 {
4162 	memset(wr, 0, sizeof(*wr));
4163 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4164 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4165 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4166 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4167 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4168 	if (qid >= 0)
4169 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4170 }
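
/*
 * Example (editorial sketch): build a work request that deletes filter
 * ID 7 and directs the delete notification to ingress queue 0:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(7, &wr, 0);
 *	... submitting wr to the firmware is up to the caller
 */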
4171 
4172 #define INIT_CMD(var, cmd, rd_wr) do { \
4173 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4174 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4175 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4176 } while (0)
4177 
4178 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
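/**
 *	t4_fwaddrspace_write - write to an address in the FW address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues an LDST FW command to write @val at @addr in the firmware
 *	address space.
 */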
4179 {
4180 	struct fw_ldst_cmd c;
4181 
4182 	memset(&c, 0, sizeof(c));
4183 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4184 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4185 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4186 	c.u.addrval.addr = htonl(addr);
4187 	c.u.addrval.val = htonl(val);
4188 
4189 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4190 }
4191 
4192 /**
4193  *	t4_mdio_rd - read a PHY register through MDIO
4194  *	@adap: the adapter
4195  *	@mbox: mailbox to use for the FW command
4196  *	@phy_addr: the PHY address
4197  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4198  *	@reg: the register to read
4199  *	@valp: where to store the value
4200  *
4201  *	Issues a FW command through the given mailbox to read a PHY register.
4202  */
4203 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4204 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4205 {
4206 	int ret;
4207 	struct fw_ldst_cmd c;
4208 
4209 	memset(&c, 0, sizeof(c));
4210 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4211 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4212 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4213 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4214 				   V_FW_LDST_CMD_MMD(mmd));
4215 	c.u.mdio.raddr = htons(reg);
4216 
4217 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4218 	if (ret == 0)
4219 		*valp = ntohs(c.u.mdio.rval);
4220 	return ret;
4221 }
4222 
4223 /**
4224  *	t4_mdio_wr - write a PHY register through MDIO
4225  *	@adap: the adapter
4226  *	@mbox: mailbox to use for the FW command
4227  *	@phy_addr: the PHY address
4228  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4229  *	@reg: the register to write
4230  *	@val: value to write
4231  *
4232  *	Issues a FW command through the given mailbox to write a PHY register.
4233  */
4234 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4235 	       unsigned int mmd, unsigned int reg, unsigned int val)
4236 {
4237 	struct fw_ldst_cmd c;
4238 
4239 	memset(&c, 0, sizeof(c));
4240 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4241 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4242 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4243 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4244 				   V_FW_LDST_CMD_MMD(mmd));
4245 	c.u.mdio.raddr = htons(reg);
4246 	c.u.mdio.rval = htons(val);
4247 
4248 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4249 }
4250 
4251 /**
4252  *	t4_i2c_rd - read I2C data from adapter
4253  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
4254  *	@port: Port number if per-port device; <0 if not
4255  *	@devid: per-port device ID or absolute device ID
4256  *	@offset: byte offset into device I2C space
4257  *	@len: byte length of I2C space data
4258  *	@buf: buffer in which to return I2C data
4259  *
4260  *	Reads the I2C data from the indicated device and location.
4261  */
4262 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4263 	      int port, unsigned int devid,
4264 	      unsigned int offset, unsigned int len,
4265 	      u8 *buf)
4266 {
4267 	struct fw_ldst_cmd ldst;
4268 	int ret;
4269 
4270 	if (port >= 4 ||
4271 	    devid >= 256 ||
4272 	    offset >= 256 ||
4273 	    len > sizeof ldst.u.i2c.data)
4274 		return -EINVAL;
4275 
4276 	memset(&ldst, 0, sizeof ldst);
4277 	ldst.op_to_addrspace =
4278 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4279 			    F_FW_CMD_REQUEST |
4280 			    F_FW_CMD_READ |
4281 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4282 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4283 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4284 	ldst.u.i2c.did = devid;
4285 	ldst.u.i2c.boffset = offset;
4286 	ldst.u.i2c.blen = len;
4287 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4288 	if (!ret)
4289 		memcpy(buf, ldst.u.i2c.data, len);
4290 	return ret;
4291 }
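
/*
 * Example (editorial sketch): read the first 8 bytes of the I2C device
 * at address 0xa0 on port 0 (conventionally a transceiver module's
 * EEPROM).  "sc" and "mbox" stand in for a fully initialized adapter
 * and a valid mailbox:
 *
 *	u8 id[8];
 *
 *	if (t4_i2c_rd(sc, mbox, 0, 0xa0, 0, sizeof(id), id) == 0)
 *		... id[] now holds the first bytes of the device's data
 */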
4292 
4293 /**
4294  *	t4_i2c_wr - write I2C data to adapter
4295  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
4296  *	@port: Port number if per-port device; <0 if not
4297  *	@devid: per-port device ID or absolute device ID
4298  *	@offset: byte offset into device I2C space
4299  *	@len: byte length of I2C space data
4300  *	@buf: buffer containing new I2C data
4301  *
4302  *	Write the I2C data to the indicated device and location.
4303  */
4304 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
4305 	      int port, unsigned int devid,
4306 	      unsigned int offset, unsigned int len,
4307 	      u8 *buf)
4308 {
4309 	struct fw_ldst_cmd ldst;
4310 
4311 	if (port >= 4 ||
4312 	    devid >= 256 ||
4313 	    offset >= 256 ||
4314 	    len > sizeof ldst.u.i2c.data)
4315 		return -EINVAL;
4316 
4317 	memset(&ldst, 0, sizeof ldst);
4318 	ldst.op_to_addrspace =
4319 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4320 			    F_FW_CMD_REQUEST |
4321 			    F_FW_CMD_WRITE |
4322 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4323 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4324 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4325 	ldst.u.i2c.did = devid;
4326 	ldst.u.i2c.boffset = offset;
4327 	ldst.u.i2c.blen = len;
4328 	memcpy(ldst.u.i2c.data, buf, len);
4329 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4330 }
4331 
4332 /**
4333  *	t4_sge_ctxt_flush - flush the SGE context cache
4334  *	@adap: the adapter
4335  *	@mbox: mailbox to use for the FW command
4336  *
4337  *	Issues a FW command through the given mailbox to flush the
4338  *	SGE context cache.
4339  */
4340 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4341 {
4342 	int ret;
4343 	struct fw_ldst_cmd c;
4344 
4345 	memset(&c, 0, sizeof(c));
4346 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4347 			F_FW_CMD_READ |
4348 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4349 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4350 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4351 
4352 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4353 	return ret;
4354 }
4355 
4356 /**
4357  *	t4_sge_ctxt_rd - read an SGE context through FW
4358  *	@adap: the adapter
4359  *	@mbox: mailbox to use for the FW command
4360  *	@cid: the context id
4361  *	@ctype: the context type
4362  *	@data: where to store the context data
4363  *
4364  *	Issues a FW command through the given mailbox to read an SGE context.
4365  */
4366 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4367 		   enum ctxt_type ctype, u32 *data)
4368 {
4369 	int ret;
4370 	struct fw_ldst_cmd c;
4371 
4372 	if (ctype == CTXT_EGRESS)
4373 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4374 	else if (ctype == CTXT_INGRESS)
4375 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4376 	else if (ctype == CTXT_FLM)
4377 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4378 	else
4379 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4380 
4381 	memset(&c, 0, sizeof(c));
4382 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4383 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4384 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4385 	c.u.idctxt.physid = htonl(cid);
4386 
4387 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4388 	if (ret == 0) {
4389 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4390 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4391 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4392 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4393 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4394 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4395 	}
4396 	return ret;
4397 }
4398 
4399 /**
4400  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4401  *	@adap: the adapter
4402  *	@cid: the context id
4403  *	@ctype: the context type
4404  *	@data: where to store the context data
4405  *
4406  *	Reads an SGE context directly, bypassing FW.  This is only for
4407  *	debugging when FW is unavailable.
4408  */
4409 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4410 		      u32 *data)
4411 {
4412 	int i, ret;
4413 
4414 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4415 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4416 	if (!ret)
4417 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4418 			*data++ = t4_read_reg(adap, i);
4419 	return ret;
4420 }
4421 
4422 /**
4423  *	t4_fw_hello - establish communication with FW
4424  *	@adap: the adapter
4425  *	@mbox: mailbox to use for the FW command
4426  *	@evt_mbox: mailbox to receive async FW events
4427  *	@master: specifies the caller's willingness to be the device master
4428  *	@state: returns the current device state (if non-NULL)
4429  *
4430  *	Issues a command to establish communication with FW.  Returns either
4431  *	an error (negative integer) or the mailbox of the Master PF.
4432  */
4433 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4434 		enum dev_master master, enum dev_state *state)
4435 {
4436 	int ret;
4437 	struct fw_hello_cmd c;
4438 	u32 v;
4439 	unsigned int master_mbox;
4440 	int retries = FW_CMD_HELLO_RETRIES;
4441 
4442 retry:
4443 	memset(&c, 0, sizeof(c));
4444 	INIT_CMD(c, HELLO, WRITE);
4445 	c.err_to_clearinit = htonl(
4446 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4447 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4448 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4449 			M_FW_HELLO_CMD_MBMASTER) |
4450 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4451 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4452 		F_FW_HELLO_CMD_CLEARINIT);
4453 
4454 	/*
4455 	 * Issue the HELLO command to the firmware.  If it's not successful
4456 	 * but indicates that we got a "busy" or "timeout" condition, retry
4457 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4458 	 * retry limit, check to see if the firmware left us any error
4459 	 * information and report that if so ...
4460 	 */
4461 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4462 	if (ret != FW_SUCCESS) {
4463 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4464 			goto retry;
4465 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4466 			t4_report_fw_error(adap);
4467 		return ret;
4468 	}
4469 
4470 	v = ntohl(c.err_to_clearinit);
4471 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4472 	if (state) {
4473 		if (v & F_FW_HELLO_CMD_ERR)
4474 			*state = DEV_STATE_ERR;
4475 		else if (v & F_FW_HELLO_CMD_INIT)
4476 			*state = DEV_STATE_INIT;
4477 		else
4478 			*state = DEV_STATE_UNINIT;
4479 	}
4480 
4481 	/*
4482 	 * If we're not the Master PF then we need to wait around for the
4483 	 * Master PF Driver to finish setting up the adapter.
4484 	 *
4485 	 * Note that we also do this wait if we're a non-Master-capable PF and
4486 	 * there is no current Master PF; a Master PF may show up momentarily
4487 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4488 	 * OS loads lots of different drivers rapidly at the same time).  In
4489 	 * this case, the Master PF returned by the firmware will be
4490 	 * M_PCIE_FW_MASTER so the test below will work ...
4491 	 */
4492 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4493 	    master_mbox != mbox) {
4494 		int waiting = FW_CMD_HELLO_TIMEOUT;
4495 
4496 		/*
4497 		 * Wait for the firmware to either indicate an error or
4498 		 * initialized state.  If we see either of these we bail out
4499 		 * and report the issue to the caller.  If we exhaust the
4500 		 * "hello timeout" and we haven't exhausted our retries, try
4501 		 * again.  Otherwise bail with a timeout error.
4502 		 */
4503 		for (;;) {
4504 			u32 pcie_fw;
4505 
4506 			msleep(50);
4507 			waiting -= 50;
4508 
4509 			/*
4510 			 * If neither Error nor Initialized is indicated
4511 			 * by the firmware keep waiting till we exhaust our
4512 			 * timeout ... and then retry if we haven't exhausted
4513 			 * our retries ...
4514 			 */
4515 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4516 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4517 				if (waiting <= 0) {
4518 					if (retries-- > 0)
4519 						goto retry;
4520 
4521 					return -ETIMEDOUT;
4522 				}
4523 				continue;
4524 			}
4525 
4526 			/*
4527 			 * We either have an Error or Initialized condition;
4528 			 * report errors preferentially.
4529 			 */
4530 			if (state) {
4531 				if (pcie_fw & F_PCIE_FW_ERR)
4532 					*state = DEV_STATE_ERR;
4533 				else if (pcie_fw & F_PCIE_FW_INIT)
4534 					*state = DEV_STATE_INIT;
4535 			}
4536 
4537 			/*
4538 			 * If we arrived before a Master PF was selected and
4539 			 * there's now a valid Master PF, grab its identity
4540 			 * for our caller.
4541 			 */
4542 			if (master_mbox == M_PCIE_FW_MASTER &&
4543 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4544 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4545 			break;
4546 		}
4547 	}
4548 
4549 	return master_mbox;
4550 }
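
/*
 * Example (editorial sketch): contact the firmware, willing but not
 * insisting on being master.  "sc" and "mbox" stand in for the adapter
 * and this PF's mailbox, and MASTER_MAY is assumed to be the usual
 * "don't care" value of enum dev_master:
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(sc, mbox, mbox, MASTER_MAY, &state);
 *	if (master < 0)
 *		... failed to establish contact with the firmware
 *	else if (master == mbox)
 *		... this PF is the master and may initialize the device
 */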
4551 
4552 /**
4553  *	t4_fw_bye - end communication with FW
4554  *	@adap: the adapter
4555  *	@mbox: mailbox to use for the FW command
4556  *
4557  *	Issues a command to terminate communication with FW.
4558  */
4559 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4560 {
4561 	struct fw_bye_cmd c;
4562 
4563 	memset(&c, 0, sizeof(c));
4564 	INIT_CMD(c, BYE, WRITE);
4565 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4566 }
4567 
4568 /**
4569  *	t4_fw_reset - issue a reset to FW
4570  *	@adap: the adapter
4571  *	@mbox: mailbox to use for the FW command
4572  *	@reset: specifies the type of reset to perform
4573  *
4574  *	Issues a reset command of the specified type to FW.
4575  */
4576 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4577 {
4578 	struct fw_reset_cmd c;
4579 
4580 	memset(&c, 0, sizeof(c));
4581 	INIT_CMD(c, RESET, WRITE);
4582 	c.val = htonl(reset);
4583 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4584 }
4585 
4586 /**
4587  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4588  *	@adap: the adapter
4589  *	@mbox: mailbox to use for the FW RESET command (if desired)
4590  *	@force: force uP into RESET even if FW RESET command fails
4591  *
4592  *	Issues a RESET command to firmware (if desired) with a HALT indication
4593  *	and then puts the microprocessor into RESET state.  The RESET command
4594  *	will only be issued if a legitimate mailbox is provided (mbox <=
4595  *	M_PCIE_FW_MASTER).
4596  *
4597  *	This is generally used in order for the host to safely manipulate the
4598  *	adapter without fear of conflicting with whatever the firmware might
4599  *	be doing.  The only way out of this state is to RESTART the firmware
4600  *	...
4601  */
4602 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4603 {
4604 	int ret = 0;
4605 
4606 	/*
4607 	 * If a legitimate mailbox is provided, issue a RESET command
4608 	 * with a HALT indication.
4609 	 */
4610 	if (mbox <= M_PCIE_FW_MASTER) {
4611 		struct fw_reset_cmd c;
4612 
4613 		memset(&c, 0, sizeof(c));
4614 		INIT_CMD(c, RESET, WRITE);
4615 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4616 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4617 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4618 	}
4619 
4620 	/*
4621 	 * Normally we won't complete the operation if the firmware RESET
4622 	 * command fails but if our caller insists we'll go ahead and put the
4623 	 * uP into RESET.  This can be useful if the firmware is hung or even
4624 	 * missing ...  We'll have to take the risk of putting the uP into
4625 	 * RESET without the cooperation of firmware in that case.
4626 	 *
4627 	 * We also force the firmware's HALT flag to be on in case we bypassed
4628 	 * the firmware RESET command above or we're dealing with old firmware
4629 	 * which doesn't have the HALT capability.  This will serve as a flag
4630 	 * for the incoming firmware to know that it's coming out of a HALT
4631 	 * rather than a RESET ... if it's new enough to understand that ...
4632 	 */
4633 	if (ret == 0 || force) {
4634 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4635 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4636 	}
4637 
4638 	/*
4639 	 * And we always return the result of the firmware RESET command
4640 	 * even when we force the uP into RESET ...
4641 	 */
4642 	return ret;
4643 }
4644 
4645 /**
4646  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4647  *	@adap: the adapter
4648  *	@reset: if we want to do a RESET to restart things
4649  *
4650  *	Restart firmware previously halted by t4_fw_halt().  On successful
4651  *	return the previous PF Master remains as the new PF Master and there
4652  *	is no need to issue a new HELLO command, etc.
4653  *
4654  *	We do this in two ways:
4655  *
4656  *	 1. If we're dealing with newer firmware we'll simply want to take
4657  *	    the chip's microprocessor out of RESET.  This will cause the
4658  *	    firmware to start up from its start vector.  And then we'll loop
4659  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4660  *	    reset to 0) or we timeout.
4661  *
4662  *	 2. If we're dealing with older firmware then we'll need to RESET
4663  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4664  *	    flag and automatically RESET itself on startup.
4665  */
4666 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4667 {
4668 	if (reset) {
4669 		/*
4670 		 * Since we're directing the RESET instead of the firmware
4671 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4672 		 * bit.
4673 		 */
4674 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4675 
4676 		/*
4677 		 * If we've been given a valid mailbox, first try to get the
4678 		 * firmware to do the RESET.  If that works, great and we can
4679 		 * return success.  Otherwise, if we haven't been given a
4680 		 * valid mailbox or the RESET command failed, fall back to
4681 		 * hitting the chip with a hammer.
4682 		 */
4683 		if (mbox <= M_PCIE_FW_MASTER) {
4684 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4685 			msleep(100);
4686 			if (t4_fw_reset(adap, mbox,
4687 					F_PIORST | F_PIORSTMODE) == 0)
4688 				return 0;
4689 		}
4690 
4691 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4692 		msleep(2000);
4693 	} else {
4694 		int ms;
4695 
4696 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4697 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4698 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4699 				return FW_SUCCESS;
4700 			msleep(100);
4701 			ms += 100;
4702 		}
4703 		return -ETIMEDOUT;
4704 	}
4705 	return 0;
4706 }
4707 
4708 /**
4709  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4710  *	@adap: the adapter
4711  *	@mbox: mailbox to use for the FW RESET command (if desired)
4712  *	@fw_data: the firmware image to write
4713  *	@size: image size
4714  *	@force: force upgrade even if firmware doesn't cooperate
4715  *
4716  *	Perform all of the steps necessary for upgrading an adapter's
4717  *	firmware image.  Normally this requires the cooperation of the
4718  *	existing firmware in order to halt all existing activities
4719  *	but if an invalid mailbox token is passed in we skip that step
4720  *	(though we'll still put the adapter microprocessor into RESET in
4721  *	that case).
4722  *
4723  *	On successful return the new firmware will have been loaded and
4724  *	the adapter will have been fully RESET losing all previous setup
4725  *	state.  On unsuccessful return the adapter may be completely hosed ...
4726  *	positive errno indicates that the adapter is ~probably~ intact, a
4727  *	negative errno indicates that things are looking bad ...
4728  */
4729 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4730 		  const u8 *fw_data, unsigned int size, int force)
4731 {
4732 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4733 	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4734 	int reset, ret;
4735 
4736 	if (!bootstrap) {
4737 		ret = t4_fw_halt(adap, mbox, force);
4738 		if (ret < 0 && !force)
4739 			return ret;
4740 	}
4741 
4742 	ret = t4_load_fw(adap, fw_data, size);
4743 	if (ret < 0 || bootstrap)
4744 		return ret;
4745 
4746 	/*
4747 	 * Older versions of the firmware don't understand the new
4748 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4749 	 * restart.  So for newly loaded older firmware we'll have to do the
4750 	 * RESET for it so it starts up on a clean slate.  We can tell if
4751 	 * the newly loaded firmware will handle this right by checking
4752 	 * its header flags to see if it advertises the capability.
4753 	 */
4754 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4755 	return t4_fw_restart(adap, mbox, reset);
4756 }
4757 
4758 /**
4759  *	t4_fw_initialize - ask FW to initialize the device
4760  *	@adap: the adapter
4761  *	@mbox: mailbox to use for the FW command
4762  *
4763  *	Issues a command to FW to partially initialize the device.  This
4764  *	performs initialization that generally doesn't depend on user input.
4765  */
4766 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4767 {
4768 	struct fw_initialize_cmd c;
4769 
4770 	memset(&c, 0, sizeof(c));
4771 	INIT_CMD(c, INITIALIZE, WRITE);
4772 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4773 }
4774 
4775 /**
4776  *	t4_query_params - query FW or device parameters
4777  *	@adap: the adapter
4778  *	@mbox: mailbox to use for the FW command
4779  *	@pf: the PF
4780  *	@vf: the VF
4781  *	@nparams: the number of parameters
4782  *	@params: the parameter names
4783  *	@val: the parameter values
4784  *
4785  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4786  *	queried at once.
4787  */
4788 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4789 		    unsigned int vf, unsigned int nparams, const u32 *params,
4790 		    u32 *val)
4791 {
4792 	int i, ret;
4793 	struct fw_params_cmd c;
4794 	__be32 *p = &c.param[0].mnem;
4795 
4796 	if (nparams > 7)
4797 		return -EINVAL;
4798 
4799 	memset(&c, 0, sizeof(c));
4800 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4801 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4802 			    V_FW_PARAMS_CMD_VFN(vf));
4803 	c.retval_len16 = htonl(FW_LEN16(c));
4804 
4805 	for (i = 0; i < nparams; i++, p += 2, params++)
4806 		*p = htonl(*params);
4807 
4808 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4809 	if (ret == 0)
4810 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4811 			*val++ = ntohl(*p);
4812 	return ret;
4813 }
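
/*
 * Example (editorial sketch): query one device parameter.  The mnemonic
 * macros below follow the FW_PARAMS_CMD conventions from
 * t4fw_interface.h; "sc", "mbox", and "pf" stand in for a live adapter,
 * mailbox, and PF number:
 *
 *	u32 param, val;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	if (t4_query_params(sc, mbox, pf, 0, 1, &param, &val) == 0)
 *		... val now holds the port vector
 */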
4814 
4815 /**
4816  *	t4_set_params - sets FW or device parameters
4817  *	@adap: the adapter
4818  *	@mbox: mailbox to use for the FW command
4819  *	@pf: the PF
4820  *	@vf: the VF
4821  *	@nparams: the number of parameters
4822  *	@params: the parameter names
4823  *	@val: the parameter values
4824  *
4825  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4826  *	specified at once.
4827  */
4828 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4829 		  unsigned int vf, unsigned int nparams, const u32 *params,
4830 		  const u32 *val)
4831 {
4832 	struct fw_params_cmd c;
4833 	__be32 *p = &c.param[0].mnem;
4834 
4835 	if (nparams > 7)
4836 		return -EINVAL;
4837 
4838 	memset(&c, 0, sizeof(c));
4839 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4840 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4841 			    V_FW_PARAMS_CMD_VFN(vf));
4842 	c.retval_len16 = htonl(FW_LEN16(c));
4843 
4844 	while (nparams--) {
4845 		*p++ = htonl(*params);
4846 		params++;
4847 		*p++ = htonl(*val);
4848 		val++;
4849 	}
4850 
4851 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4852 }
4853 
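/*
 * Illustrative sketch: writing one parameter back.  The name/value
 * layout mirrors t4_query_params(); the wrapper is a hypothetical
 * stand-in for whatever mnemonic the caller composes.
 */
#if 0
static int example_set_one_param(struct adapter *adap, unsigned int mbox,
				 unsigned int pf, unsigned int vf,
				 u32 param, u32 value)
{
	/* Names and values travel as parallel single-entry arrays. */
	return t4_set_params(adap, mbox, pf, vf, 1, &param, &value);
}
#endif
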
4854 /**
4855  *	t4_cfg_pfvf - configure PF/VF resource limits
4856  *	@adap: the adapter
4857  *	@mbox: mailbox to use for the FW command
4858  *	@pf: the PF being configured
4859  *	@vf: the VF being configured
4860  *	@txq: the max number of egress queues
4861  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4862  *	@rxqi: the max number of interrupt-capable ingress queues
4863  *	@rxq: the max number of interruptless ingress queues
4864  *	@tc: the PCI traffic class
4865  *	@vi: the max number of virtual interfaces
4866  *	@cmask: the channel access rights mask for the PF/VF
4867  *	@pmask: the port access rights mask for the PF/VF
4868  *	@nexact: the maximum number of exact MPS filters
4869  *	@rcaps: read capabilities
4870  *	@wxcaps: write/execute capabilities
4871  *
4872  *	Configures resource limits and capabilities for a physical or virtual
4873  *	function.
4874  */
4875 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4876 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4877 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4878 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4879 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4880 {
4881 	struct fw_pfvf_cmd c;
4882 
4883 	memset(&c, 0, sizeof(c));
4884 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4885 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4886 			    V_FW_PFVF_CMD_VFN(vf));
4887 	c.retval_len16 = htonl(FW_LEN16(c));
4888 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4889 			       V_FW_PFVF_CMD_NIQ(rxq));
4890 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4891 			      V_FW_PFVF_CMD_PMASK(pmask) |
4892 			      V_FW_PFVF_CMD_NEQ(txq));
4893 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4894 				V_FW_PFVF_CMD_NEXACTF(nexact));
4895 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4896 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4897 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4898 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4899 }
4900 
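/*
 * Illustrative sketch: provisioning a PF with hypothetical resource
 * limits.  Every count below is made up for the example; real values
 * come from the configuration file or driver policy.
 */
#if 0
static int example_provision_pf(struct adapter *adap, unsigned int mbox,
				unsigned int pf)
{
	return t4_cfg_pfvf(adap, mbox, pf, 0,
			   64,		/* txq: egress queues */
			   64,		/* txq_eth_ctrl: Eth/ctrl queues */
			   64,		/* rxqi: interrupt-capable iqs */
			   0,		/* rxq: interruptless iqs */
			   0,		/* tc: PCI traffic class */
			   4,		/* vi: virtual interfaces */
			   0xf,		/* cmask: all channels */
			   0xf,		/* pmask: all ports */
			   16,		/* nexact: exact MPS filters */
			   0, 0);	/* rcaps, wxcaps */
}
#endif
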
4901 /**
4902  *	t4_alloc_vi_func - allocate a virtual interface
4903  *	@adap: the adapter
4904  *	@mbox: mailbox to use for the FW command
4905  *	@port: physical port associated with the VI
4906  *	@pf: the PF owning the VI
4907  *	@vf: the VF owning the VI
4908  *	@nmac: number of MAC addresses needed (1 to 5)
4909  *	@mac: the MAC addresses of the VI
4910  *	@rss_size: size of RSS table slice associated with this VI
4911  *	@portfunc: which Port Application Function MAC Address is desired
4912  *	@idstype: Intrusion Detection Type
4913  *
4914  *	Allocates a virtual interface for the given physical port.  If @mac is
4915  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4916  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4917  *	stored consecutively so the space needed is @nmac * 6 bytes.
4918  *	Returns a negative error number or the non-negative VI id.
4919  */
4920 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4921 		     unsigned int port, unsigned int pf, unsigned int vf,
4922 		     unsigned int nmac, u8 *mac, u16 *rss_size,
4923 		     unsigned int portfunc, unsigned int idstype)
4924 {
4925 	int ret;
4926 	struct fw_vi_cmd c;
4927 
4928 	memset(&c, 0, sizeof(c));
4929 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4930 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4931 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4932 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4933 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4934 			       V_FW_VI_CMD_FUNC(portfunc));
4935 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4936 	c.nmac = nmac - 1;
4937 
4938 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4939 	if (ret)
4940 		return ret;
4941 
4942 	if (mac) {
4943 		memcpy(mac, c.mac, sizeof(c.mac));
4944 		switch (nmac) {
4945 		case 5:
4946 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* FALLTHROUGH */
4947 		case 4:
4948 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* FALLTHROUGH */
4949 		case 3:
4950 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* FALLTHROUGH */
4951 		case 2:
4952 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4953 		}
4954 	}
4955 	if (rss_size)
4956 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4957 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4958 }
4959 
4960 /**
4961  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4962  *	@adap: the adapter
4963  *	@mbox: mailbox to use for the FW command
4964  *	@port: physical port associated with the VI
4965  *	@pf: the PF owning the VI
4966  *	@vf: the VF owning the VI
4967  *	@nmac: number of MAC addresses needed (1 to 5)
4968  *	@mac: the MAC addresses of the VI
4969  *	@rss_size: size of RSS table slice associated with this VI
4970  *
4971  *	Backwards-compatible convenience routine to allocate a Virtual
4972  *	Interface with an Ethernet Port Application Function and Intrusion
4973  *	Detection System disabled.
4974  */
4975 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4976 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4977 		u16 *rss_size)
4978 {
4979 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4980 				FW_VI_FUNC_ETH, 0);
4981 }
4982 
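/*
 * Illustrative sketch: allocating a VI with two MAC addresses.  The
 * buffer must hold nmac * 6 bytes because the addresses are returned
 * back to back; the wrapper is hypothetical.
 */
#if 0
static int example_alloc_vi(struct adapter *adap, unsigned int mbox,
			    unsigned int port, unsigned int pf,
			    unsigned int vf)
{
	u8 mac[2 * 6];		/* nmac == 2, 6 bytes each */
	u16 rss_size;
	int viid;

	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 2, mac, &rss_size);
	if (viid < 0)
		return viid;	/* negative error number */

	/* mac[0..5] is the first address, mac[6..11] the second. */
	return t4_free_vi(adap, mbox, pf, vf, viid);
}
#endif
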
4983 /**
4984  *	t4_free_vi - free a virtual interface
4985  *	@adap: the adapter
4986  *	@mbox: mailbox to use for the FW command
4987  *	@pf: the PF owning the VI
4988  *	@vf: the VF owning the VI
4989  *	@viid: virtual interface identifier
4990  *
4991  *	Free a previously allocated virtual interface.
4992  */
4993 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4994 	       unsigned int vf, unsigned int viid)
4995 {
4996 	struct fw_vi_cmd c;
4997 
4998 	memset(&c, 0, sizeof(c));
4999 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
5000 			    F_FW_CMD_REQUEST |
5001 			    F_FW_CMD_EXEC |
5002 			    V_FW_VI_CMD_PFN(pf) |
5003 			    V_FW_VI_CMD_VFN(vf));
5004 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
5005 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
5006 
5007 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5008 }
5009 
5010 /**
5011  *	t4_set_rxmode - set Rx properties of a virtual interface
5012  *	@adap: the adapter
5013  *	@mbox: mailbox to use for the FW command
5014  *	@viid: the VI id
5015  *	@mtu: the new MTU or -1
5016  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5017  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5018  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5019  *	@vlanex: 1 to enable hardware VLAN extraction, 0 to disable it, -1 no change
5020  *	@sleep_ok: if true we may sleep while awaiting command completion
5021  *
5022  *	Sets Rx properties of a virtual interface.
5023  */
5024 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5025 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
5026 		  bool sleep_ok)
5027 {
5028 	struct fw_vi_rxmode_cmd c;
5029 
5030 	/* convert to FW values */
5031 	if (mtu < 0)
5032 		mtu = M_FW_VI_RXMODE_CMD_MTU;
5033 	if (promisc < 0)
5034 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
5035 	if (all_multi < 0)
5036 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
5037 	if (bcast < 0)
5038 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
5039 	if (vlanex < 0)
5040 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
5041 
5042 	memset(&c, 0, sizeof(c));
5043 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
5044 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
5045 	c.retval_len16 = htonl(FW_LEN16(c));
5046 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
5047 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
5048 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
5049 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
5050 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
5051 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5052 }
5053 
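/*
 * Illustrative sketch: the -1 arguments leave MTU, all-multi,
 * broadcast, and VLAN extraction untouched while toggling promiscuous
 * mode only.  The wrapper is hypothetical.
 */
#if 0
static int example_set_promisc(struct adapter *adap, unsigned int mbox,
			       unsigned int viid, bool on)
{
	return t4_set_rxmode(adap, mbox, viid, -1, on ? 1 : 0, -1, -1, -1,
			     true);
}
#endif
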
5054 /**
5055  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5056  *	@adap: the adapter
5057  *	@mbox: mailbox to use for the FW command
5058  *	@viid: the VI id
5059  *	@free: if true any existing filters for this VI id are first removed
5060  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
5061  *	@addr: the MAC address(es)
5062  *	@idx: where to store the index of each allocated filter
5063  *	@hash: pointer to hash address filter bitmap
5064  *	@sleep_ok: call is allowed to sleep
5065  *
5066  *	Allocates an exact-match filter for each of the supplied addresses and
5067  *	sets it to the corresponding address.  If @idx is not %NULL it should
5068  *	have at least @naddr entries, each of which will be set to the index of
5069  *	the filter allocated for the corresponding MAC address.  If a filter
5070  *	could not be allocated for an address its index is set to 0xffff.
5071  *	If @hash is not %NULL addresses that fail to allocate an exact filter
5072  *	are hashed and the hash filter bitmap pointed to by @hash is updated.
5073  *
5074  *	Returns a negative error number or the number of filters allocated.
5075  */
5076 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5077 		      unsigned int viid, bool free, unsigned int naddr,
5078 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5079 {
5080 	int offset, ret = 0;
5081 	struct fw_vi_mac_cmd c;
5082 	unsigned int nfilters = 0;
5083 	unsigned int max_naddr = is_t4(adap) ?
5084 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
5085 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5086 	unsigned int rem = naddr;
5087 
5088 	if (naddr > max_naddr)
5089 		return -EINVAL;
5090 
5091 	for (offset = 0; offset < naddr; /* offset advanced in the body */) {
5092 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5093 					 ? rem
5094 					 : ARRAY_SIZE(c.u.exact));
5095 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5096 						     u.exact[fw_naddr]), 16);
5097 		struct fw_vi_mac_exact *p;
5098 		int i;
5099 
5100 		memset(&c, 0, sizeof(c));
5101 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5102 				     F_FW_CMD_REQUEST |
5103 				     F_FW_CMD_WRITE |
5104 				     V_FW_CMD_EXEC(free) |
5105 				     V_FW_VI_MAC_CMD_VIID(viid));
5106 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5107 					    V_FW_CMD_LEN16(len16));
5108 
5109 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5110 			p->valid_to_idx = htons(
5111 				F_FW_VI_MAC_CMD_VALID |
5112 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5113 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5114 		}
5115 
5116 		/*
5117 		 * It's okay if we run out of space in our MAC address arena.
5118 		 * Some of the addresses we submit may get stored so we need
5119 		 * to run through the reply to see what the results were ...
5120 		 */
5121 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5122 		if (ret && ret != -FW_ENOMEM)
5123 			break;
5124 
5125 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5126 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5127 
5128 			if (idx)
5129 				idx[offset+i] = (index >= max_naddr
5130 						 ? 0xffff
5131 						 : index);
5132 			if (index < max_naddr)
5133 				nfilters++;
5134 			else if (hash)
5135 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5136 		}
5137 
5138 		free = false;
5139 		offset += fw_naddr;
5140 		rem -= fw_naddr;
5141 	}
5142 
5143 	if (ret == 0 || ret == -FW_ENOMEM)
5144 		ret = nfilters;
5145 	return ret;
5146 }
5147 
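/*
 * Illustrative sketch: installing exact-match filters for a small
 * address list, then falling back to the inexact hash for any address
 * that did not fit.  The wrapper is hypothetical.
 */
#if 0
static int example_add_maclist(struct adapter *adap, unsigned int mbox,
			       unsigned int viid, const u8 **maclist,
			       unsigned int n)
{
	u64 hash = 0;
	int nfilters;

	nfilters = t4_alloc_mac_filt(adap, mbox, viid, false, n, maclist,
				     NULL, &hash, true);
	if (nfilters < 0)
		return nfilters;

	/* Addresses that missed an exact filter were accumulated in hash. */
	if (hash != 0)
		return t4_set_addr_hash(adap, mbox, viid, false, hash, true);
	return 0;
}
#endif
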
5148 /**
5149  *	t4_change_mac - modifies the exact-match filter for a MAC address
5150  *	@adap: the adapter
5151  *	@mbox: mailbox to use for the FW command
5152  *	@viid: the VI id
5153  *	@idx: index of existing filter for old value of MAC address, or -1
5154  *	@addr: the new MAC address value
5155  *	@persist: whether a new MAC allocation should be persistent
5156  *	@add_smt: if true also add the address to the HW SMT
5157  *
5158  *	Modifies an exact-match filter and sets it to the new MAC address if
5159  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5160  *	latter case the address is added persistently if @persist is %true.
5161  *
5162  *	Note that in general it is not possible to modify the value of a given
5163  *	filter so the generic way to modify an address filter is to free the one
5164  *	being used by the old address value and allocate a new filter for the
5165  *	new address value.
5166  *
5167  *	Returns a negative error number or the index of the filter with the new
5168  *	MAC value.  Note that this index may differ from @idx.
5169  */
5170 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5171 		  int idx, const u8 *addr, bool persist, bool add_smt)
5172 {
5173 	int ret, mode;
5174 	struct fw_vi_mac_cmd c;
5175 	struct fw_vi_mac_exact *p = c.u.exact;
5176 	unsigned int max_mac_addr = is_t4(adap) ?
5177 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5178 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5179 
5180 	if (idx < 0)                             /* new allocation */
5181 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5182 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5183 
5184 	memset(&c, 0, sizeof(c));
5185 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5186 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5187 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5188 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5189 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5190 				V_FW_VI_MAC_CMD_IDX(idx));
5191 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5192 
5193 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5194 	if (ret == 0) {
5195 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5196 		if (ret >= max_mac_addr)
5197 			ret = -ENOMEM;
5198 	}
5199 	return ret;
5200 }
5201 
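/*
 * Illustrative sketch: installing a VI's primary unicast address for
 * the first time (idx == -1 requests a new, persistent filter, and the
 * address is also pushed to the SMT).  The wrapper is hypothetical.
 */
#if 0
static int example_set_primary_mac(struct adapter *adap, unsigned int mbox,
				   unsigned int viid, const u8 *addr)
{
	int idx;

	idx = t4_change_mac(adap, mbox, viid, -1, addr, true, true);
	/* On success the (possibly different) filter index is returned. */
	return idx < 0 ? idx : 0;
}
#endif
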
5202 /**
5203  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5204  *	@adap: the adapter
5205  *	@mbox: mailbox to use for the FW command
5206  *	@viid: the VI id
5207  *	@ucast: whether the hash filter should also match unicast addresses
5208  *	@vec: the value to be written to the hash filter
5209  *	@sleep_ok: call is allowed to sleep
5210  *
5211  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5212  */
5213 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5214 		     bool ucast, u64 vec, bool sleep_ok)
5215 {
5216 	struct fw_vi_mac_cmd c;
5217 
5218 	memset(&c, 0, sizeof(c));
5219 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5220 			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
5221 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5222 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5223 				    V_FW_CMD_LEN16(1));
5224 	c.u.hash.hashvec = cpu_to_be64(vec);
5225 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5226 }
5227 
5228 /**
5229  *	t4_enable_vi - enable/disable a virtual interface
5230  *	@adap: the adapter
5231  *	@mbox: mailbox to use for the FW command
5232  *	@viid: the VI id
5233  *	@rx_en: 1=enable Rx, 0=disable Rx
5234  *	@tx_en: 1=enable Tx, 0=disable Tx
5235  *
5236  *	Enables/disables a virtual interface.
5237  */
5238 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5239 		 bool rx_en, bool tx_en)
5240 {
5241 	struct fw_vi_enable_cmd c;
5242 
5243 	memset(&c, 0, sizeof(c));
5244 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5245 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5246 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5247 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5248 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5249 }
5250 
5251 /**
5252  *	t4_identify_port - identify a VI's port by blinking its LED
5253  *	@adap: the adapter
5254  *	@mbox: mailbox to use for the FW command
5255  *	@viid: the VI id
5256  *	@nblinks: how many times to blink LED at 2.5 Hz
5257  *
5258  *	Identifies a VI's port by blinking its LED.
5259  */
5260 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5261 		     unsigned int nblinks)
5262 {
5263 	struct fw_vi_enable_cmd c;
5264 
5265 	memset(&c, 0, sizeof(c));
5266 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5267 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5268 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5269 	c.blinkdur = htons(nblinks);
5270 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5271 }
5272 
5273 /**
5274  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5275  *	@adap: the adapter
5276  *	@mbox: mailbox to use for the FW command
5277  *	@start: %true to enable the queues, %false to disable them
5278  *	@pf: the PF owning the queues
5279  *	@vf: the VF owning the queues
5280  *	@iqid: ingress queue id
5281  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5282  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5283  *
5284  *	Starts or stops an ingress queue and its associated FLs, if any.
5285  */
5286 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5287 		     unsigned int pf, unsigned int vf, unsigned int iqid,
5288 		     unsigned int fl0id, unsigned int fl1id)
5289 {
5290 	struct fw_iq_cmd c;
5291 
5292 	memset(&c, 0, sizeof(c));
5293 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5294 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5295 			    V_FW_IQ_CMD_VFN(vf));
5296 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5297 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5298 	c.iqid = htons(iqid);
5299 	c.fl0id = htons(fl0id);
5300 	c.fl1id = htons(fl1id);
5301 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5302 }
5303 
5304 /**
5305  *	t4_iq_free - free an ingress queue and its FLs
5306  *	@adap: the adapter
5307  *	@mbox: mailbox to use for the FW command
5308  *	@pf: the PF owning the queues
5309  *	@vf: the VF owning the queues
5310  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5311  *	@iqid: ingress queue id
5312  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5313  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5314  *
5315  *	Frees an ingress queue and its associated FLs, if any.
5316  */
5317 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5318 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5319 	       unsigned int fl0id, unsigned int fl1id)
5320 {
5321 	struct fw_iq_cmd c;
5322 
5323 	memset(&c, 0, sizeof(c));
5324 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5325 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5326 			    V_FW_IQ_CMD_VFN(vf));
5327 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5328 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5329 	c.iqid = htons(iqid);
5330 	c.fl0id = htons(fl0id);
5331 	c.fl1id = htons(fl1id);
5332 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5333 }
5334 
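/*
 * Illustrative sketch: quiescing and then releasing an ingress queue
 * that has no free lists attached (0xffff marks the absent FL0/FL1).
 * FW_IQ_TYPE_FL_INT_CAP is assumed to match the type used at
 * allocation; the wrapper is hypothetical.
 */
#if 0
static int example_teardown_iq(struct adapter *adap, unsigned int mbox,
			       unsigned int pf, unsigned int vf,
			       unsigned int iqid)
{
	int ret;

	ret = t4_iq_start_stop(adap, mbox, false, pf, vf, iqid, 0xffff,
			       0xffff);
	if (ret)
		return ret;
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP, iqid,
			  0xffff, 0xffff);
}
#endif
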
5335 /**
5336  *	t4_eth_eq_free - free an Ethernet egress queue
5337  *	@adap: the adapter
5338  *	@mbox: mailbox to use for the FW command
5339  *	@pf: the PF owning the queue
5340  *	@vf: the VF owning the queue
5341  *	@eqid: egress queue id
5342  *
5343  *	Frees an Ethernet egress queue.
5344  */
5345 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5346 		   unsigned int vf, unsigned int eqid)
5347 {
5348 	struct fw_eq_eth_cmd c;
5349 
5350 	memset(&c, 0, sizeof(c));
5351 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5352 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5353 			    V_FW_EQ_ETH_CMD_VFN(vf));
5354 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5355 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5356 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5357 }
5358 
5359 /**
5360  *	t4_ctrl_eq_free - free a control egress queue
5361  *	@adap: the adapter
5362  *	@mbox: mailbox to use for the FW command
5363  *	@pf: the PF owning the queue
5364  *	@vf: the VF owning the queue
5365  *	@eqid: egress queue id
5366  *
5367  *	Frees a control egress queue.
5368  */
5369 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5370 		    unsigned int vf, unsigned int eqid)
5371 {
5372 	struct fw_eq_ctrl_cmd c;
5373 
5374 	memset(&c, 0, sizeof(c));
5375 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5376 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5377 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5378 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5379 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5380 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5381 }
5382 
5383 /**
5384  *	t4_ofld_eq_free - free an offload egress queue
5385  *	@adap: the adapter
5386  *	@mbox: mailbox to use for the FW command
5387  *	@pf: the PF owning the queue
5388  *	@vf: the VF owning the queue
5389  *	@eqid: egress queue id
5390  *
5391  *	Frees an offload egress queue.
5392  */
5393 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5394 		    unsigned int vf, unsigned int eqid)
5395 {
5396 	struct fw_eq_ofld_cmd c;
5397 
5398 	memset(&c, 0, sizeof(c));
5399 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5400 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5401 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5402 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5403 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5404 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5405 }
5406 
5407 /**
5408  *	t4_handle_fw_rpl - process a FW reply message
5409  *	@adap: the adapter
5410  *	@rpl: start of the FW message
5411  *
5412  *	Processes a FW message, such as link state change messages.
5413  */
5414 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5415 {
5416 	u8 opcode = *(const u8 *)rpl;
5417 	const struct fw_port_cmd *p = (const void *)rpl;
5418 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5419 
5420 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5421 		/* link/module state change message */
5422 		int speed = 0, fc = 0, i;
5423 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5424 		struct port_info *pi = NULL;
5425 		struct link_config *lc;
5426 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5427 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5428 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5429 
5430 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5431 			fc |= PAUSE_RX;
5432 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5433 			fc |= PAUSE_TX;
5434 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5435 			speed = SPEED_100;
5436 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5437 			speed = SPEED_1000;
5438 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5439 			speed = SPEED_10000;
5440 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5441 			speed = SPEED_40000;
5442 
5443 		for_each_port(adap, i) {
5444 			pi = adap2pinfo(adap, i);
5445 			if (pi->tx_chan == chan)
5446 				break;
5447 		}
5448 		lc = &pi->link_cfg;
5449 
5450 		if (mod != pi->mod_type) {
5451 			pi->mod_type = mod;
5452 			t4_os_portmod_changed(adap, i);
5453 		}
5454 		if (link_ok != lc->link_ok || speed != lc->speed ||
5455 		    fc != lc->fc) {                    /* something changed */
5456 			int reason;
5457 
5458 			if (!link_ok && lc->link_ok)
5459 				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5460 			else
5461 				reason = -1;
5462 
5463 			lc->link_ok = link_ok;
5464 			lc->speed = speed;
5465 			lc->fc = fc;
5466 			lc->supported = ntohs(p->u.info.pcap);
5467 			t4_os_link_changed(adap, i, link_ok, reason);
5468 		}
5469 	} else {
5470 		CH_WARN_RATELIMIT(adap,
5471 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5472 		return -EINVAL;
5473 	}
5474 	return 0;
5475 }
5476 
5477 /**
5478  *	get_pci_mode - determine a card's PCI mode
5479  *	@adapter: the adapter
5480  *	@p: where to store the PCI settings
5481  *
5482  *	Determines a card's PCI mode and associated parameters, such as speed
5483  *	and width.
5484  */
5485 static void __devinit get_pci_mode(struct adapter *adapter,
5486 				   struct pci_params *p)
5487 {
5488 	u16 val;
5489 	u32 pcie_cap;
5490 
5491 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5492 	if (pcie_cap) {
5493 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5494 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5495 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5496 	}
5497 }
5498 
5499 /**
5500  *	init_link_config - initialize a link's SW state
5501  *	@lc: structure holding the link state
5502  *	@caps: link capabilities
5503  *
5504  *	Initializes the SW state maintained for each link, including the link's
5505  *	capabilities and default speed/flow-control/autonegotiation settings.
5506  */
5507 static void __devinit init_link_config(struct link_config *lc,
5508 				       unsigned int caps)
5509 {
5510 	lc->supported = caps;
5511 	lc->requested_speed = 0;
5512 	lc->speed = 0;
5513 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5514 	if (lc->supported & FW_PORT_CAP_ANEG) {
5515 		lc->advertising = lc->supported & ADVERT_MASK;
5516 		lc->autoneg = AUTONEG_ENABLE;
5517 		lc->requested_fc |= PAUSE_AUTONEG;
5518 	} else {
5519 		lc->advertising = 0;
5520 		lc->autoneg = AUTONEG_DISABLE;
5521 	}
5522 }
5523 
5524 static int __devinit get_flash_params(struct adapter *adapter)
5525 {
5526 	int ret;
5527 	u32 info = 0;
5528 
5529 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5530 	if (!ret)
5531 		ret = sf1_read(adapter, 3, 0, 1, &info);
5532 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5533 	if (ret < 0)
5534 		return ret;
5535 
5536 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5537 		return -EINVAL;
5538 	info >>= 16;                           /* log2 of size */
5539 	if (info >= 0x14 && info < 0x18)
5540 		adapter->params.sf_nsec = 1 << (info - 16);
5541 	else if (info == 0x18)
5542 		adapter->params.sf_nsec = 64;
5543 	else
5544 		return -EINVAL;
5545 	adapter->params.sf_size = 1 << info;
5546 	return 0;
5547 }
5548 
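/*
 * Worked example of the decode above (illustrative): a read-ID reply
 * whose low byte is 0x20 (Numonix) and whose bits 23:16 hold the size
 * code 0x17 yields sf_nsec = 1 << (0x17 - 16) = 128 sectors and
 * sf_size = 1 << 0x17 = 8 MB.
 */
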
5549 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5550 						  u8 range)
5551 {
5552 	u16 val;
5553 	u32 pcie_cap;
5554 
5555 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5556 	if (pcie_cap) {
5557 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5558 		val &= 0xfff0;
5559 		val |= range;
5560 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5561 	}
5562 }
5563 
5564 /**
5565  *	t4_prep_adapter - prepare SW and HW for operation
5566  *	@adapter: the adapter
5568  *
5569  *	Initialize adapter SW state for the various HW modules, set initial
5570  *	values for some adapter tunables, take PHYs out of reset, and
5571  *	initialize the MDIO interface.
5572  */
5573 int __devinit t4_prep_adapter(struct adapter *adapter)
5574 {
5575 	int ret;
5576 	uint16_t device_id;
5577 	uint32_t pl_rev;
5578 
5579 	get_pci_mode(adapter, &adapter->params.pci);
5580 
5581 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5582 	adapter->params.chipid = G_CHIPID(pl_rev);
5583 	adapter->params.rev = G_REV(pl_rev);
5584 	if (adapter->params.chipid == 0) {
5585 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5586 		adapter->params.chipid = CHELSIO_T4;
5587 
5588 		/* T4A1 chip is not supported */
5589 		if (adapter->params.rev == 1) {
5590 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5591 			return -EINVAL;
5592 		}
5593 	}
5594 	adapter->params.pci.vpd_cap_addr =
5595 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5596 
5597 	ret = get_flash_params(adapter);
5598 	if (ret < 0)
5599 		return ret;
5600 
5601 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5602 	if (ret < 0)
5603 		return ret;
5604 
5605 	/* Cards with real ASICs have the chipid in the PCIe device id */
5606 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5607 	if (device_id >> 12 == adapter->params.chipid)
5608 		adapter->params.cim_la_size = CIMLA_SIZE;
5609 	else {
5610 		/* FPGA */
5611 		adapter->params.fpga = 1;
5612 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5613 	}
5614 
5615 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5616 
5617 	/*
5618 	 * Default port and clock for debugging in case we can't reach FW.
5619 	 */
5620 	adapter->params.nports = 1;
5621 	adapter->params.portvec = 1;
5622 	adapter->params.vpd.cclk = 50000;
5623 
5624 	/* Set pci completion timeout value to 4 seconds. */
5625 	set_pcie_completion_timeout(adapter, 0xd);
5626 	return 0;
5627 }
5628 
5629 /**
5630  *	t4_init_tp_params - initialize adap->params.tp
5631  *	@adap: the adapter
5632  *
5633  *	Initialize various fields of the adapter's TP Parameters structure.
5634  */
5635 int __devinit t4_init_tp_params(struct adapter *adap)
5636 {
5637 	int chan;
5638 	u32 v;
5639 
5640 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5641 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5642 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5643 
5644 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5645 	for (chan = 0; chan < NCHAN; chan++)
5646 		adap->params.tp.tx_modq[chan] = chan;
5647 
5648 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5649 			 &adap->params.tp.ingress_config, 1,
5650 			 A_TP_INGRESS_CONFIG);
5651 	refresh_vlan_pri_map(adap);
5652 
5653 	return 0;
5654 }
5655 
5656 /**
5657  *	t4_filter_field_shift - calculate filter field shift
5658  *	@adap: the adapter
5659  *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5660  *
5661  *	Return the shift position of a filter field within the Compressed
5662  *	Filter Tuple.  The filter field is specified via its selection bit
5663  *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
5664  */
5665 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5666 {
5667 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5668 	unsigned int sel;
5669 	int field_shift;
5670 
5671 	if ((filter_mode & filter_sel) == 0)
5672 		return -1;
5673 
5674 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5675 		switch (filter_mode & sel) {
5676 		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5677 		case F_PORT:          field_shift += W_FT_PORT;          break;
5678 		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5679 		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5680 		case F_TOS:           field_shift += W_FT_TOS;           break;
5681 		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5682 		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5683 		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5684 		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5685 		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5686 		}
5687 	}
5688 	return field_shift;
5689 }
5690 
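/*
 * Worked example (illustrative): with a filter mode of
 * F_PORT | F_VLAN | F_PROTOCOL, asking for F_PROTOCOL walks the two
 * enabled lower-order fields, so the shift is W_FT_PORT + W_FT_VLAN.
 * A field absent from the mode (e.g. F_TOS here) returns -1.
 */
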
5691 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5692 {
5693 	u8 addr[6];
5694 	int ret, i, j;
5695 	struct fw_port_cmd c;
5696 	u16 rss_size;
5697 	adapter_t *adap = p->adapter;
5698 
5699 	memset(&c, 0, sizeof(c));
5700 
5701 	for (i = 0, j = -1; i <= p->port_id; i++) {
5702 		do {
5703 			j++;
5704 		} while ((adap->params.portvec & (1 << j)) == 0);
5705 	}
5706 
5707 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5708 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5709 			       V_FW_PORT_CMD_PORTID(j));
5710 	c.action_to_len16 = htonl(
5711 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5712 		FW_LEN16(c));
5713 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5714 	if (ret)
5715 		return ret;
5716 
5717 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5718 	if (ret < 0)
5719 		return ret;
5720 
5721 	p->viid = ret;
5722 	p->tx_chan = j;
5723 	p->rx_chan_map = get_mps_bg_map(adap, j);
5724 	p->lport = j;
5725 	p->rss_size = rss_size;
5726 	t4_os_set_hw_addr(adap, p->port_id, addr);
5727 
5728 	ret = ntohl(c.u.info.lstatus_to_modtype);
5729 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5730 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5731 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5732 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5733 
5734 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5735 
5736 	return 0;
5737 }
5738 
5739 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
5740 		    int sleep_ok)
5741 {
5742 	struct fw_sched_cmd cmd;
5743 
5744 	memset(&cmd, 0, sizeof(cmd));
5745 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5746 				      F_FW_CMD_REQUEST |
5747 				      F_FW_CMD_WRITE);
5748 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5749 
5750 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5751 	cmd.u.config.type = type;
5752 	cmd.u.config.minmaxen = minmaxen;
5753 
5754 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5755 			       NULL, sleep_ok);
5756 }
5757 
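/*
 * Illustrative sketch: enabling min/max rate enforcement for the packet
 * scheduler.  FW_SCHED_TYPE_PKTSCHED is assumed to be the scheduler
 * type constant from t4fw_interface.h; the wrapper is hypothetical.
 */
#if 0
static int example_enable_rate_limiting(struct adapter *adap)
{
	/* type = packet scheduler, minmaxen = 1, may sleep */
	return t4_sched_config(adap, FW_SCHED_TYPE_PKTSCHED, 1, 1);
}
#endif
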
5758 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5759 		    int rateunit, int ratemode, int channel, int cl,
5760 		    int minrate, int maxrate, int weight, int pktsize,
5761 		    int sleep_ok)
5762 {
5763 	struct fw_sched_cmd cmd;
5764 
5765 	memset(&cmd, 0, sizeof(cmd));
5766 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5767 				      F_FW_CMD_REQUEST |
5768 				      F_FW_CMD_WRITE);
5769 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5770 
5771 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5772 	cmd.u.params.type = type;
5773 	cmd.u.params.level = level;
5774 	cmd.u.params.mode = mode;
5775 	cmd.u.params.ch = channel;
5776 	cmd.u.params.cl = cl;
5777 	cmd.u.params.unit = rateunit;
5778 	cmd.u.params.rate = ratemode;
5779 	cmd.u.params.min = cpu_to_be32(minrate);
5780 	cmd.u.params.max = cpu_to_be32(maxrate);
5781 	cmd.u.params.weight = cpu_to_be16(weight);
5782 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
5783 
5784 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5785 			       NULL, sleep_ok);
5786 }
5787