/* xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision ddd5b8e9b4d8957fce018c520657cdfa4ecffad3) */
/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
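
/*
 * Illustrative usage sketch (not part of the driver): polling a
 * hypothetical DONE bit for up to 1ms (100 attempts x 10us):
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, A_SOME_STATUS_REG, F_SOME_DONE, 1,
 *				100, 10, &val))
 *		return -EAGAIN;
 *
 * A_SOME_STATUS_REG and F_SOME_DONE are made-up names; real callers in
 * this file use the t4_wait_op_done() wrapper with actual register and
 * field macros.
 */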

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
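
/*
 * Illustrative sketch (not part of the driver): @val must already be
 * shifted into the field's position, e.g. writing 2 into a 2-bit field
 * whose mask is 0x30:
 *
 *	t4_set_reg_field(adap, addr, 0x30, 2 << 4);
 *
 * which performs reg = (reg & ~0x30) | 0x20 and leaves all other bits
 * untouched.
 */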

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
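
/*
 * Illustrative sketch (not part of the driver): reading four consecutive
 * indirect registers through a hypothetical address/data register pair:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, A_SOME_ADDR_REG, A_SOME_DATA_REG, vals, 4, 0);
 *
 * Each iteration writes the running index into A_SOME_ADDR_REG and reads
 * the selected register's value back from A_SOME_DATA_REG.
 */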

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/**
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (!(pcie_fw & F_PCIE_FW_ERR))
		CH_ERR(adap, "Firmware error report called with no error\n");
	else
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
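
/*
 * Illustrative sketch (not part of the driver): callers build a FW command
 * structure in big-endian and hand it to the mailbox routine, e.g. with
 * the RESET command defined in t4fw_interface.h:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *	    F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(V_FW_CMD_LEN16(FW_LEN16(c)));
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
 *
 * When a reply is requested via @rpl, the FW's big-endian response of the
 * same length overwrites the supplied buffer.
 */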

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
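
/*
 * Illustrative sketch (not part of the driver): reading 128 bytes from
 * EDC0 at 4-byte-aligned adapter address 0x1000:
 *
 *	__be32 buf[32];
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
 *
 * On success buf[] holds raw big-endian words from adapter memory; the
 * caller byte-swaps any multi-byte integers itself.  MEM_EDC0 is the
 * memory-type constant from common.h.
 */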

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
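
/*
 * Worked example of the mapping above (assuming the 17408-byte EEPROMSIZE
 * from common.h), with @fn = 1 and @sz = 1024, i.e. A = 1K, ES = 17408:
 *
 *	t4_eeprom_ptov(0x000, 1, 1024) == 0x7c00   (0      -> 31K)
 *	t4_eeprom_ptov(0x400, 1, 1024) == 0x4000   (1K     -> ES - A)
 *	t4_eeprom_ptov(0x800, 1, 1024) == 0x0000   (1K + A -> 0)
 */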

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
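
/*
 * Illustrative sketch (not part of the driver): @cont chains SPI transfers
 * into one flash command, e.g. issuing SF_RD_ID and then pulling back the
 * 3-byte JEDEC ID in a second, chained transfer:
 *
 *	u32 id;
 *
 *	ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);
 *	if (ret == 0)
 *		ret = sf1_read(adapter, 3, 0, 1, &id);
 *	t4_write_reg(adapter, A_SF_OP, 0);
 *
 * On success the low three bytes of id hold the JEDEC manufacturer and
 * device codes; the final write drops the SF lock taken via @lock = 1.
 */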

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
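
/*
 * Illustrative sketch (not part of the driver): t4_get_fw_version() below
 * is a minimal caller; fetching one word from a 4-byte-aligned flash
 * address looks like:
 *
 *	u32 word;
 *
 *	ret = t4_read_flash(adapter, FLASH_FW_START, 1, &word, 0);
 *
 * With @byte_oriented == 0 the word arrives in host order; with 1 the
 * buffer keeps the on-flash (big-endian) byte layout.
 */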

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk); otherwise it is stored big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = FW_VERSION_MAJOR_T4;
		exp_minor = FW_VERSION_MINOR_T4;
		exp_micro = FW_VERSION_MICRO_T4;
		break;
	case CHELSIO_T5:
		exp_major = FW_VERSION_MAJOR_T5;
		exp_minor = FW_VERSION_MINOR_T5;
		exp_micro = FW_VERSION_MICRO_T5;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
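
/*
 * Illustrative note (not part of the driver): fw_vers packs major, minor,
 * micro and build as four 8-bit fields from the top byte down, so a value
 * of 0x01080400 decodes as:
 *
 *	G_FW_HDR_FW_VER_MAJOR(0x01080400) == 1
 *	G_FW_HDR_FW_VER_MINOR(0x01080400) == 8
 *	G_FW_HDR_FW_VER_MICRO(0x01080400) == 4
 *
 * i.e. firmware 1.8.4.0 (assuming the usual field layout of the
 * G_FW_HDR_FW_VER_* extractors in t4fw_interface.h).
 */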

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	addr = t4_flash_cfg_addr(adap);
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}


/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FLASH_FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FLASH_FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
	    FLASH_FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max size: 1024 512B chunks */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device IDs
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if the code type is Legacy (0x00)
		 * or EFI (0x03).
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert the summed value to create the checksum and
			 * write the new value directly into the boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
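
/*
 * Illustrative note (not part of the driver) on the checksum fix-up above:
 * a legacy option-ROM image must have all of its bytes sum to 0 mod 256.
 * With the cksum field zeroed first, csum is the sum of every other byte,
 * so storing the two's complement -csum at byte offset 7 (the cksum field)
 * makes the total (csum + (-csum)) & 0xff == 0.  For a made-up three-byte
 * image {0x55, 0xaa, cksum}: csum = 0xff, so cksum becomes 0x01 and
 * 0x55 + 0xaa + 0x01 == 0x100 == 0 (mod 256).
 */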

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has two sections: a 28-byte header followed by the
 *	boot image itself.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID doesn't match Chelsio ID\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so we strip off the PF-4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}
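
/*
 * Illustrative sketch (not part of the driver): a caller provides one slot
 * per queue, IBQs first and then OBQs, and only the IBQs report a full
 * threshold:
 *
 *	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 thres[CIM_NUM_IBQ];
 *
 *	t4_read_cimq_cfg(adap, base, size, thres);
 *
 * Sizing for CIM_NUM_OBQ_T5 covers both chips since T4 uses fewer OBQs.
 */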

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10ms before the IBQ debug read access is
		 * allowed.  Wait up to 1 second with a delay of 1 usec
		 * between polls.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
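
/*
 * Illustrative sketch (not part of the driver): t4_cim_read_la() below
 * pairs these helpers for a read-modify-write of one CIM config word:
 *
 *	unsigned int cfg;
 *
 *	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
 *	if (ret == 0)
 *		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
 *		    cfg | F_UPDBGLAEN);
 *
 * i.e. read one 4-byte word from the CIM internal address space, set a
 * bit, and write it back through the one-word convenience wrapper.
 */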
1641 
1642 /**
1643  *	t4_cim_ctl_read - read a block from CIM control region
1644  *	@adap: the adapter
1645  *	@addr: the start address within the CIM control region
1646  *	@n: number of words to read
1647  *	@valp: where to store the result
1648  *
1649  *	Reads a block of 4-byte words from the CIM control region.
1650  */
1651 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1652 		    unsigned int *valp)
1653 {
1654 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1655 }
1656 
1657 /**
1658  *	t4_cim_read_la - read CIM LA capture buffer
1659  *	@adap: the adapter
1660  *	@la_buf: where to store the LA data
1661  *	@wrptr: the HW write pointer within the capture buffer
1662  *
1663  *	Reads the contents of the CIM LA buffer with the most recent entry at
1664  *	the end	of the returned data and with the entry at @wrptr first.
1665  *	We try to leave the LA in the running state we find it in.
1666  */
1667 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1668 {
1669 	int i, ret;
1670 	unsigned int cfg, val, idx;
1671 
1672 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1673 	if (ret)
1674 		return ret;
1675 
1676 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1677 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1678 		if (ret)
1679 			return ret;
1680 	}
1681 
1682 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1683 	if (ret)
1684 		goto restart;
1685 
1686 	idx = G_UPDBGLAWRPTR(val);
1687 	if (wrptr)
1688 		*wrptr = idx;
1689 
1690 	for (i = 0; i < adap->params.cim_la_size; i++) {
1691 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1692 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1693 		if (ret)
1694 			break;
1695 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1696 		if (ret)
1697 			break;
1698 		if (val & F_UPDBGLARDEN) {
1699 			ret = -ETIMEDOUT;
1700 			break;
1701 		}
1702 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1703 		if (ret)
1704 			break;
1705 		idx = (idx + 1) & M_UPDBGLARDPTR;
1706 	}
1707 restart:
1708 	if (cfg & F_UPDBGLAEN) {
1709 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1710 				      cfg & ~F_UPDBGLARDEN);
1711 		if (!ret)
1712 			ret = r;
1713 	}
1714 	return ret;
1715 }
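
/*
 * Illustrative sketch: one way a caller might capture the CIM LA.  The
 * buffer must provide adap->params.cim_la_size 32-bit entries, which is
 * exactly how many words t4_cim_read_la() fetches (entry at the write
 * pointer first, most recent entry last); CH_ERR is borrowed from the
 * driver purely as a convenient logging call for this example.
 */
static int example_capture_cim_la(struct adapter *adap, u32 *buf)
{
	unsigned int wrptr;
	int ret;

	ret = t4_cim_read_la(adap, buf, &wrptr);
	if (ret)
		CH_ERR(adap, "CIM LA capture failed: %d\n", ret);
	return ret;
}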
1716 
1717 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1718 			unsigned int *pif_req_wrptr,
1719 			unsigned int *pif_rsp_wrptr)
1720 {
1721 	int i, j;
1722 	u32 cfg, val, req, rsp;
1723 
1724 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1725 	if (cfg & F_LADBGEN)
1726 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1727 
1728 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1729 	req = G_POLADBGWRPTR(val);
1730 	rsp = G_PILADBGWRPTR(val);
1731 	if (pif_req_wrptr)
1732 		*pif_req_wrptr = req;
1733 	if (pif_rsp_wrptr)
1734 		*pif_rsp_wrptr = rsp;
1735 
1736 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1737 		for (j = 0; j < 6; j++) {
1738 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1739 				     V_PILADBGRDPTR(rsp));
1740 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1741 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1742 			req++;
1743 			rsp++;
1744 		}
1745 		req = (req + 2) & M_POLADBGRDPTR;
1746 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1747 	}
1748 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1749 }
1750 
1751 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1752 {
1753 	u32 cfg;
1754 	int i, j, idx;
1755 
1756 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1757 	if (cfg & F_LADBGEN)
1758 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1759 
1760 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1761 		for (j = 0; j < 5; j++) {
1762 			idx = 8 * i + j;
1763 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1764 				     V_PILADBGRDPTR(idx));
1765 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1766 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1767 		}
1768 	}
1769 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1770 }
1771 
1772 /**
1773  *	t4_tp_read_la - read TP LA capture buffer
1774  *	@adap: the adapter
1775  *	@la_buf: where to store the LA data
1776  *	@wrptr: the HW write pointer within the capture buffer
1777  *
1778  *	Reads the contents of the TP LA buffer with the most recent entry at
1779  *	the end	of the returned data and with the entry at @wrptr first.
1780  *	the end of the returned data and with the entry at @wrptr first.
1781  */
1782 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1783 {
1784 	bool last_incomplete;
1785 	unsigned int i, cfg, val, idx;
1786 
1787 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1788 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1789 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1790 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1791 
1792 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1793 	idx = G_DBGLAWPTR(val);
1794 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1795 	if (last_incomplete)
1796 		idx = (idx + 1) & M_DBGLARPTR;
1797 	if (wrptr)
1798 		*wrptr = idx;
1799 
1800 	val &= 0xffff;
1801 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1802 	val |= adap->params.tp.la_mask;
1803 
1804 	for (i = 0; i < TPLA_SIZE; i++) {
1805 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1806 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1807 		idx = (idx + 1) & M_DBGLARPTR;
1808 	}
1809 
1810 	/* Wipe out last entry if it isn't valid */
1811 	if (last_incomplete)
1812 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1813 
1814 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1815 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1816 			     cfg | adap->params.tp.la_mask);
1817 }
1818 
1819 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1820 {
1821 	unsigned int i, j;
1822 
1823 	for (i = 0; i < 8; i++) {
1824 		u32 *p = la_buf + i;
1825 
1826 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1827 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1828 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1829 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1830 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1831 	}
1832 }
1833 
1834 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1835 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1836 
1837 /**
1838  *	t4_link_start - apply link configuration to MAC/PHY
1839  *	@adap: the adapter
1840  *	@mbox: mbox to use for the FW command
1841  *	@port: the port id; @lc: the requested link configuration
1842  *
1843  *	Set up a port's MAC and PHY according to a desired link configuration.
1844  *	- If the PHY can auto-negotiate first decide what to advertise, then
1845  *	- If the PHY can auto-negotiate, first decide what to advertise, then
1846  *	  enable/disable auto-negotiation as desired, and reset.
1847  *	- If the PHY does not auto-negotiate, just reset it.
1848  *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
1849  */
1850 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1851 		  struct link_config *lc)
1852 {
1853 	struct fw_port_cmd c;
1854 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1855 
1856 	lc->link_ok = 0;
1857 	if (lc->requested_fc & PAUSE_RX)
1858 		fc |= FW_PORT_CAP_FC_RX;
1859 	if (lc->requested_fc & PAUSE_TX)
1860 		fc |= FW_PORT_CAP_FC_TX;
1861 
1862 	memset(&c, 0, sizeof(c));
1863 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1864 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1865 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1866 				  FW_LEN16(c));
1867 
1868 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1869 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1870 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1871 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1872 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1873 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1874 	} else
1875 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1876 
1877 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1878 }
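
/*
 * Illustrative sketch: requesting symmetric pause frames with
 * autonegotiation and applying the result via t4_link_start().  The
 * AUTONEG_ENABLE constant is assumed to come from the driver's common
 * header; the link_config field names match those used above.
 */
static int example_enable_pause(struct adapter *adap, unsigned int mbox,
				unsigned int port, struct link_config *lc)
{
	lc->requested_fc = PAUSE_RX | PAUSE_TX;	/* pause in both directions */
	lc->autoneg = AUTONEG_ENABLE;		/* negotiate, don't force */
	return t4_link_start(adap, mbox, port, lc);
}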
1879 
1880 /**
1881  *	t4_restart_aneg - restart autonegotiation
1882  *	@adap: the adapter
1883  *	@mbox: mbox to use for the FW command
1884  *	@port: the port id
1885  *
1886  *	Restarts autonegotiation for the selected port.
1887  */
1888 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1889 {
1890 	struct fw_port_cmd c;
1891 
1892 	memset(&c, 0, sizeof(c));
1893 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1894 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1895 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1896 				  FW_LEN16(c));
1897 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1898 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1899 }
1900 
1901 struct intr_info {
1902 	unsigned int mask;       /* bits to check in interrupt status */
1903 	const char *msg;         /* message to print or NULL */
1904 	short stat_idx;          /* stat counter to increment or -1 */
1905 	unsigned short fatal;    /* whether the condition reported is fatal */
1906 };
1907 
1908 /**
1909  *	t4_handle_intr_status - table driven interrupt handler
1910  *	t4_handle_intr_status - table-driven interrupt handler
1911  *	@reg: the interrupt status register to process
1912  *	@acts: table of interrupt actions
1913  *
1914  *	A table driven interrupt handler that applies a set of masks to an
1915  *	A table-driven interrupt handler that applies a set of masks to an
1916  *	interrupt status word and performs the corresponding actions if the
1917  *	interrupts described by the mask have occurred.  The actions include
1918  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1919  *	conditions.
1920  */
1921 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1922 				 const struct intr_info *acts)
1923 {
1924 	int fatal = 0;
1925 	unsigned int mask = 0;
1926 	unsigned int status = t4_read_reg(adapter, reg);
1927 
1928 	for ( ; acts->mask; ++acts) {
1929 		if (!(status & acts->mask))
1930 			continue;
1931 		if (acts->fatal) {
1932 			fatal++;
1933 			CH_ALERT(adapter, "%s (0x%x)\n",
1934 				 acts->msg, status & acts->mask);
1935 		} else if (acts->msg)
1936 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1937 					  acts->msg, status & acts->mask);
1938 		mask |= acts->mask;
1939 	}
1940 	status &= mask;
1941 	if (status)                           /* clear processed interrupts */
1942 		t4_write_reg(adapter, reg, status);
1943 	return fatal;
1944 }
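
/*
 * Illustrative sketch (disabled): the shape of a module handler built on
 * t4_handle_intr_status().  The register name and mask values here are
 * hypothetical, which is why the block is compiled out; the real handlers
 * below use F_* fields from t4_regs.h.  Note the mandatory all-zero entry
 * terminating the table.
 */
#ifdef notdef
static void example_intr_handler(struct adapter *adapter)
{
	static struct intr_info example_intr_info[] = {
		{ 0x00000001, "EXAMPLE fatal parity error", -1, 1 },
		{ 0x00000002, "EXAMPLE benign status event", -1, 0 },
		{ 0 }		/* terminator: mask 0 ends the table */
	};

	if (t4_handle_intr_status(adapter, A_EXAMPLE_INT_CAUSE,
				  example_intr_info))
		t4_fatal_err(adapter);
}
#endif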
1945 
1946 /*
1947  * Interrupt handler for the PCIE module.
1948  */
1949 static void pcie_intr_handler(struct adapter *adapter)
1950 {
1951 	static struct intr_info sysbus_intr_info[] = {
1952 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1953 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1954 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1955 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1956 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1957 		{ 0 }
1958 	};
1959 	static struct intr_info pcie_port_intr_info[] = {
1960 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1961 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1962 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1963 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1964 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1965 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1966 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1967 		{ F_RDPE, "Rx data parity error", -1, 1 },
1968 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1969 		{ 0 }
1970 	};
1971 	static struct intr_info pcie_intr_info[] = {
1972 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1973 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1974 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1975 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1976 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1977 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1978 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1979 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1980 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1981 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1982 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1983 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1984 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1985 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1986 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1987 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1988 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1989 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1990 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1991 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1992 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
1993 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1994 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1995 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1996 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1997 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1998 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1999 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2000 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2001 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2002 		  0 },
2003 		{ 0 }
2004 	};
2005 
2006 	static struct intr_info t5_pcie_intr_info[] = {
2007 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2008 		  -1, 1 },
2009 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2010 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2011 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2012 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2013 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2014 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2015 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2016 		  -1, 1 },
2017 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2018 		  -1, 1 },
2019 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2020 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2021 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2022 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2023 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2024 		  -1, 1 },
2025 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2026 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2027 		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
2028 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2029 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2030 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2031 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2032 		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
2033 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2034 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2035 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2036 		  -1, 1 },
2037 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2038 		  -1, 1 },
2039 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2040 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2041 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2042 		{ F_READRSPERR, "Outbound read error", -1,
2043 		  0 },
2044 		{ 0 }
2045 	};
2046 
2047 	int fat;
2048 
2049 	fat = t4_handle_intr_status(adapter,
2050 				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2051 				    sysbus_intr_info) +
2052 	      t4_handle_intr_status(adapter,
2053 				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2054 				    pcie_port_intr_info) +
2055 	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2056 				    is_t4(adapter) ?
2057 				    pcie_intr_info : t5_pcie_intr_info);
2058 	if (fat)
2059 		t4_fatal_err(adapter);
2060 }
2061 
2062 /*
2063  * TP interrupt handler.
2064  */
2065 static void tp_intr_handler(struct adapter *adapter)
2066 {
2067 	static struct intr_info tp_intr_info[] = {
2068 		{ 0x3fffffff, "TP parity error", -1, 1 },
2069 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2070 		{ 0 }
2071 	};
2072 
2073 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2074 		t4_fatal_err(adapter);
2075 }
2076 
2077 /*
2078  * SGE interrupt handler.
2079  */
2080 static void sge_intr_handler(struct adapter *adapter)
2081 {
2082 	u64 v;
2083 	u32 err;
2084 
2085 	static struct intr_info sge_intr_info[] = {
2086 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2087 		  "SGE received CPL exceeding IQE size", -1, 1 },
2088 		{ F_ERR_INVALID_CIDX_INC,
2089 		  "SGE GTS CIDX increment too large", -1, 0 },
2090 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2091 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2092 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2093 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2094 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2095 		  0 },
2096 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2097 		  0 },
2098 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2099 		  0 },
2100 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2101 		  0 },
2102 		{ F_ERR_ING_CTXT_PRIO,
2103 		  "SGE too many priority ingress contexts", -1, 0 },
2104 		{ F_ERR_EGR_CTXT_PRIO,
2105 		  "SGE too many priority egress contexts", -1, 0 },
2106 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2107 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2108 		{ 0 }
2109 	};
2110 
2111 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2112 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2113 	if (v) {
2114 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2115 			 (unsigned long long)v);
2116 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2117 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2118 	}
2119 
2120 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2121 
2122 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2123 	if (err & F_ERROR_QID_VALID) {
2124 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2125 		if (err & F_UNCAPTURED_ERROR)
2126 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2127 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2128 			     F_UNCAPTURED_ERROR);
2129 	}
2130 
2131 	if (v != 0)
2132 		t4_fatal_err(adapter);
2133 }
2134 
2135 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2136 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2137 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2138 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2139 
2140 /*
2141  * CIM interrupt handler.
2142  */
2143 static void cim_intr_handler(struct adapter *adapter)
2144 {
2145 	static struct intr_info cim_intr_info[] = {
2146 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2147 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2148 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2149 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2150 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2151 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2152 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2153 		{ 0 }
2154 	};
2155 	static struct intr_info cim_upintr_info[] = {
2156 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2157 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2158 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2159 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2160 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2161 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2162 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2163 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2164 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2165 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2166 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2167 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2168 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2169 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2170 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2171 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2172 		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2173 		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2174 		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2175 		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2176 		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2177 		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2178 		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2179 		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2180 		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2181 		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2182 		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2183 		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2184 		{ 0 }
2185 	};
2186 	int fat;
2187 
2188 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2189 		t4_report_fw_error(adapter);
2190 
2191 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2192 				    cim_intr_info) +
2193 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2194 				    cim_upintr_info);
2195 	if (fat)
2196 		t4_fatal_err(adapter);
2197 }
2198 
2199 /*
2200  * ULP RX interrupt handler.
2201  */
2202 static void ulprx_intr_handler(struct adapter *adapter)
2203 {
2204 	static struct intr_info ulprx_intr_info[] = {
2205 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2206 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2207 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2208 		{ 0 }
2209 	};
2210 
2211 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2212 		t4_fatal_err(adapter);
2213 }
2214 
2215 /*
2216  * ULP TX interrupt handler.
2217  */
2218 static void ulptx_intr_handler(struct adapter *adapter)
2219 {
2220 	static struct intr_info ulptx_intr_info[] = {
2221 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2222 		  0 },
2223 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2224 		  0 },
2225 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2226 		  0 },
2227 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2228 		  0 },
2229 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2230 		{ 0 }
2231 	};
2232 
2233 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2234 		t4_fatal_err(adapter);
2235 }
2236 
2237 /*
2238  * PM TX interrupt handler.
2239  */
2240 static void pmtx_intr_handler(struct adapter *adapter)
2241 {
2242 	static struct intr_info pmtx_intr_info[] = {
2243 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2244 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2245 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2246 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2247 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2248 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2249 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2250 		  1 },
2251 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2252 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2253 		{ 0 }
2254 	};
2255 
2256 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2257 		t4_fatal_err(adapter);
2258 }
2259 
2260 /*
2261  * PM RX interrupt handler.
2262  */
2263 static void pmrx_intr_handler(struct adapter *adapter)
2264 {
2265 	static struct intr_info pmrx_intr_info[] = {
2266 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2267 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2268 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2269 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2270 		  1 },
2271 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2272 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2273 		{ 0 }
2274 	};
2275 
2276 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2277 		t4_fatal_err(adapter);
2278 }
2279 
2280 /*
2281  * CPL switch interrupt handler.
2282  */
2283 static void cplsw_intr_handler(struct adapter *adapter)
2284 {
2285 	static struct intr_info cplsw_intr_info[] = {
2286 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2287 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2288 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2289 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2290 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2291 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2292 		{ 0 }
2293 	};
2294 
2295 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2296 		t4_fatal_err(adapter);
2297 }
2298 
2299 /*
2300  * LE interrupt handler.
2301  */
2302 static void le_intr_handler(struct adapter *adap)
2303 {
2304 	static struct intr_info le_intr_info[] = {
2305 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2306 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2307 		{ F_PARITYERR, "LE parity error", -1, 1 },
2308 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2309 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2310 		{ 0 }
2311 	};
2312 
2313 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2314 		t4_fatal_err(adap);
2315 }
2316 
2317 /*
2318  * MPS interrupt handler.
2319  */
2320 static void mps_intr_handler(struct adapter *adapter)
2321 {
2322 	static struct intr_info mps_rx_intr_info[] = {
2323 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2324 		{ 0 }
2325 	};
2326 	static struct intr_info mps_tx_intr_info[] = {
2327 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2328 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2329 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2330 		  -1, 1 },
2331 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2332 		  -1, 1 },
2333 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2334 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2335 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2336 		{ 0 }
2337 	};
2338 	static struct intr_info mps_trc_intr_info[] = {
2339 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2340 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2341 		  1 },
2342 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2343 		{ 0 }
2344 	};
2345 	static struct intr_info mps_stat_sram_intr_info[] = {
2346 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2347 		{ 0 }
2348 	};
2349 	static struct intr_info mps_stat_tx_intr_info[] = {
2350 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2351 		{ 0 }
2352 	};
2353 	static struct intr_info mps_stat_rx_intr_info[] = {
2354 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2355 		{ 0 }
2356 	};
2357 	static struct intr_info mps_cls_intr_info[] = {
2358 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2359 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2360 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2361 		{ 0 }
2362 	};
2363 
2364 	int fat;
2365 
2366 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2367 				    mps_rx_intr_info) +
2368 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2369 				    mps_tx_intr_info) +
2370 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2371 				    mps_trc_intr_info) +
2372 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2373 				    mps_stat_sram_intr_info) +
2374 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2375 				    mps_stat_tx_intr_info) +
2376 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2377 				    mps_stat_rx_intr_info) +
2378 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2379 				    mps_cls_intr_info);
2380 
2381 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2382 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2383 	if (fat)
2384 		t4_fatal_err(adapter);
2385 }
2386 
2387 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2388 
2389 /*
2390  * EDC/MC interrupt handler.
2391  */
2392 static void mem_intr_handler(struct adapter *adapter, int idx)
2393 {
2394 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2395 
2396 	unsigned int addr, cnt_addr, v;
2397 
2398 	if (idx <= MEM_EDC1) {
2399 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2400 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2401 	} else {
2402 		addr = A_MC_INT_CAUSE;
2403 		cnt_addr = A_MC_ECC_STATUS;
2404 	}
2405 
2406 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2407 	if (v & F_PERR_INT_CAUSE)
2408 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2409 	if (v & F_ECC_CE_INT_CAUSE) {
2410 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2411 
2412 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2413 		CH_WARN_RATELIMIT(adapter,
2414 				  "%u %s correctable ECC data error%s\n",
2415 				  cnt, name[idx], cnt > 1 ? "s" : "");
2416 	}
2417 	if (v & F_ECC_UE_INT_CAUSE)
2418 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2419 			 name[idx]);
2420 
2421 	t4_write_reg(adapter, addr, v);
2422 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2423 		t4_fatal_err(adapter);
2424 }
2425 
2426 /*
2427  * MA interrupt handler.
2428  */
2429 static void ma_intr_handler(struct adapter *adapter)
2430 {
2431 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2432 
2433 	if (status & F_MEM_PERR_INT_CAUSE)
2434 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2435 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2436 	if (status & F_MEM_WRAP_INT_CAUSE) {
2437 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2438 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2439 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2440 			 G_MEM_WRAP_ADDRESS(v) << 4);
2441 	}
2442 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2443 	t4_fatal_err(adapter);
2444 }
2445 
2446 /*
2447  * SMB interrupt handler.
2448  */
2449 static void smb_intr_handler(struct adapter *adap)
2450 {
2451 	static struct intr_info smb_intr_info[] = {
2452 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2453 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2454 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2455 		{ 0 }
2456 	};
2457 
2458 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2459 		t4_fatal_err(adap);
2460 }
2461 
2462 /*
2463  * NC-SI interrupt handler.
2464  */
2465 static void ncsi_intr_handler(struct adapter *adap)
2466 {
2467 	static struct intr_info ncsi_intr_info[] = {
2468 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2469 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2470 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2471 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2472 		{ 0 }
2473 	};
2474 
2475 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2476 		t4_fatal_err(adap);
2477 }
2478 
2479 /*
2480  * XGMAC interrupt handler.
2481  */
2482 static void xgmac_intr_handler(struct adapter *adap, int port)
2483 {
2484 	u32 v, int_cause_reg;
2485 
2486 	if (is_t4(adap))
2487 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2488 	else
2489 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2490 
2491 	v = t4_read_reg(adap, int_cause_reg);
2492 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2493 	if (!v)
2494 		return;
2495 
2496 	if (v & F_TXFIFO_PRTY_ERR)
2497 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2498 	if (v & F_RXFIFO_PRTY_ERR)
2499 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2500 	t4_write_reg(adap, int_cause_reg, v);
2501 	t4_fatal_err(adap);
2502 }
2503 
2504 /*
2505  * PL interrupt handler.
2506  */
2507 static void pl_intr_handler(struct adapter *adap)
2508 {
2509 	static struct intr_info pl_intr_info[] = {
2510 		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2511 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2512 		{ 0 }
2513 	};
2514 
2515 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2516 		t4_fatal_err(adap);
2517 }
2518 
2519 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2520 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2521 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2522 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2523 
2524 /**
2525  *	t4_slow_intr_handler - control path interrupt handler
2526  *	@adapter: the adapter
2527  *
2528  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2529  *	The designation 'slow' is because it involves register reads, while
2530  *	data interrupts typically don't involve any MMIOs.
2531  */
2532 int t4_slow_intr_handler(struct adapter *adapter)
2533 {
2534 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2535 
2536 	if (!(cause & GLBL_INTR_MASK))
2537 		return 0;
2538 	if (cause & F_CIM)
2539 		cim_intr_handler(adapter);
2540 	if (cause & F_MPS)
2541 		mps_intr_handler(adapter);
2542 	if (cause & F_NCSI)
2543 		ncsi_intr_handler(adapter);
2544 	if (cause & F_PL)
2545 		pl_intr_handler(adapter);
2546 	if (cause & F_SMB)
2547 		smb_intr_handler(adapter);
2548 	if (cause & F_XGMAC0)
2549 		xgmac_intr_handler(adapter, 0);
2550 	if (cause & F_XGMAC1)
2551 		xgmac_intr_handler(adapter, 1);
2552 	if (cause & F_XGMAC_KR0)
2553 		xgmac_intr_handler(adapter, 2);
2554 	if (cause & F_XGMAC_KR1)
2555 		xgmac_intr_handler(adapter, 3);
2556 	if (cause & F_PCIE)
2557 		pcie_intr_handler(adapter);
2558 	if (cause & F_MC)
2559 		mem_intr_handler(adapter, MEM_MC);
2560 	if (cause & F_EDC0)
2561 		mem_intr_handler(adapter, MEM_EDC0);
2562 	if (cause & F_EDC1)
2563 		mem_intr_handler(adapter, MEM_EDC1);
2564 	if (cause & F_LE)
2565 		le_intr_handler(adapter);
2566 	if (cause & F_TP)
2567 		tp_intr_handler(adapter);
2568 	if (cause & F_MA)
2569 		ma_intr_handler(adapter);
2570 	if (cause & F_PM_TX)
2571 		pmtx_intr_handler(adapter);
2572 	if (cause & F_PM_RX)
2573 		pmrx_intr_handler(adapter);
2574 	if (cause & F_ULP_RX)
2575 		ulprx_intr_handler(adapter);
2576 	if (cause & F_CPL_SWITCH)
2577 		cplsw_intr_handler(adapter);
2578 	if (cause & F_SGE)
2579 		sge_intr_handler(adapter);
2580 	if (cause & F_ULP_TX)
2581 		ulptx_intr_handler(adapter);
2582 
2583 	/* Clear the interrupts just processed for which we are the master. */
2584 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2585 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2586 	return 1;
2587 }
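
/*
 * Illustrative sketch: how an OS-level interrupt service routine might
 * fold in t4_slow_intr_handler().  Both the routine and the data-path
 * dispatch are hypothetical; the real hookup lives in the OS-specific
 * layer of the driver.
 */
static void example_isr(void *arg)
{
	struct adapter *adap = arg;

	if (t4_slow_intr_handler(adap))
		return;		/* serviced a slow (error) interrupt */

	/* otherwise service the data-path queues here */
}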
2588 
2589 /**
2590  *	t4_intr_enable - enable interrupts
2591  *	@adapter: the adapter whose interrupts should be enabled
2592  *
2593  *	Enable PF-specific interrupts for the calling function and the top-level
2594  *	interrupt concentrator for global interrupts.  Interrupts are already
2595  *	enabled at each module, here we just enable the roots of the interrupt
2596  *	hierarchies.
2597  *
2598  *	Note: this function should be called only when the driver manages
2599  *	non-PF-specific interrupts from the various HW modules.  Only one PCI
2600  *	function at a time should be doing this.
2601  */
2602 void t4_intr_enable(struct adapter *adapter)
2603 {
2604 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2605 
2606 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2607 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2608 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2609 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2610 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2611 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2612 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2613 		     F_EGRESS_SIZE_ERR);
2614 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2615 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2616 }
2617 
2618 /**
2619  *	t4_intr_disable - disable interrupts
2620  *	@adapter: the adapter whose interrupts should be disabled
2621  *
2622  *	Disable interrupts.  We only disable the top-level interrupt
2623  *	concentrators.  The caller must be a PCI function managing global
2624  *	interrupts.
2625  */
2626 void t4_intr_disable(struct adapter *adapter)
2627 {
2628 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2629 
2630 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2631 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2632 }
2633 
2634 /**
2635  *	t4_intr_clear - clear all interrupts
2636  *	@adapter: the adapter whose interrupts should be cleared
2637  *
2638  *	Clears all interrupts.  The caller must be a PCI function managing
2639  *	global interrupts.
2640  */
2641 void t4_intr_clear(struct adapter *adapter)
2642 {
2643 	static const unsigned int cause_reg[] = {
2644 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2645 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2646 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2647 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2648 		A_MC_INT_CAUSE,
2649 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2650 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2651 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2652 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2653 		A_TP_INT_CAUSE,
2654 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2655 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2656 		A_MPS_RX_PERR_INT_CAUSE,
2657 		A_CPL_INTR_CAUSE,
2658 		MYPF_REG(A_PL_PF_INT_CAUSE),
2659 		A_PL_PL_INT_CAUSE,
2660 		A_LE_DB_INT_CAUSE,
2661 	};
2662 
2663 	unsigned int i;
2664 
2665 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2666 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2667 
2668 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2669 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2670 }
2671 
2672 /**
2673  *	hash_mac_addr - return the hash value of a MAC address
2674  *	@addr: the 48-bit Ethernet MAC address
2675  *
2676  *	Hashes a MAC address according to the hash function used by HW inexact
2677  *	(hash) address matching.
2678  */
2679 static int hash_mac_addr(const u8 *addr)
2680 {
2681 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2682 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2683 	a ^= b;
2684 	a ^= (a >> 12);
2685 	a ^= (a >> 6);
2686 	return a & 0x3f;
2687 }
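
/*
 * Worked example (for illustration): for the MAC address 00:07:43:12:34:56
 * the two 24-bit halves are a = 0x000743 and b = 0x123456.  After a ^= b
 * (0x123315) and the two fold-downs (0x123236, then 0x127afe), masking
 * with 0x3f selects hash bucket 0x3e, one of the 64 inexact-match buckets.
 */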
2688 
2689 /**
2690  *	t4_config_rss_range - configure a portion of the RSS mapping table
2691  *	@adapter: the adapter
2692  *	@mbox: mbox to use for the FW command
2693  *	@viid: virtual interface whose RSS subtable is to be written
2694  *	@start: start entry in the table to write
2695  *	@n: how many table entries to write
2696  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2697  *	@nrspq: number of values in @rspq
2698  *
2699  *	Programs the selected part of the VI's RSS mapping table with the
2700  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2701  *	until the full table range is populated.
2702  *
2703  *	The caller must ensure the values in @rspq are in the range allowed for
2704  *	@viid.
2705  */
2706 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2707 			int start, int n, const u16 *rspq, unsigned int nrspq)
2708 {
2709 	int ret;
2710 	const u16 *rsp = rspq;
2711 	const u16 *rsp_end = rspq + nrspq;
2712 	struct fw_rss_ind_tbl_cmd cmd;
2713 
2714 	memset(&cmd, 0, sizeof(cmd));
2715 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2716 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2717 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2718 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2719 
2720 
2721 	/*
2722 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2723 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2724 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2725 	 * reserved.
2726 	 */
2727 	while (n > 0) {
2728 		int nq = min(n, 32);
2729 		int nq_packed = 0;
2730 		__be32 *qp = &cmd.iq0_to_iq2;
2731 
2732 		/*
2733 		 * Set up the firmware RSS command header to send the next
2734 		 * "nq" Ingress Queue IDs to the firmware.
2735 		 */
2736 		cmd.niqid = htons(nq);
2737 		cmd.startidx = htons(start);
2738 
2739 		/*
2740 		 * Account for the "nq" Ingress Queue IDs consumed by this command.
2741 		 */
2742 		start += nq;
2743 		n -= nq;
2744 
2745 		/*
2746 		 * While there are still Ingress Queue IDs to stuff into the
2747 		 * current firmware RSS command, retrieve them from the
2748 		 * Ingress Queue ID array and insert them into the command.
2749 		 */
2750 		while (nq > 0) {
2751 			/*
2752 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2753 			 * around the Ingress Queue ID array if necessary) and
2754 			 * insert them into the firmware RSS command at the
2755 			 * current 3-tuple position within the commad.
2756 			 * current 3-tuple position within the command.
2757 			u16 qbuf[3];
2758 			u16 *qbp = qbuf;
2759 			int nqbuf = min(3, nq);
2760 
2761 			nq -= nqbuf;
2762 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2763 			while (nqbuf && nq_packed < 32) {
2764 				nqbuf--;
2765 				nq_packed++;
2766 				*qbp++ = *rsp++;
2767 				if (rsp >= rsp_end)
2768 					rsp = rspq;
2769 			}
2770 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2771 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2772 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2773 		}
2774 
2775 		/*
2776 		 * Send this portion of the RSS table update to the firmware;
2777 		 * bail out on any errors.
2778 		 */
2779 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2780 		if (ret)
2781 			return ret;
2782 	}
2783 
2784 	return 0;
2785 }
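
/*
 * Worked example (for illustration): with qbuf[] = { 5, 6, 7 }, and
 * assuming V_FW_RSS_IND_TBL_CMD_IQ0 is the most significant of the three
 * 10-bit fields, the inner loop above packs the command word
 *
 *	(5 << 20) | (6 << 10) | 7 = 0x501807
 *
 * leaving the top 2 bits of the 32-bit word zero (reserved).
 */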
2786 
2787 /**
2788  *	t4_config_glbl_rss - configure the global RSS mode
2789  *	@adapter: the adapter
2790  *	@mbox: mbox to use for the FW command
2791  *	@mode: global RSS mode
2792  *	@flags: mode-specific flags
2793  *
2794  *	Sets the global RSS mode.
2795  */
2796 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2797 		       unsigned int flags)
2798 {
2799 	struct fw_rss_glb_config_cmd c;
2800 
2801 	memset(&c, 0, sizeof(c));
2802 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2803 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2804 	c.retval_len16 = htonl(FW_LEN16(c));
2805 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2806 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2807 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2808 		c.u.basicvirtual.mode_pkd =
2809 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2810 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2811 	} else
2812 		return -EINVAL;
2813 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2814 }
2815 
2816 /**
2817  *	t4_config_vi_rss - configure per VI RSS settings
2818  *	@adapter: the adapter
2819  *	@mbox: mbox to use for the FW command
2820  *	@viid: the VI id
2821  *	@flags: RSS flags
2822  *	@defq: id of the default RSS queue for the VI.
2823  *
2824  *	Configures VI-specific RSS properties.
2825  */
2826 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2827 		     unsigned int flags, unsigned int defq)
2828 {
2829 	struct fw_rss_vi_config_cmd c;
2830 
2831 	memset(&c, 0, sizeof(c));
2832 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2833 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2834 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2835 	c.retval_len16 = htonl(FW_LEN16(c));
2836 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2837 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2838 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2839 }
2840 
2841 /* Read an RSS table row */
2842 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2843 {
2844 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2845 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2846 				   5, 0, val);
2847 }
2848 
2849 /**
2850  *	t4_read_rss - read the contents of the RSS mapping table
2851  *	@adapter: the adapter
2852  *	@map: holds the contents of the RSS mapping table
2853  *
2854  *	Reads the contents of the RSS hash->queue mapping table.
2855  */
2856 int t4_read_rss(struct adapter *adapter, u16 *map)
2857 {
2858 	u32 val;
2859 	int i, ret;
2860 
2861 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2862 		ret = rd_rss_row(adapter, i, &val);
2863 		if (ret)
2864 			return ret;
2865 		*map++ = G_LKPTBLQUEUE0(val);
2866 		*map++ = G_LKPTBLQUEUE1(val);
2867 	}
2868 	return 0;
2869 }
2870 
2871 /**
2872  *	t4_read_rss_key - read the global RSS key
2873  *	@adap: the adapter
2874  *	@key: 10-entry array holding the 320-bit RSS key
2875  *
2876  *	Reads the global 320-bit RSS key.
2877  */
2878 void t4_read_rss_key(struct adapter *adap, u32 *key)
2879 {
2880 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2881 			 A_TP_RSS_SECRET_KEY0);
2882 }
2883 
2884 /**
2885  *	t4_write_rss_key - program one of the RSS keys
2886  *	@adap: the adapter
2887  *	@key: 10-entry array holding the 320-bit RSS key
2888  *	@idx: which RSS key to write
2889  *
2890  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2891  *	0..15 the corresponding entry in the RSS key table is written,
2892  *	otherwise the global RSS key is written.
2893  */
2894 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2895 {
2896 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2897 			  A_TP_RSS_SECRET_KEY0);
2898 	if (idx >= 0 && idx < 16)
2899 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2900 			     V_KEYWRADDR(idx) | F_KEYWREN);
2901 }
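
/*
 * Illustrative sketch: programming the global RSS key.  Any @idx outside
 * 0..15 (here -1) updates only the global key, per the description above;
 * the key material is an arbitrary placeholder, not a recommended value.
 */
static void example_set_global_rss_key(struct adapter *adap)
{
	static const u32 key[10] = {		/* 10 x 32 = 320 bits */
		0x01234567, 0x89abcdef, 0x01234567, 0x89abcdef, 0x01234567,
		0x89abcdef, 0x01234567, 0x89abcdef, 0x01234567, 0x89abcdef
	};

	t4_write_rss_key(adap, key, -1);
}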
2902 
2903 /**
2904  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2905  *	@adapter: the adapter
2906  *	@index: the entry in the PF RSS table to read
2907  *	@valp: where to store the returned value
2908  *
2909  *	Reads the PF RSS Configuration Table at the specified index and returns
2910  *	the value found there.
2911  */
2912 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2913 {
2914 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2915 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2916 }
2917 
2918 /**
2919  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2920  *	@adapter: the adapter
2921  *	@index: the entry in the PF RSS table to write
2922  *	@val: the value to store
2923  *
2924  *	Writes the PF RSS Configuration Table at the specified index with the
2925  *	specified value.
2926  */
2927 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2928 {
2929 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2930 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2931 }
2932 
2933 /**
2934  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2935  *	@adapter: the adapter
2936  *	@index: the entry in the VF RSS table to read
2937  *	@vfl: where to store the returned VFL
2938  *	@vfh: where to store the returned VFH
2939  *
2940  *	Reads the VF RSS Configuration Table at the specified index and returns
2941  *	the (VFL, VFH) values found there.
2942  */
2943 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2944 			   u32 *vfl, u32 *vfh)
2945 {
2946 	u32 vrt;
2947 
2948 	/*
2949 	 * Request that the index'th VF Table values be read into VFL/VFH.
2950 	 */
2951 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2952 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2953 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2954 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2955 
2956 	/*
2957 	 * Grab the VFL/VFH values ...
2958 	 */
2959 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2960 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2961 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2962 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2963 }
2964 
2965 /**
2966  *	t4_write_rss_vf_config - write VF RSS Configuration Table
2967  *
2968  *	@adapter: the adapter
2969  *	@index: the entry in the VF RSS table to write
2970  *	@vfl: the VFL to store
2971  *	@vfh: the VFH to store
2972  *
2973  *	Writes the VF RSS Configuration Table at the specified index with the
2974  *	specified (VFL, VFH) values.
2975  */
2976 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2977 			    u32 vfl, u32 vfh)
2978 {
2979 	u32 vrt;
2980 
2981 	/*
2982 	 * Load up VFL/VFH with the values to be written ...
2983 	 */
2984 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2985 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
2986 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2987 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
2988 
2989 	/*
2990 	 * Write the VFL/VFH into the VF Table at the index'th location.
2991 	 */
2992 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2993 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2994 	vrt |= V_VFWRADDR(index) | F_VFWREN;
2995 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2996 }
2997 
2998 /**
2999  *	t4_read_rss_pf_map - read PF RSS Map
3000  *	@adapter: the adapter
3001  *
3002  *	Reads the PF RSS Map register and returns its value.
3003  */
3004 u32 t4_read_rss_pf_map(struct adapter *adapter)
3005 {
3006 	u32 pfmap;
3007 
3008 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3009 			 &pfmap, 1, A_TP_RSS_PF_MAP);
3010 	return pfmap;
3011 }
3012 
3013 /**
3014  *	t4_write_rss_pf_map - write PF RSS Map
3015  *	@adapter: the adapter
3016  *	@pfmap: PF RSS Map value
3017  *
3018  *	Writes the specified value to the PF RSS Map register.
3019  */
3020 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3021 {
3022 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3023 			  &pfmap, 1, A_TP_RSS_PF_MAP);
3024 }
3025 
3026 /**
3027  *	t4_read_rss_pf_mask - read PF RSS Mask
3028  *	@adapter: the adapter
3029  *
3030  *	Reads the PF RSS Mask register and returns its value.
3031  */
3032 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3033 {
3034 	u32 pfmask;
3035 
3036 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3037 			 &pfmask, 1, A_TP_RSS_PF_MSK);
3038 	return pfmask;
3039 }
3040 
3041 /**
3042  *	t4_write_rss_pf_mask - write PF RSS Mask
3043  *	@adapter: the adapter
3044  *	@pfmask: PF RSS Mask value
3045  *
3046  *	Writes the specified value to the PF RSS Mask register.
3047  */
3048 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3049 {
3050 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3051 			  &pfmask, 1, A_TP_RSS_PF_MSK);
3052 }
3053 
3054 /**
3055  *	t4_set_filter_mode - configure the optional components of filter tuples
3056  *	@adap: the adapter
3057  *	@mode_map: a bitmap selcting which optional filter components to enable
3058  *	@mode_map: a bitmap selecting which optional filter components to enable
3059  *	Sets the filter mode by selecting the optional components to enable
3060  *	in filter tuples.  Returns 0 on success and a negative error if the
3061  *	requested mode needs more bits than are available for optional
3062  *	components.
3063  */
3064 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3065 {
3066 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3067 
3068 	int i, nbits = 0;
3069 
3070 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3071 		if (mode_map & (1 << i))
3072 			nbits += width[i];
3073 	if (nbits > FILTER_OPT_LEN)
3074 		return -EINVAL;
3075 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3076 			  A_TP_VLAN_PRI_MAP);
3077 	return 0;
3078 }
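
/*
 * Worked example (for illustration): a mode_map selecting the optional
 * components at bit positions 3 and 5 consumes 17 + 8 = 25 bits of the
 * width[] table above, so
 *
 *	t4_set_filter_mode(adap, (1 << 3) | (1 << 5))
 *
 * succeeds as long as 25 <= FILTER_OPT_LEN; oversubscribing the optional
 * tuple space fails with -EINVAL before any register is written.
 */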
3079 
3080 /**
3081  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3082  *	@adap: the adapter
3083  *	@v4: holds the TCP/IP counter values
3084  *	@v6: holds the TCP/IPv6 counter values
3085  *
3086  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3087  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3088  */
3089 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3090 			 struct tp_tcp_stats *v6)
3091 {
3092 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3093 
3094 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3095 #define STAT(x)     val[STAT_IDX(x)]
3096 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3097 
3098 	if (v4) {
3099 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3100 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3101 		v4->tcpOutRsts = STAT(OUT_RST);
3102 		v4->tcpInSegs  = STAT64(IN_SEG);
3103 		v4->tcpOutSegs = STAT64(OUT_SEG);
3104 		v4->tcpRetransSegs = STAT64(RXT_SEG);
3105 	}
3106 	if (v6) {
3107 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3108 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3109 		v6->tcpOutRsts = STAT(OUT_RST);
3110 		v6->tcpInSegs  = STAT64(IN_SEG);
3111 		v6->tcpOutSegs = STAT64(OUT_SEG);
3112 		v6->tcpRetransSegs = STAT64(RXT_SEG);
3113 	}
3114 #undef STAT64
3115 #undef STAT
3116 #undef STAT_IDX
3117 }
3118 
3119 /**
3120  *	t4_tp_get_err_stats - read TP's error MIB counters
3121  *	@adap: the adapter
3122  *	@st: holds the counter values
3123  *
3124  *	Returns the values of TP's error counters.
3125  */
3126 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3127 {
3128 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3129 			 12, A_TP_MIB_MAC_IN_ERR_0);
3130 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3131 			 8, A_TP_MIB_TNL_CNG_DROP_0);
3132 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3133 			 4, A_TP_MIB_TNL_DROP_0);
3134 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3135 			 4, A_TP_MIB_OFD_VLN_DROP_0);
3136 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3137 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3138 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3139 			 2, A_TP_MIB_OFD_ARP_DROP);
3140 }
3141 
3142 /**
3143  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3144  *	@adap: the adapter
3145  *	@st: holds the counter values
3146  *
3147  *	Returns the values of TP's proxy counters.
3148  */
3149 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3150 {
3151 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3152 			 4, A_TP_MIB_TNL_LPBK_0);
3153 }
3154 
3155 /**
3156  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3157  *	@adap: the adapter
3158  *	@st: holds the counter values
3159  *
3160  *	Returns the values of TP's CPL counters.
3161  */
3162 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3163 {
3164 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3165 			 8, A_TP_MIB_CPL_IN_REQ_0);
3166 }
3167 
3168 /**
3169  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3170  *	@adap: the adapter
3171  *	@st: holds the counter values
3172  *
3173  *	Returns the values of TP's RDMA counters.
3174  */
3175 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3176 {
3177 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3178 			 2, A_TP_MIB_RQE_DFR_MOD);
3179 }
3180 
3181 /**
3182  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3183  *	@adap: the adapter
3184  *	@idx: the port index
3185  *	@st: holds the counter values
3186  *
3187  *	Returns the values of TP's FCoE counters for the selected port.
3188  */
3189 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3190 		       struct tp_fcoe_stats *st)
3191 {
3192 	u32 val[2];
3193 
3194 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3195 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3196 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3197 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3198 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3199 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3200 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3201 }
3202 
3203 /**
3204  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3205  *	@adap: the adapter
3206  *	@st: holds the counter values
3207  *
3208  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3209  */
3210 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3211 {
3212 	u32 val[4];
3213 
3214 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3215 			 A_TP_MIB_USM_PKTS);
3216 	st->frames = val[0];
3217 	st->drops = val[1];
3218 	st->octets = ((u64)val[2] << 32) | val[3];
3219 }
3220 
3221 /**
3222  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3223  *	@adap: the adapter
3224  *	@mtus: where to store the MTU values
3225  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3226  *
3227  *	Reads the HW path MTU table.
3228  */
3229 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3230 {
3231 	u32 v;
3232 	int i;
3233 
3234 	for (i = 0; i < NMTUS; ++i) {
3235 		t4_write_reg(adap, A_TP_MTU_TABLE,
3236 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3237 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3238 		mtus[i] = G_MTUVALUE(v);
3239 		if (mtu_log)
3240 			mtu_log[i] = G_MTUWIDTH(v);
3241 	}
3242 }
3243 
3244 /**
3245  *	t4_read_cong_tbl - reads the congestion control table
3246  *	@adap: the adapter
3247  *	@incr: where to store the alpha values
3248  *
3249  *	Reads the additive increments programmed into the HW congestion
3250  *	control table.
3251  */
3252 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3253 {
3254 	unsigned int mtu, w;
3255 
3256 	for (mtu = 0; mtu < NMTUS; ++mtu)
3257 		for (w = 0; w < NCCTRL_WIN; ++w) {
3258 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3259 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3260 			incr[mtu][w] = (u16)t4_read_reg(adap,
3261 						A_TP_CCTRL_TABLE) & 0x1fff;
3262 		}
3263 }
3264 
3265 /**
3266  *	t4_read_pace_tbl - read the pace table
3267  *	@adap: the adapter
3268  *	@pace_vals: holds the returned values
3269  *
3270  *	Returns the values of TP's pace table in microseconds.
3271  */
3272 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3273 {
3274 	unsigned int i, v;
3275 
3276 	for (i = 0; i < NTX_SCHED; i++) {
3277 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3278 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3279 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3280 	}
3281 }
3282 
3283 /**
3284  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3285  *	@adap: the adapter
3286  *	@addr: the indirect TP register address
3287  *	@mask: specifies the field within the register to modify
3288  *	@val: new value for the field
3289  *
3290  *	Sets a field of an indirect TP register to the given value.
3291  */
3292 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3293 			    unsigned int mask, unsigned int val)
3294 {
3295 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3296 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3297 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3298 }
3299 
3300 /**
3301  *	init_cong_ctrl - initialize congestion control parameters
3302  *	@a: the alpha values for congestion control
3303  *	@b: the beta values for congestion control
3304  *
3305  *	Initialize the congestion control parameters.
3306  */
3307 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3308 {
3309 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3310 	a[9] = 2;
3311 	a[10] = 3;
3312 	a[11] = 4;
3313 	a[12] = 5;
3314 	a[13] = 6;
3315 	a[14] = 7;
3316 	a[15] = 8;
3317 	a[16] = 9;
3318 	a[17] = 10;
3319 	a[18] = 14;
3320 	a[19] = 17;
3321 	a[20] = 21;
3322 	a[21] = 25;
3323 	a[22] = 30;
3324 	a[23] = 35;
3325 	a[24] = 45;
3326 	a[25] = 60;
3327 	a[26] = 80;
3328 	a[27] = 100;
3329 	a[28] = 200;
3330 	a[29] = 300;
3331 	a[30] = 400;
3332 	a[31] = 500;
3333 
3334 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3335 	b[9] = b[10] = 1;
3336 	b[11] = b[12] = 2;
3337 	b[13] = b[14] = b[15] = b[16] = 3;
3338 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3339 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3340 	b[28] = b[29] = 6;
3341 	b[30] = b[31] = 7;
3342 }
3343 
3344 /* The minimum additive increment value for the congestion control table */
3345 #define CC_MIN_INCR 2U
3346 
3347 /**
3348  *	t4_load_mtus - write the MTU and congestion control HW tables
3349  *	@adap: the adapter
3350  *	@mtus: the values for the MTU table
3351  *	@alpha: the values for the congestion control alpha parameter
3352  *	@beta: the values for the congestion control beta parameter
3353  *
3354  *	Write the HW MTU table with the supplied MTUs and the high-speed
3355  *	congestion control table with the supplied alpha, beta, and MTUs.
3356  *	We write the two tables together because the additive increments
3357  *	depend on the MTUs.
3358  */
3359 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3360 		  const unsigned short *alpha, const unsigned short *beta)
3361 {
3362 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3363 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3364 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3365 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3366 	};
3367 
3368 	unsigned int i, w;
3369 
3370 	for (i = 0; i < NMTUS; ++i) {
3371 		unsigned int mtu = mtus[i];
3372 		unsigned int log2 = fls(mtu);
3373 
3374 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3375 			log2--;
3376 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3377 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3378 
3379 		for (w = 0; w < NCCTRL_WIN; ++w) {
3380 			unsigned int inc;
3381 
3382 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3383 				  CC_MIN_INCR);
3384 
3385 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3386 				     (w << 16) | (beta[w] << 13) | inc);
3387 		}
3388 	}
3389 }
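
/*
 * Worked example of the increment computation above (illustrative numbers
 * only): for a 1500-byte MTU in congestion window 10, using the defaults
 * from init_cong_ctrl() (alpha[10] = 3, beta[10] = 1) and avg_pkts[10] = 160,
 *
 *	inc = max((1500 - 40) * 3 / 160, CC_MIN_INCR) = max(27, 2) = 27
 *
 * so the entry written for that MTU/window pair is
 * (i << 21) | (10 << 16) | (1 << 13) | 27.
 */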
3390 
3391 /**
3392  *	t4_set_pace_tbl - set the pace table
3393  *	@adap: the adapter
3394  *	@pace_vals: the pace values in microseconds
3395  *	@start: index of the first entry in the HW pace table to set
3396  *	@n: how many entries to set
3397  *
3398  *	Sets (a subset of the) HW pace table.
3399  */
3400 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3401 		     unsigned int start, unsigned int n)
3402 {
3403 	unsigned int vals[NTX_SCHED], i;
3404 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3405 
3406 	if (n > NTX_SCHED)
3407 		return -ERANGE;
3408 
3409 	/* convert values from us to dack ticks, rounding to closest value */
3410 	for (i = 0; i < n; i++, pace_vals++) {
3411 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3412 		if (vals[i] > 0x7ff)
3413 			return -ERANGE;
3414 		if (*pace_vals && vals[i] == 0)
3415 			return -ERANGE;
3416 	}
3417 	for (i = 0; i < n; i++, start++)
3418 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3419 	return 0;
3420 }
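
/*
 * Conversion sketch (illustrative): dack_ticks_to_usec(adap, 1000) is the
 * duration of 1000 DACK ticks in microseconds, i.e. one tick in nanoseconds.
 * Assuming a hypothetical 100 ns tick, a 50 us pace value becomes
 *
 *	vals[i] = (1000 * 50 + 100 / 2) / 100 = 500 ticks
 *
 * which fits comfortably in the 11-bit (0x7ff) field checked above.
 */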
3421 
3422 /**
3423  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3424  *	@adap: the adapter
3425  *	@kbps: target rate in Kbps
3426  *	@sched: the scheduler index
3427  *
3428  *	Configure a Tx HW scheduler for the target rate.
3429  */
3430 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3431 {
3432 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3433 	unsigned int clk = adap->params.vpd.cclk * 1000;
3434 	unsigned int selected_cpt = 0, selected_bpt = 0;
3435 
3436 	if (kbps > 0) {
3437 		kbps *= 125;     /* -> bytes */
3438 		for (cpt = 1; cpt <= 255; cpt++) {
3439 			tps = clk / cpt;
3440 			bpt = (kbps + tps / 2) / tps;
3441 			if (bpt > 0 && bpt <= 255) {
3442 				v = bpt * tps;
3443 				delta = v >= kbps ? v - kbps : kbps - v;
3444 				if (delta < mindelta) {
3445 					mindelta = delta;
3446 					selected_cpt = cpt;
3447 					selected_bpt = bpt;
3448 				}
3449 			} else if (selected_cpt)
3450 				break;
3451 		}
3452 		if (!selected_cpt)
3453 			return -EINVAL;
3454 	}
3455 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3456 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3457 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3458 	if (sched & 1)
3459 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3460 	else
3461 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3462 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3463 	return 0;
3464 }
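
/*
 * Worked example of the search above (illustrative numbers only): with a
 * hypothetical 250 MHz core clock (clk = 250000000) and a 1 Gbps target
 * (kbps = 1000000, i.e. 125000000 bytes/s after the "* 125"), cpt = 2 gives
 * tps = 125000000 ticks/s and bpt = 1 byte/tick, an exact match (delta = 0),
 * so the scheduler is programmed with cpt = 2 and bpt = 1.
 */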
3465 
3466 /**
3467  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3468  *	@adap: the adapter
3469  *	@sched: the scheduler index
3470  *	@ipg: the interpacket delay in tenths of nanoseconds
3471  *
3472  *	Set the interpacket delay for a HW packet rate scheduler.
3473  */
3474 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3475 {
3476 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3477 
3478 	/* convert ipg to nearest number of core clocks */
3479 	ipg *= core_ticks_per_usec(adap);
3480 	ipg = (ipg + 5000) / 10000;
3481 	if (ipg > M_TXTIMERSEPQ0)
3482 		return -EINVAL;
3483 
3484 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3485 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3486 	if (sched & 1)
3487 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3488 	else
3489 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3490 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3491 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3492 	return 0;
3493 }
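
/*
 * Conversion sketch (illustrative): with a hypothetical 250 MHz core clock,
 * core_ticks_per_usec() is 250, so an @ipg of 1000 (100 ns) becomes
 *
 *	(1000 * 250 + 5000) / 10000 = 25 core clocks (integer division)
 *
 * i.e. 100 ns at 4 ns per tick.
 */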
3494 
3495 /**
3496  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3497  *	@adap: the adapter
3498  *	@sched: the scheduler index
3499  *	@kbps: where to store the rate in Kbps
3500  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3501  *
3502  *	Return the current configuration of a HW Tx scheduler.
3503  */
3504 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
3505 		     unsigned int *kbps, unsigned int *ipg)
3506 {
3507 	unsigned int v, addr, bpt, cpt;
3508 
3509 	if (kbps) {
3510 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3511 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3512 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3513 		if (sched & 1)
3514 			v >>= 16;
3515 		bpt = (v >> 8) & 0xff;
3516 		cpt = v & 0xff;
3517 		if (!cpt)
3518 			*kbps = 0;        /* scheduler disabled */
3519 		else {
3520 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3521 			*kbps = (v * bpt) / 125;
3522 		}
3523 	}
3524 	if (ipg) {
3525 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3526 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3527 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3528 		if (sched & 1)
3529 			v >>= 16;
3530 		v &= 0xffff;
3531 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3532 	}
3533 }
3534 
3535 /*
3536  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3537  * clocks.  The formula is
3538  *
3539  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3540  *
3541  * which, with ClkFreq expressed in kHz (the unit of vpd.cclk), reduces to
3542  *
3543  * bytes/s = 62.5 * bytes256 * ClkFreq_kHz
3544  */
3545 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3546 {
3547 	u64 v = bytes256 * adap->params.vpd.cclk;
3548 
3549 	return v * 62 + v / 2;
3550 }
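
/*
 * Sanity check of the integer arithmetic above (illustrative): v * 62 + v / 2
 * is 62.5 * v.  For a hypothetical cclk of 250000 (kHz) and bytes256 = 4,
 * v = 1000000 and the returned rate is 62500000 bytes/s, which matches
 * 4 * 256 * 250000000 / 4096.
 */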
3551 
3552 /**
3553  *	t4_get_chan_txrate - get the current per channel Tx rates
3554  *	@adap: the adapter
3555  *	@nic_rate: rates for NIC traffic
3556  *	@ofld_rate: rates for offloaded traffic
3557  *
3558  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3559  *	for each channel.
3560  */
3561 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3562 {
3563 	u32 v;
3564 
3565 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3566 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3567 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3568 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3569 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3570 
3571 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3572 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3573 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3574 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3575 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3576 }
3577 
3578 /**
3579  *	t4_set_trace_filter - configure one of the tracing filters
3580  *	@adap: the adapter
3581  *	@tp: the desired trace filter parameters
3582  *	@idx: which filter to configure
3583  *	@enable: whether to enable or disable the filter
3584  *
3585  *	Configures one of the tracing filters available in HW.  If @enable is
3586  *	%0, @tp is not examined and may be %NULL.  The user is responsible for
3587  *	setting the single/multiple trace mode by writing to the A_MPS_TRC_CFG
3588  *	register with the "cxgbtool iface reg reg_addr=val" command.  See
3589  *	t4_sniffer/docs/readme.txt for a complete description of how to set up
3590  *	tracing on T4.
3591  */
3592 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3593 			int idx, int enable)
3594 {
3595 	int i, ofst = idx * 4;
3596 	u32 data_reg, mask_reg, cfg;
3598 
3599 	if (!enable) {
3600 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3601 		return 0;
3602 	}
3603 
3604 	/*
3605 	 * TODO - After T4 data book is updated, specify the exact
3606 	 * section below.
3607 	 *
3608 	 * See T4 data book - MPS section for a complete description
3609 	 * of the below if..else handling of A_MPS_TRC_CFG register
3610 	 * value.
3611 	 */
3612 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3613 	if (cfg & F_TRCMULTIFILTER) {
3614 		/*
3615 		 * If multiple tracers are enabled, then maximum
3616 		 * capture size is 2.5KB (FIFO size of a single channel)
3617 		 * minus 2 flits for CPL_TRACE_PKT header.
3618 		 */
3619 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3620 			return -EINVAL;
3621 	} else {
3623 		/*
3624 		 * If multiple tracers are disabled, a maximum packet capture
3625 		 * size of 9600 bytes is recommended to avoid deadlocks.
3626 		 * Also in this mode, only trace0 can be enabled and running.
3627 		 */
3629 		if (tp->snap_len > 9600 || idx)
3630 			return -EINVAL;
3631 	}
3632 
3633 	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3634 	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3635 		return -EINVAL;
3636 
3637 	/* stop the tracer we'll be changing */
3638 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3639 
3640 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3641 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3642 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3643 
3644 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3645 		t4_write_reg(adap, data_reg, tp->data[i]);
3646 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3647 	}
3648 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3649 		     V_TFCAPTUREMAX(tp->snap_len) |
3650 		     V_TFMINPKTSIZE(tp->min_len));
3651 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3652 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3653 		     (is_t4(adap) ?
3654 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
3655 		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
3656 		     V_T5_TFINVERTMATCH(tp->invert)));
3657 
3658 	return 0;
3659 }
3660 
3661 /**
3662  *	t4_get_trace_filter - query one of the tracing filters
3663  *	@adap: the adapter
3664  *	@tp: the current trace filter parameters
3665  *	@idx: which trace filter to query
3666  *	@enabled: non-zero if the filter is enabled
3667  *
3668  *	Returns the current settings of one of the HW tracing filters.
3669  */
3670 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3671 			 int *enabled)
3672 {
3673 	u32 ctla, ctlb;
3674 	int i, ofst = idx * 4;
3675 	u32 data_reg, mask_reg;
3676 
3677 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3678 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3679 
3680 	if (is_t4(adap)) {
3681 		*enabled = !!(ctla & F_TFEN);
3682 		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
3683 	} else {
3684 		*enabled = !!(ctla & F_T5_TFEN);
3685 		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3686 	}
3687 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3688 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3689 	tp->skip_ofst = G_TFOFFSET(ctla);
3690 	tp->skip_len = G_TFLENGTH(ctla);
3692 
3693 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3694 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3695 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3696 
3697 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3698 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3699 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3700 	}
3701 }
3702 
3703 /**
3704  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3705  *	@adap: the adapter
3706  *	@cnt: where to store the count statistics
3707  *	@cycles: where to store the cycle statistics
3708  *
3709  *	Returns performance statistics from PMTX.
3710  */
3711 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3712 {
3713 	int i;
3714 	u32 data[2];
3715 
3716 	for (i = 0; i < PM_NSTATS; i++) {
3717 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3718 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3719 		if (is_t4(adap))
3720 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3721 		else {
3722 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3723 					 A_PM_TX_DBG_DATA, data, 2,
3724 					 A_PM_TX_DBG_STAT_MSB);
3725 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3726 		}
3727 	}
3728 }
3729 
3730 /**
3731  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3732  *	@adap: the adapter
3733  *	@cnt: where to store the count statistics
3734  *	@cycles: where to store the cycle statistics
3735  *
3736  *	Returns performance statistics from PMRX.
3737  */
3738 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3739 {
3740 	int i;
3741 	u32 data[2];
3742 
3743 	for (i = 0; i < PM_NSTATS; i++) {
3744 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3745 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3746 		if (is_t4(adap))
3747 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3748 		else {
3749 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3750 					 A_PM_RX_DBG_DATA, data, 2,
3751 					 A_PM_RX_DBG_STAT_MSB);
3752 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3753 		}
3754 	}
3755 }
3756 
3757 /**
3758  *	get_mps_bg_map - return the buffer groups associated with a port
3759  *	@adap: the adapter
3760  *	@idx: the port index
3761  *
3762  *	Returns a bitmap indicating which MPS buffer groups are associated
3763  *	with the given port.  Bit i is set if buffer group i is used by the
3764  *	port.
3765  */
3766 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3767 {
3768 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3769 
3770 	if (n == 0)
3771 		return idx == 0 ? 0xf : 0;
3772 	if (n == 1)
3773 		return idx < 2 ? (3 << (2 * idx)) : 0;
3774 	return 1 << idx;
3775 }
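
/*
 * The resulting port to buffer-group assignments, for reference:
 *
 *	NUMPORTS	port 0	port 1	port 2	port 3
 *	0 (1 port)	0xf	-	-	-
 *	1 (2 ports)	0x3	0xc	-	-
 *	other (4 ports)	0x1	0x2	0x4	0x8
 */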
3776 
3777 /**
3778  *      t4_get_port_stats_offset - collect port stats relative to a previous
3779  *                                 snapshot
3780  *      @adap: The adapter
3781  *      @idx: The port
3782  *      @stats: Current stats to fill
3783  *      @offset: Previous stats snapshot
3784  */
3785 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3786 		struct port_stats *stats,
3787 		struct port_stats *offset)
3788 {
3789 	u64 *s, *o;
3790 	int i;
3791 
3792 	t4_get_port_stats(adap, idx, stats);
3793 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3794 	    i < (sizeof(struct port_stats) / sizeof(u64));
3795 	    i++, s++, o++)
3796 		*s -= *o;
3797 }
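
/*
 * Usage sketch (hypothetical caller, with "sc" and "port" standing in for
 * the caller's adapter and port index): take a snapshot when the counters
 * should appear zeroed, then report deltas against it:
 *
 *	struct port_stats snap, delta;
 *
 *	t4_get_port_stats(sc, port, &snap);		(baseline)
 *	...
 *	t4_get_port_stats_offset(sc, port, &delta, &snap);
 *
 * Note that the field-wise subtraction above treats struct port_stats as a
 * flat array of u64 counters, which the structure layout must guarantee.
 */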
3798 
3799 /**
3800  *	t4_get_port_stats - collect port statistics
3801  *	@adap: the adapter
3802  *	@idx: the port index
3803  *	@p: the stats structure to fill
3804  *
3805  *	Collect statistics related to the given port from HW.
3806  */
3807 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3808 {
3809 	u32 bgmap = get_mps_bg_map(adap, idx);
3810 
3811 #define GET_STAT(name) \
3812 	t4_read_reg64(adap, \
3813 	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3814 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3815 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3816 
3817 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3818 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3819 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3820 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3821 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3822 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3823 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3824 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3825 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3826 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3827 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3828 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3829 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3830 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3831 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3832 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3833 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3834 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3835 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3836 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3837 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3838 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3839 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3840 
3841 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3842 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3843 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3844 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3845 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3846 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3847 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3848 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3849 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3850 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3851 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3852 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3853 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3854 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3855 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3856 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3857 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3858 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3859 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3860 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3861 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3862 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3863 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3864 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3865 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3866 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3867 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3868 
3869 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3870 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3871 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3872 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3873 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3874 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3875 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3876 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3877 
3878 #undef GET_STAT
3879 #undef GET_STAT_COM
3880 }
3881 
3882 /**
3883  *	t4_clr_port_stats - clear port statistics
3884  *	@adap: the adapter
3885  *	@idx: the port index
3886  *
3887  *	Clear HW statistics for the given port.
3888  */
3889 void t4_clr_port_stats(struct adapter *adap, int idx)
3890 {
3891 	unsigned int i;
3892 	u32 bgmap = get_mps_bg_map(adap, idx);
3893 	u32 port_base_addr;
3894 
3895 	if (is_t4(adap))
3896 		port_base_addr = PORT_BASE(idx);
3897 	else
3898 		port_base_addr = T5_PORT_BASE(idx);
3899 
3900 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3901 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3902 		t4_write_reg(adap, port_base_addr + i, 0);
3903 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3904 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3905 		t4_write_reg(adap, port_base_addr + i, 0);
3906 	for (i = 0; i < 4; i++)
3907 		if (bgmap & (1 << i)) {
3908 			t4_write_reg(adap,
3909 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3910 			t4_write_reg(adap,
3911 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3912 		}
3913 }
3914 
3915 /**
3916  *	t4_get_lb_stats - collect loopback port statistics
3917  *	@adap: the adapter
3918  *	@idx: the loopback port index
3919  *	@p: the stats structure to fill
3920  *
3921  *	Return HW statistics for the given loopback port.
3922  */
3923 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3924 {
3925 	u32 bgmap = get_mps_bg_map(adap, idx);
3926 
3927 #define GET_STAT(name) \
3928 	t4_read_reg64(adap, \
3929 	(is_t4(adap) ? \
3930 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3931 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3932 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3933 
3934 	p->octets           = GET_STAT(BYTES);
3935 	p->frames           = GET_STAT(FRAMES);
3936 	p->bcast_frames     = GET_STAT(BCAST);
3937 	p->mcast_frames     = GET_STAT(MCAST);
3938 	p->ucast_frames     = GET_STAT(UCAST);
3939 	p->error_frames     = GET_STAT(ERROR);
3940 
3941 	p->frames_64        = GET_STAT(64B);
3942 	p->frames_65_127    = GET_STAT(65B_127B);
3943 	p->frames_128_255   = GET_STAT(128B_255B);
3944 	p->frames_256_511   = GET_STAT(256B_511B);
3945 	p->frames_512_1023  = GET_STAT(512B_1023B);
3946 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3947 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3948 	p->drop             = GET_STAT(DROP_FRAMES);
3949 
3950 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3951 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3952 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3953 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3954 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3955 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3956 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3957 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3958 
3959 #undef GET_STAT
3960 #undef GET_STAT_COM
3961 }
3962 
3963 /**
3964  *	t4_wol_magic_enable - enable/disable magic packet WoL
3965  *	@adap: the adapter
3966  *	@port: the physical port index
3967  *	@addr: MAC address expected in magic packets, %NULL to disable
3968  *
3969  *	Enables/disables magic packet wake-on-LAN for the selected port.
3970  */
3971 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3972 			 const u8 *addr)
3973 {
3974 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
3975 
3976 	if (is_t4(adap)) {
3977 		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
3978 		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
3979 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
3980 	} else {
3981 		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
3982 		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
3983 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
3984 	}
3985 
3986 	if (addr) {
3987 		t4_write_reg(adap, mag_id_reg_l,
3988 			     (addr[2] << 24) | (addr[3] << 16) |
3989 			     (addr[4] << 8) | addr[5]);
3990 		t4_write_reg(adap, mag_id_reg_h,
3991 			     (addr[0] << 8) | addr[1]);
3992 	}
3993 	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
3994 			 V_MAGICEN(addr != NULL));
3995 }
3996 
3997 /**
3998  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3999  *	@adap: the adapter
4000  *	@port: the physical port index
4001  *	@map: bitmap of which HW pattern filters to set
4002  *	@mask0: byte mask for bytes 0-63 of a packet
4003  *	@mask1: byte mask for bytes 64-127 of a packet
4004  *	@crc: Ethernet CRC for selected bytes
4005  *	@enable: enable/disable switch
4006  *
4007  *	Sets the pattern filters indicated in @map to mask out the bytes
4008  *	specified in @mask0/@mask1 in received packets and compare the CRC of
4009  *	the resulting packet against @crc.  If @enable is %true pattern-based
4010  *	WoL is enabled, otherwise disabled.
4011  */
4012 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4013 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4014 {
4015 	int i;
4016 	u32 port_cfg_reg;
4017 
4018 	if (is_t4(adap))
4019 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4020 	else
4021 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4022 
4023 	if (!enable) {
4024 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4025 		return 0;
4026 	}
4027 	if (map > 0xff)
4028 		return -EINVAL;
4029 
4030 #define EPIO_REG(name) \
4031 	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4032 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4033 
4034 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4035 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4036 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4037 
4038 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4039 		if (!(map & 1))
4040 			continue;
4041 
4042 		/* write byte masks */
4043 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4044 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4045 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4046 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4047 			return -ETIMEDOUT;
4048 
4049 		/* write CRC */
4050 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4051 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4052 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4053 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4054 			return -ETIMEDOUT;
4055 	}
4056 #undef EPIO_REG
4057 
4058 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4059 	return 0;
4060 }
4061 
4062 /**
4063  *	t4_mk_filtdelwr - create a delete filter WR
4064  *	@ftid: the filter ID
4065  *	@wr: the filter work request to populate
4066  *	@qid: ingress queue to receive the delete notification
4067  *
4068  *	Creates a filter work request to delete the supplied filter.  If @qid is
4069  *	negative the delete notification is suppressed.
4070  */
4071 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4072 {
4073 	memset(wr, 0, sizeof(*wr));
4074 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4075 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4076 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4077 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4078 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4079 	if (qid >= 0)
4080 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4081 }
4082 
4083 #define INIT_CMD(var, cmd, rd_wr) do { \
4084 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4085 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4086 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4087 } while (0)
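
/*
 * For example (illustrative), INIT_CMD(c, BYE, WRITE) expands to
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *
 * as used by t4_fw_bye() and the other simple FW commands below.
 */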
4088 
/**
 *	t4_fwaddrspace_write - write to a location in the FW's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command to write @val at @addr in the firmware's
 *	address space.
 */
4089 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
4090 {
4091 	struct fw_ldst_cmd c;
4092 
4093 	memset(&c, 0, sizeof(c));
4094 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4095 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4096 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4097 	c.u.addrval.addr = htonl(addr);
4098 	c.u.addrval.val = htonl(val);
4099 
4100 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4101 }
4102 
4103 /**
4104  *	t4_i2c_rd - read a byte from an i2c addressable device
4105  *	@adap: the adapter
4106  *	@mbox: mailbox to use for the FW command
4107  *	@port_id: the port id
4108  *	@dev_addr: the i2c device address
4109  *	@offset: the byte offset to read from
4110  *	@valp: where to store the value
4111  */
4112 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
4113 	       u8 dev_addr, u8 offset, u8 *valp)
4114 {
4115 	int ret;
4116 	struct fw_ldst_cmd c;
4117 
4118 	memset(&c, 0, sizeof(c));
4119 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4120 		F_FW_CMD_READ |
4121 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
4122 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4123 	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
4124 	c.u.i2c_deprecated.base = dev_addr;
4125 	c.u.i2c_deprecated.boffset = offset;
4126 
4127 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4128 	if (ret == 0)
4129 		*valp = c.u.i2c_deprecated.data;
4130 	return ret;
4131 }
4132 
4133 /**
4134  *	t4_mdio_rd - read a PHY register through MDIO
4135  *	@adap: the adapter
4136  *	@mbox: mailbox to use for the FW command
4137  *	@phy_addr: the PHY address
4138  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4139  *	@reg: the register to read
4140  *	@valp: where to store the value
4141  *
4142  *	Issues a FW command through the given mailbox to read a PHY register.
4143  */
4144 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4145 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4146 {
4147 	int ret;
4148 	struct fw_ldst_cmd c;
4149 
4150 	memset(&c, 0, sizeof(c));
4151 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4152 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4153 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4154 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4155 				   V_FW_LDST_CMD_MMD(mmd));
4156 	c.u.mdio.raddr = htons(reg);
4157 
4158 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4159 	if (ret == 0)
4160 		*valp = ntohs(c.u.mdio.rval);
4161 	return ret;
4162 }
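
/*
 * Usage sketch (hypothetical values, with "sc" standing in for the caller's
 * adapter): read the clause-45 PMA/PMD status register (MMD 1, register 1)
 * of the PHY at address 0:
 *
 *	unsigned int stat;
 *	int ret = t4_mdio_rd(sc, sc->mbox, 0, 1, 1, &stat);
 *
 * Pass mmd = 0 instead to address a clause-22 PHY.
 */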
4163 
4164 /**
4165  *	t4_mdio_wr - write a PHY register through MDIO
4166  *	@adap: the adapter
4167  *	@mbox: mailbox to use for the FW command
4168  *	@phy_addr: the PHY address
4169  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4170  *	@reg: the register to write
4171  *	@valp: value to write
4172  *
4173  *	Issues a FW command through the given mailbox to write a PHY register.
4174  */
4175 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4176 	       unsigned int mmd, unsigned int reg, unsigned int val)
4177 {
4178 	struct fw_ldst_cmd c;
4179 
4180 	memset(&c, 0, sizeof(c));
4181 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4182 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4183 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4184 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4185 				   V_FW_LDST_CMD_MMD(mmd));
4186 	c.u.mdio.raddr = htons(reg);
4187 	c.u.mdio.rval = htons(val);
4188 
4189 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4190 }
4191 
4192 /**
4193  *	t4_sge_ctxt_flush - flush the SGE context cache
4194  *	@adap: the adapter
4195  *	@mbox: mailbox to use for the FW command
4196  *
4197  *	Issues a FW command through the given mailbox to flush the
4198  *	SGE context cache.
4199  */
4200 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4201 {
4202 	int ret;
4203 	struct fw_ldst_cmd c;
4204 
4205 	memset(&c, 0, sizeof(c));
4206 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4207 			F_FW_CMD_READ |
4208 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4209 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4210 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4211 
4212 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4213 	return ret;
4214 }
4215 
4216 /**
4217  *	t4_sge_ctxt_rd - read an SGE context through FW
4218  *	@adap: the adapter
4219  *	@mbox: mailbox to use for the FW command
4220  *	@cid: the context id
4221  *	@ctype: the context type
4222  *	@data: where to store the context data
4223  *
4224  *	Issues a FW command through the given mailbox to read an SGE context.
4225  */
4226 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4227 		   enum ctxt_type ctype, u32 *data)
4228 {
4229 	int ret;
4230 	struct fw_ldst_cmd c;
4231 
4232 	if (ctype == CTXT_EGRESS)
4233 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4234 	else if (ctype == CTXT_INGRESS)
4235 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4236 	else if (ctype == CTXT_FLM)
4237 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4238 	else
4239 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4240 
4241 	memset(&c, 0, sizeof(c));
4242 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4243 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4244 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4245 	c.u.idctxt.physid = htonl(cid);
4246 
4247 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4248 	if (ret == 0) {
4249 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4250 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4251 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4252 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4253 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4254 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4255 	}
4256 	return ret;
4257 }
4258 
4259 /**
4260  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4261  *	@adap: the adapter
4262  *	@cid: the context id
4263  *	@ctype: the context type
4264  *	@data: where to store the context data
4265  *
4266  *	Reads an SGE context directly, bypassing FW.  This is only for
4267  *	debugging when FW is unavailable.
4268  */
4269 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4270 		      u32 *data)
4271 {
4272 	int i, ret;
4273 
4274 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4275 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4276 	if (!ret)
4277 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4278 			*data++ = t4_read_reg(adap, i);
4279 	return ret;
4280 }
4281 
4282 /**
4283  *	t4_fw_hello - establish communication with FW
4284  *	@adap: the adapter
4285  *	@mbox: mailbox to use for the FW command
4286  *	@evt_mbox: mailbox to receive async FW events
4287  *	@master: specifies the caller's willingness to be the device master
4288  *	@state: returns the current device state (if non-NULL)
4289  *
4290  *	Issues a command to establish communication with FW.  Returns either
4291  *	an error (negative integer) or the mailbox of the Master PF.
4292  */
4293 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4294 		enum dev_master master, enum dev_state *state)
4295 {
4296 	int ret;
4297 	struct fw_hello_cmd c;
4298 	u32 v;
4299 	unsigned int master_mbox;
4300 	int retries = FW_CMD_HELLO_RETRIES;
4301 
4302 retry:
4303 	memset(&c, 0, sizeof(c));
4304 	INIT_CMD(c, HELLO, WRITE);
4305 	c.err_to_clearinit = htonl(
4306 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4307 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4308 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4309 			M_FW_HELLO_CMD_MBMASTER) |
4310 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4311 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4312 		F_FW_HELLO_CMD_CLEARINIT);
4313 
4314 	/*
4315 	 * Issue the HELLO command to the firmware.  If it's not successful
4316 	 * but indicates that we got a "busy" or "timeout" condition, retry
4317 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4318 	 * retry limit, check to see if the firmware left us any error
4319 	 * information and report that if so ...
4320 	 */
4321 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4322 	if (ret != FW_SUCCESS) {
4323 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4324 			goto retry;
4325 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4326 			t4_report_fw_error(adap);
4327 		return ret;
4328 	}
4329 
4330 	v = ntohl(c.err_to_clearinit);
4331 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4332 	if (state) {
4333 		if (v & F_FW_HELLO_CMD_ERR)
4334 			*state = DEV_STATE_ERR;
4335 		else if (v & F_FW_HELLO_CMD_INIT)
4336 			*state = DEV_STATE_INIT;
4337 		else
4338 			*state = DEV_STATE_UNINIT;
4339 	}
4340 
4341 	/*
4342 	 * If we're not the Master PF then we need to wait around for the
4343 	 * Master PF Driver to finish setting up the adapter.
4344 	 *
4345 	 * Note that we also do this wait if we're a non-Master-capable PF and
4346 	 * there is no current Master PF; a Master PF may show up momentarily
4347 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4348 	 * OS loads lots of different drivers rapidly at the same time).  In
4349 	 * this case, the Master PF returned by the firmware will be
4350 	 * M_PCIE_FW_MASTER so the test below will work ...
4351 	 */
4352 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4353 	    master_mbox != mbox) {
4354 		int waiting = FW_CMD_HELLO_TIMEOUT;
4355 
4356 		/*
4357 		 * Wait for the firmware to either indicate an error or
4358 		 * initialized state.  If we see either of these we bail out
4359 		 * and report the issue to the caller.  If we exhaust the
4360 		 * "hello timeout" and we haven't exhausted our retries, try
4361 		 * again.  Otherwise bail with a timeout error.
4362 		 */
4363 		for (;;) {
4364 			u32 pcie_fw;
4365 
4366 			msleep(50);
4367 			waiting -= 50;
4368 
4369 			/*
4370 			 * If neither Error nor Initialized is indicated
4371 			 * by the firmware keep waiting till we exhaust our
4372 			 * timeout ... and then retry if we haven't exhausted
4373 			 * our retries ...
4374 			 */
4375 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4376 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4377 				if (waiting <= 0) {
4378 					if (retries-- > 0)
4379 						goto retry;
4380 
4381 					return -ETIMEDOUT;
4382 				}
4383 				continue;
4384 			}
4385 
4386 			/*
4387 			 * We either have an Error or Initialized condition;
4388 			 * report errors preferentially.
4389 			 */
4390 			if (state) {
4391 				if (pcie_fw & F_PCIE_FW_ERR)
4392 					*state = DEV_STATE_ERR;
4393 				else if (pcie_fw & F_PCIE_FW_INIT)
4394 					*state = DEV_STATE_INIT;
4395 			}
4396 
4397 			/*
4398 			 * If we arrived before a Master PF was selected and
4399 			 * there's now a valid Master PF, grab its identity
4400 			 * for our caller.
4401 			 */
4402 			if (master_mbox == M_PCIE_FW_MASTER &&
4403 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4404 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4405 			break;
4406 		}
4407 	}
4408 
4409 	return master_mbox;
4410 }
4411 
4412 /**
4413  *	t4_fw_bye - end communication with FW
4414  *	@adap: the adapter
4415  *	@mbox: mailbox to use for the FW command
4416  *
4417  *	Issues a command to terminate communication with FW.
4418  */
4419 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4420 {
4421 	struct fw_bye_cmd c;
4422 
4423 	memset(&c, 0, sizeof(c));
4424 	INIT_CMD(c, BYE, WRITE);
4425 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4426 }
4427 
4428 /**
4429  *	t4_fw_reset - issue a reset to FW
4430  *	@adap: the adapter
4431  *	@mbox: mailbox to use for the FW command
4432  *	@reset: specifies the type of reset to perform
4433  *
4434  *	Issues a reset command of the specified type to FW.
4435  */
4436 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4437 {
4438 	struct fw_reset_cmd c;
4439 
4440 	memset(&c, 0, sizeof(c));
4441 	INIT_CMD(c, RESET, WRITE);
4442 	c.val = htonl(reset);
4443 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4444 }
4445 
4446 /**
4447  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4448  *	@adap: the adapter
4449  *	@mbox: mailbox to use for the FW RESET command (if desired)
4450  *	@force: force uP into RESET even if FW RESET command fails
4451  *
4452  *	Issues a RESET command to firmware (if desired) with a HALT indication
4453  *	and then puts the microprocessor into RESET state.  The RESET command
4454  *	will only be issued if a legitimate mailbox is provided (mbox <=
4455  *	M_PCIE_FW_MASTER).
4456  *
4457  *	This is generally used in order for the host to safely manipulate the
4458  *	adapter without fear of conflicting with whatever the firmware might
4459  *	be doing.  The only way out of this state is to RESTART the firmware
4460  *	...
4461  */
4462 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4463 {
4464 	int ret = 0;
4465 
4466 	/*
4467 	 * If a legitimate mailbox is provided, issue a RESET command
4468 	 * with a HALT indication.
4469 	 */
4470 	if (mbox <= M_PCIE_FW_MASTER) {
4471 		struct fw_reset_cmd c;
4472 
4473 		memset(&c, 0, sizeof(c));
4474 		INIT_CMD(c, RESET, WRITE);
4475 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4476 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4477 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4478 	}
4479 
4480 	/*
4481 	 * Normally we won't complete the operation if the firmware RESET
4482 	 * command fails but if our caller insists we'll go ahead and put the
4483 	 * uP into RESET.  This can be useful if the firmware is hung or even
4484 	 * missing ...  We'll have to take the risk of putting the uP into
4485 	 * RESET without the cooperation of firmware in that case.
4486 	 *
4487 	 * We also force the firmware's HALT flag to be on in case we bypassed
4488 	 * the firmware RESET command above or we're dealing with old firmware
4489 	 * which doesn't have the HALT capability.  This will serve as a flag
4490 	 * for the incoming firmware to know that it's coming out of a HALT
4491 	 * rather than a RESET ... if it's new enough to understand that ...
4492 	 */
4493 	if (ret == 0 || force) {
4494 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4495 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4496 	}
4497 
4498 	/*
4499 	 * And we always return the result of the firmware RESET command
4500 	 * even when we force the uP into RESET ...
4501 	 */
4502 	return ret;
4503 }
4504 
4505 /**
4506  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4507  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
4508  *	@reset: if we want to do a RESET to restart things
4509  *
4510  *	Restart firmware previously halted by t4_fw_halt().  On successful
4511  *	return the previous PF Master remains as the new PF Master and there
4512  *	is no need to issue a new HELLO command, etc.
4513  *
4514  *	We do this in two ways:
4515  *
4516  *	 1. If we're dealing with newer firmware we'll simply want to take
4517  *	    the chip's microprocessor out of RESET.  This will cause the
4518  *	    firmware to start up from its start vector.  And then we'll loop
4519  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4520  *	    reset to 0) or we timeout.
4521  *
4522  *	 2. If we're dealing with older firmware then we'll need to RESET
4523  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4524  *	    flag and automatically RESET itself on startup.
4525  */
4526 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4527 {
4528 	if (reset) {
4529 		/*
4530 		 * Since we're directing the RESET instead of the firmware
4531 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4532 		 * bit.
4533 		 */
4534 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4535 
4536 		/*
4537 		 * If we've been given a valid mailbox, first try to get the
4538 		 * firmware to do the RESET.  If that works, great and we can
4539 		 * return success.  Otherwise, if we haven't been given a
4540 		 * valid mailbox or the RESET command failed, fall back to
4541 		 * hitting the chip with a hammer.
4542 		 */
4543 		if (mbox <= M_PCIE_FW_MASTER) {
4544 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4545 			msleep(100);
4546 			if (t4_fw_reset(adap, mbox,
4547 					F_PIORST | F_PIORSTMODE) == 0)
4548 				return 0;
4549 		}
4550 
4551 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4552 		msleep(2000);
4553 	} else {
4554 		int ms;
4555 
4556 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4557 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4558 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4559 				return FW_SUCCESS;
4560 			msleep(100);
4561 			ms += 100;
4562 		}
4563 		return -ETIMEDOUT;
4564 	}
4565 	return 0;
4566 }
4567 
4568 /**
4569  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4570  *	@adap: the adapter
4571  *	@mbox: mailbox to use for the FW RESET command (if desired)
4572  *	@fw_data: the firmware image to write
4573  *	@size: image size
4574  *	@force: force upgrade even if firmware doesn't cooperate
4575  *
4576  *	Perform all of the steps necessary for upgrading an adapter's
4577  *	firmware image.  Normally this requires the cooperation of the
4578  *	existing firmware in order to halt all existing activities
4579  *	but if an invalid mailbox token is passed in we skip that step
4580  *	(though we'll still put the adapter microprocessor into RESET in
4581  *	that case).
4582  *
4583  *	On successful return the new firmware will have been loaded and
4584  *	the adapter will have been fully RESET losing all previous setup
4585  *	state.  On unsuccessful return the adapter may be completely hosed ...
4586  *	positive errno indicates that the adapter is ~probably~ intact, a
4587  *	negative errno indicates that things are looking bad ...
4588  */
4589 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4590 		  const u8 *fw_data, unsigned int size, int force)
4591 {
4592 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4593 	int reset, ret;
4594 
4595 	ret = t4_fw_halt(adap, mbox, force);
4596 	if (ret < 0 && !force)
4597 		return ret;
4598 
4599 	ret = t4_load_fw(adap, fw_data, size);
4600 	if (ret < 0)
4601 		return ret;
4602 
4603 	/*
4604 	 * Older versions of the firmware don't understand the new
4605 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4606 	 * restart.  So for newly loaded older firmware we'll have to do the
4607 	 * RESET for it so it starts up on a clean slate.  We can tell if
4608 	 * the newly loaded firmware will handle this right by checking
4609 	 * its header flags to see if it advertises the capability.
4610 	 */
4611 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4612 	return t4_fw_restart(adap, mbox, reset);
4613 }
4614 
4615 /**
4616  *	t4_fw_initialize - ask FW to initialize the device
4617  *	@adap: the adapter
4618  *	@mbox: mailbox to use for the FW command
4619  *
4620  *	Issues a command to FW to partially initialize the device.  This
4621  *	performs initialization that generally doesn't depend on user input.
4622  */
4623 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4624 {
4625 	struct fw_initialize_cmd c;
4626 
4627 	memset(&c, 0, sizeof(c));
4628 	INIT_CMD(c, INITIALIZE, WRITE);
4629 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4630 }
4631 
4632 /**
4633  *	t4_query_params - query FW or device parameters
4634  *	@adap: the adapter
4635  *	@mbox: mailbox to use for the FW command
4636  *	@pf: the PF
4637  *	@vf: the VF
4638  *	@nparams: the number of parameters
4639  *	@params: the parameter names
4640  *	@val: the parameter values
4641  *
4642  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4643  *	queried at once.
4644  */
4645 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4646 		    unsigned int vf, unsigned int nparams, const u32 *params,
4647 		    u32 *val)
4648 {
4649 	int i, ret;
4650 	struct fw_params_cmd c;
4651 	__be32 *p = &c.param[0].mnem;
4652 
4653 	if (nparams > 7)
4654 		return -EINVAL;
4655 
4656 	memset(&c, 0, sizeof(c));
4657 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4658 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4659 			    V_FW_PARAMS_CMD_VFN(vf));
4660 	c.retval_len16 = htonl(FW_LEN16(c));
4661 
4662 	for (i = 0; i < nparams; i++, p += 2)
4663 		*p = htonl(*params++);
4664 
4665 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4666 	if (ret == 0)
4667 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4668 			*val++ = ntohl(*p);
4669 	return ret;
4670 }
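
/*
 * Usage sketch (hypothetical, with "sc" standing in for the caller's
 * adapter): query the device's port vector through PF/VF 0/0:
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	ret = t4_query_params(sc, sc->mbox, 0, 0, 1, &param, &val);
 *
 * On success, val holds a bitmap of the physical ports available to the
 * function.
 */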
4671 
4672 /**
4673  *	t4_set_params - sets FW or device parameters
4674  *	@adap: the adapter
4675  *	@mbox: mailbox to use for the FW command
4676  *	@pf: the PF
4677  *	@vf: the VF
4678  *	@nparams: the number of parameters
4679  *	@params: the parameter names
4680  *	@val: the parameter values
4681  *
4682  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4683  *	specified at once.
4684  */
4685 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4686 		  unsigned int vf, unsigned int nparams, const u32 *params,
4687 		  const u32 *val)
4688 {
4689 	struct fw_params_cmd c;
4690 	__be32 *p = &c.param[0].mnem;
4691 
4692 	if (nparams > 7)
4693 		return -EINVAL;
4694 
4695 	memset(&c, 0, sizeof(c));
4696 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4697 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4698 			    V_FW_PARAMS_CMD_VFN(vf));
4699 	c.retval_len16 = htonl(FW_LEN16(c));
4700 
4701 	while (nparams--) {
4702 		*p++ = htonl(*params++);
4703 		*p++ = htonl(*val++);
4704 	}
4705 
4706 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4707 }
4708 
4709 /**
4710  *	t4_cfg_pfvf - configure PF/VF resource limits
4711  *	@adap: the adapter
4712  *	@mbox: mailbox to use for the FW command
4713  *	@pf: the PF being configured
4714  *	@vf: the VF being configured
4715  *	@txq: the max number of egress queues
4716  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4717  *	@rxqi: the max number of interrupt-capable ingress queues
4718  *	@rxq: the max number of interruptless ingress queues
4719  *	@tc: the PCI traffic class
4720  *	@vi: the max number of virtual interfaces
4721  *	@cmask: the channel access rights mask for the PF/VF
4722  *	@pmask: the port access rights mask for the PF/VF
4723  *	@nexact: the maximum number of exact MPS filters
4724  *	@rcaps: read capabilities
4725  *	@wxcaps: write/execute capabilities
4726  *
4727  *	Configures resource limits and capabilities for a physical or virtual
4728  *	function.
4729  */
4730 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4731 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4732 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4733 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4734 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4735 {
4736 	struct fw_pfvf_cmd c;
4737 
4738 	memset(&c, 0, sizeof(c));
4739 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4740 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4741 			    V_FW_PFVF_CMD_VFN(vf));
4742 	c.retval_len16 = htonl(FW_LEN16(c));
4743 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4744 			       V_FW_PFVF_CMD_NIQ(rxq));
4745 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4746 			      V_FW_PFVF_CMD_PMASK(pmask) |
4747 			      V_FW_PFVF_CMD_NEQ(txq));
4748 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4749 				V_FW_PFVF_CMD_NEXACTF(nexact));
4750 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4751 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4752 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4753 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4754 }
4755 
4756 /**
4757  *	t4_alloc_vi_func - allocate a virtual interface
4758  *	@adap: the adapter
4759  *	@mbox: mailbox to use for the FW command
4760  *	@port: physical port associated with the VI
4761  *	@pf: the PF owning the VI
4762  *	@vf: the VF owning the VI
4763  *	@nmac: number of MAC addresses needed (1 to 5)
4764  *	@mac: the MAC addresses of the VI
4765  *	@rss_size: size of RSS table slice associated with this VI
4766  *	@portfunc: which Port Application Function MAC Address is desired
4767  *	@idstype: Intrusion Detection Type
4768  *
4769  *	Allocates a virtual interface for the given physical port.  If @mac is
4770  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4771  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4772  *	stored consecutively so the space needed is @nmac * 6 bytes.
4773  *	Returns a negative error number or the non-negative VI id.
4774  */
4775 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4776 		     unsigned int port, unsigned int pf, unsigned int vf,
4777 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4778 		     unsigned int portfunc, unsigned int idstype)
4779 {
4780 	int ret;
4781 	struct fw_vi_cmd c;
4782 
4783 	memset(&c, 0, sizeof(c));
4784 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4785 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4786 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4787 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4788 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4789 			       V_FW_VI_CMD_FUNC(portfunc));
4790 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4791 	c.nmac = nmac - 1;
4792 
4793 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4794 	if (ret)
4795 		return ret;
4796 
4797 	if (mac) {
4798 		memcpy(mac, c.mac, sizeof(c.mac));
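		/*
		 * Deliberate fall-through below: copy the extra MAC
		 * addresses from the highest requested one down.
		 */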
4799 		switch (nmac) {
4800 		case 5:
4801 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4802 		case 4:
4803 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4804 		case 3:
4805 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4806 		case 2:
4807 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4808 		}
4809 	}
4810 	if (rss_size)
4811 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4812 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4813 }
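
/*
 * Usage sketch (hypothetical, with "sc" standing in for the caller's
 * adapter): allocate an Ethernet VI with a single MAC address on port 0
 * via the t4_alloc_vi() wrapper below:
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(sc, sc->mbox, 0, sc->pf, 0, 1, mac, &rss_size);
 *
 * A negative return is an error; otherwise viid identifies the new VI and
 * mac holds its firmware-assigned address.
 */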
4814 
4815 /**
4816  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4817  *	@adap: the adapter
4818  *	@mbox: mailbox to use for the FW command
4819  *	@port: physical port associated with the VI
4820  *	@pf: the PF owning the VI
4821  *	@vf: the VF owning the VI
4822  *	@nmac: number of MAC addresses needed (1 to 5)
4823  *	@mac: the MAC addresses of the VI
4824  *	@rss_size: size of RSS table slice associated with this VI
4825  *
4826  *	Backwards-compatible convenience routine to allocate a Virtual
4827  *	Interface with an Ethernet Port Application Function and Intrusion
4828  *	Detection System disabled.
4829  */
4830 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4831 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4832 		unsigned int *rss_size)
4833 {
4834 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4835 				FW_VI_FUNC_ETH, 0);
4836 }
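
/*
 * Illustrative sketch, not part of the driver: a typical allocate/use/free
 * cycle for an Ethernet VI.  Physical port 0, PF4/VF0 and the use of
 * sc->mbox are assumptions for the example only.  Kept under #if 0 so it
 * is never built.
 */
#if 0
static int example_vi_lifecycle(struct adapter *sc)
{
	u8 mac[6];		/* room for one 6-byte MAC address */
	unsigned int rss_size;
	int viid;

	/* One Ethernet VI on physical port 0, owned by PF4/VF0. */
	viid = t4_alloc_vi(sc, sc->mbox, 0, 4, 0, 1, mac, &rss_size);
	if (viid < 0)
		return viid;	/* negative error from the firmware */

	/* ... configure and use the VI here ... */

	return t4_free_vi(sc, sc->mbox, 4, 0, viid);
}
#endif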
4837 
4838 /**
4839  *	t4_free_vi - free a virtual interface
4840  *	@adap: the adapter
4841  *	@mbox: mailbox to use for the FW command
4842  *	@pf: the PF owning the VI
4843  *	@vf: the VF owning the VI
4844  *	@viid: virtual interface identifier
4845  *
4846  *	Free a previously allocated virtual interface.
4847  */
4848 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4849 	       unsigned int vf, unsigned int viid)
4850 {
4851 	struct fw_vi_cmd c;
4852 
4853 	memset(&c, 0, sizeof(c));
4854 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4855 			    F_FW_CMD_REQUEST |
4856 			    F_FW_CMD_EXEC |
4857 			    V_FW_VI_CMD_PFN(pf) |
4858 			    V_FW_VI_CMD_VFN(vf));
4859 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4860 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4861 
4862 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4863 }
4864 
4865 /**
4866  *	t4_set_rxmode - set Rx properties of a virtual interface
4867  *	@adap: the adapter
4868  *	@mbox: mailbox to use for the FW command
4869  *	@viid: the VI id
4870  *	@mtu: the new MTU or -1
4871  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4872  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4873  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4874  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4875  *	@sleep_ok: if true we may sleep while awaiting command completion
4876  *
4877  *	Sets Rx properties of a virtual interface.
4878  */
4879 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4880 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4881 		  bool sleep_ok)
4882 {
4883 	struct fw_vi_rxmode_cmd c;
4884 
4885 	/* convert to FW values */
4886 	if (mtu < 0)
4887 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4888 	if (promisc < 0)
4889 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4890 	if (all_multi < 0)
4891 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4892 	if (bcast < 0)
4893 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4894 	if (vlanex < 0)
4895 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4896 
4897 	memset(&c, 0, sizeof(c));
4898 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4899 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4900 	c.retval_len16 = htonl(FW_LEN16(c));
4901 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4902 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4903 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4904 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4905 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4906 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4907 }
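
/*
 * Illustrative sketch, not part of the driver: put a VI into promiscuous,
 * all-multicast Rx mode while leaving MTU, broadcast Rx and VLAN extraction
 * unchanged (-1 means "no change"), sleeping until the mailbox completes.
 * sc->mbox is an assumption for the example.
 */
#if 0
static int example_enable_promisc(struct adapter *sc, unsigned int viid)
{
	return t4_set_rxmode(sc, sc->mbox, viid, -1, 1, 1, -1, -1, true);
}
#endif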
4908 
4909 /**
4910  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4911  *	@adap: the adapter
4912  *	@mbox: mailbox to use for the FW command
4913  *	@viid: the VI id
4914  *	@free: if true any existing filters for this VI id are first removed
4915  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4916  *	@addr: the MAC address(es)
4917  *	@idx: where to store the index of each allocated filter
4918  *	@hash: pointer to hash address filter bitmap
4919  *	@sleep_ok: call is allowed to sleep
4920  *
4921  *	Allocates an exact-match filter for each of the supplied addresses and
4922  *	sets it to the corresponding address.  If @idx is not %NULL it should
4923  *	have at least @naddr entries, each of which will be set to the index of
4924  *	the filter allocated for the corresponding MAC address.  If a filter
4925  *	could not be allocated for an address its index is set to 0xffff.
4926  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4927  *	are hashed and the hash filter bitmap pointed at by @hash is updated.
4928  *
4929  *	Returns a negative error number or the number of filters allocated.
4930  */
4931 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4932 		      unsigned int viid, bool free, unsigned int naddr,
4933 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4934 {
4935 	int offset, ret = 0;
4936 	struct fw_vi_mac_cmd c;
4937 	unsigned int nfilters = 0;
4938 	unsigned int max_naddr = is_t4(adap) ?
4939 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
4940 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4941 	unsigned int rem = naddr;
4942 
4943 	if (naddr > max_naddr)
4944 		return -EINVAL;
4945 
4946 	for (offset = 0; offset < naddr; /**/) {
4947 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4948 					 ? rem
4949 					 : ARRAY_SIZE(c.u.exact));
4950 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4951 						     u.exact[fw_naddr]), 16);
4952 		struct fw_vi_mac_exact *p;
4953 		int i;
4954 
4955 		memset(&c, 0, sizeof(c));
4956 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4957 				     F_FW_CMD_REQUEST |
4958 				     F_FW_CMD_WRITE |
4959 				     V_FW_CMD_EXEC(free) |
4960 				     V_FW_VI_MAC_CMD_VIID(viid));
4961 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4962 					    V_FW_CMD_LEN16(len16));
4963 
4964 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4965 			p->valid_to_idx = htons(
4966 				F_FW_VI_MAC_CMD_VALID |
4967 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4968 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4969 		}
4970 
4971 		/*
4972 		 * It's okay if we run out of space in our MAC address arena.
4973 		 * Some of the addresses we submit may get stored so we need
4974 		 * to run through the reply to see what the results were ...
4975 		 */
4976 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4977 		if (ret && ret != -FW_ENOMEM)
4978 			break;
4979 
4980 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4981 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4982 
4983 			if (idx)
4984 				idx[offset+i] = (index >= max_naddr
4985 						 ? 0xffff
4986 						 : index);
4987 			if (index < max_naddr)
4988 				nfilters++;
4989 			else if (hash)
4990 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4991 		}
4992 
4993 		free = false;
4994 		offset += fw_naddr;
4995 		rem -= fw_naddr;
4996 	}
4997 
4998 	if (ret == 0 || ret == -FW_ENOMEM)
4999 		ret = nfilters;
5000 	return ret;
5001 }
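
/*
 * Illustrative sketch, not part of the driver: install exact-match filters
 * for two multicast addresses.  The addresses and sc->mbox are assumptions
 * for the example; entries that could not be allocated come back as 0xffff
 * in idx[] and as set bits in the hash bitmap returned through @hashp.
 */
#if 0
static int example_add_mcaddrs(struct adapter *sc, unsigned int viid,
			       u64 *hashp)
{
	static const u8 mc0[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	static const u8 mc1[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	const u8 *addrs[2] = { mc0, mc1 };
	u16 idx[2];

	*hashp = 0;
	return t4_alloc_mac_filt(sc, sc->mbox, viid, false, 2, addrs, idx,
				 hashp, true);
}
#endif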
5002 
5003 /**
5004  *	t4_change_mac - modifies the exact-match filter for a MAC address
5005  *	@adap: the adapter
5006  *	@mbox: mailbox to use for the FW command
5007  *	@viid: the VI id
5008  *	@idx: index of existing filter for old value of MAC address, or -1
5009  *	@addr: the new MAC address value
5010  *	@persist: whether a new MAC allocation should be persistent
5011  *	@add_smt: if true also add the address to the HW SMT
5012  *
5013  *	Modifies an exact-match filter and sets it to the new MAC address if
5014  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5015  *	latter case the address is added persistently if @persist is %true.
5016  *
5017  *	Note that in general it is not possible to modify the value of a given
5018  *	filter so the generic way to modify an address filter is to free the one
5019  *	being used by the old address value and allocate a new filter for the
5020  *	new address value.
5021  *
5022  *	Returns a negative error number or the index of the filter with the new
5023  *	MAC value.  Note that this index may differ from @idx.
5024  */
5025 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5026 		  int idx, const u8 *addr, bool persist, bool add_smt)
5027 {
5028 	int ret, mode;
5029 	struct fw_vi_mac_cmd c;
5030 	struct fw_vi_mac_exact *p = c.u.exact;
5031 	unsigned int max_mac_addr = is_t4(adap) ?
5032 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5033 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5034 
5035 	if (idx < 0)                             /* new allocation */
5036 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5037 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5038 
5039 	memset(&c, 0, sizeof(c));
5040 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5041 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5042 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5043 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5044 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5045 				V_FW_VI_MAC_CMD_IDX(idx));
5046 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5047 
5048 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5049 	if (ret == 0) {
5050 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5051 		if (ret >= max_mac_addr)
5052 			ret = -ENOMEM;
5053 	}
5054 	return ret;
5055 }
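
/*
 * Illustrative sketch, not part of the driver: replace a VI's primary
 * unicast address.  Passing a previously returned filter index reuses that
 * slot; -1 on first use asks for a fresh persistent filter.  The cached
 * index parameter and sc->mbox are assumptions for the example.
 */
#if 0
static int example_set_hwaddr(struct adapter *sc, unsigned int viid,
			      int *cached_idx, const u8 *new_mac)
{
	int ret;

	ret = t4_change_mac(sc, sc->mbox, viid, *cached_idx, new_mac,
			    true, true);
	if (ret >= 0) {
		*cached_idx = ret;	/* index may differ from the old one */
		ret = 0;
	}
	return ret;
}
#endif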
5056 
5057 /**
5058  *	t4_set_addr_hash - program the MAC inexact-match hash filter
5059  *	@adap: the adapter
5060  *	@mbox: mailbox to use for the FW command
5061  *	@viid: the VI id
5062  *	@ucast: whether the hash filter should also match unicast addresses
5063  *	@vec: the value to be written to the hash filter
5064  *	@sleep_ok: call is allowed to sleep
5065  *
5066  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5067  */
5068 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5069 		     bool ucast, u64 vec, bool sleep_ok)
5070 {
5071 	struct fw_vi_mac_cmd c;
5072 
5073 	memset(&c, 0, sizeof(c));
5074 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5075 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5076 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5077 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5078 				    V_FW_CMD_LEN16(1));
5079 	c.u.hash.hashvec = cpu_to_be64(vec);
5080 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5081 }
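
/*
 * Illustrative sketch, not part of the driver: program the 64-bit
 * inexact-match hash with bits collected by t4_alloc_mac_filt() above,
 * matching multicast only; writing 0 clears it again.  sc->mbox is an
 * assumption for the example.
 */
#if 0
static int example_program_hash(struct adapter *sc, unsigned int viid,
				u64 hash)
{
	return t4_set_addr_hash(sc, sc->mbox, viid, false, hash, true);
}
#endif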
5082 
5083 /**
5084  *	t4_enable_vi - enable/disable a virtual interface
5085  *	@adap: the adapter
5086  *	@mbox: mailbox to use for the FW command
5087  *	@viid: the VI id
5088  *	@rx_en: 1=enable Rx, 0=disable Rx
5089  *	@tx_en: 1=enable Tx, 0=disable Tx
5090  *
5091  *	Enables/disables a virtual interface.
5092  */
5093 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5094 		 bool rx_en, bool tx_en)
5095 {
5096 	struct fw_vi_enable_cmd c;
5097 
5098 	memset(&c, 0, sizeof(c));
5099 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5100 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5101 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5102 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5103 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5104 }
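
/*
 * Illustrative sketch, not part of the driver: quiesce receive traffic on
 * a VI while leaving transmit enabled, e.g. around a queue reconfiguration.
 * sc->mbox is an assumption for the example.
 */
#if 0
static int example_pause_rx(struct adapter *sc, unsigned int viid)
{
	return t4_enable_vi(sc, sc->mbox, viid, false, true);
}
#endif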
5105 
5106 /**
5107  *	t4_identify_port - identify a VI's port by blinking its LED
5108  *	@adap: the adapter
5109  *	@mbox: mailbox to use for the FW command
5110  *	@viid: the VI id
5111  *	@nblinks: how many times to blink LED at 2.5 Hz
5112  *
5113  *	Identifies a VI's port by blinking its LED.
5114  */
5115 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5116 		     unsigned int nblinks)
5117 {
5118 	struct fw_vi_enable_cmd c;
5119 
5120 	memset(&c, 0, sizeof(c));
5121 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5122 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5123 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5124 	c.blinkdur = htons(nblinks);
5125 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5126 }
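
/*
 * Illustrative sketch, not part of the driver: blink a port's LED ten
 * times (4 seconds at 2.5 Hz) so an operator can locate it in the rack.
 * sc->mbox is an assumption for the example.
 */
#if 0
static int example_locate_port(struct adapter *sc, unsigned int viid)
{
	return t4_identify_port(sc, sc->mbox, viid, 10);
}
#endif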
5127 
5128 /**
5129  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5130  *	@adap: the adapter
5131  *	@mbox: mailbox to use for the FW command
5132  *	@start: %true to enable the queues, %false to disable them
5133  *	@pf: the PF owning the queues
5134  *	@vf: the VF owning the queues
5135  *	@iqid: ingress queue id
5136  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5137  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5138  *
5139  *	Starts or stops an ingress queue and its associated FLs, if any.
5140  */
5141 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5142 		     unsigned int pf, unsigned int vf, unsigned int iqid,
5143 		     unsigned int fl0id, unsigned int fl1id)
5144 {
5145 	struct fw_iq_cmd c;
5146 
5147 	memset(&c, 0, sizeof(c));
5148 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5149 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5150 			    V_FW_IQ_CMD_VFN(vf));
5151 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5152 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5153 	c.iqid = htons(iqid);
5154 	c.fl0id = htons(fl0id);
5155 	c.fl1id = htons(fl1id);
5156 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5157 }
5158 
5159 /**
5160  *	t4_iq_free - free an ingress queue and its FLs
5161  *	@adap: the adapter
5162  *	@mbox: mailbox to use for the FW command
5163  *	@pf: the PF owning the queues
5164  *	@vf: the VF owning the queues
5165  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5166  *	@iqid: ingress queue id
5167  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5168  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5169  *
5170  *	Frees an ingress queue and its associated FLs, if any.
5171  */
5172 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5173 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5174 	       unsigned int fl0id, unsigned int fl1id)
5175 {
5176 	struct fw_iq_cmd c;
5177 
5178 	memset(&c, 0, sizeof(c));
5179 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5180 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5181 			    V_FW_IQ_CMD_VFN(vf));
5182 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5183 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5184 	c.iqid = htons(iqid);
5185 	c.fl0id = htons(fl0id);
5186 	c.fl1id = htons(fl1id);
5187 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5188 }
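
/*
 * Illustrative sketch, not part of the driver: free an ingress queue that
 * has a single free list attached.  The use of sc->mbox and sc->pf is an
 * assumption for the example; 0xffff marks the absent second free list,
 * as described in the comment above.
 */
#if 0
static int example_free_rxq(struct adapter *sc, unsigned int iqid,
			    unsigned int flid)
{
	return t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, flid, 0xffff);
}
#endif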
5189 
5190 /**
5191  *	t4_eth_eq_free - free an Ethernet egress queue
5192  *	@adap: the adapter
5193  *	@mbox: mailbox to use for the FW command
5194  *	@pf: the PF owning the queue
5195  *	@vf: the VF owning the queue
5196  *	@eqid: egress queue id
5197  *
5198  *	Frees an Ethernet egress queue.
5199  */
5200 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5201 		   unsigned int vf, unsigned int eqid)
5202 {
5203 	struct fw_eq_eth_cmd c;
5204 
5205 	memset(&c, 0, sizeof(c));
5206 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5207 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5208 			    V_FW_EQ_ETH_CMD_VFN(vf));
5209 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5210 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5211 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5212 }
5213 
5214 /**
5215  *	t4_ctrl_eq_free - free a control egress queue
5216  *	@adap: the adapter
5217  *	@mbox: mailbox to use for the FW command
5218  *	@pf: the PF owning the queue
5219  *	@vf: the VF owning the queue
5220  *	@eqid: egress queue id
5221  *
5222  *	Frees a control egress queue.
5223  */
5224 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5225 		    unsigned int vf, unsigned int eqid)
5226 {
5227 	struct fw_eq_ctrl_cmd c;
5228 
5229 	memset(&c, 0, sizeof(c));
5230 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5231 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5232 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5233 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5234 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5235 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5236 }
5237 
5238 /**
5239  *	t4_ofld_eq_free - free an offload egress queue
5240  *	@adap: the adapter
5241  *	@mbox: mailbox to use for the FW command
5242  *	@pf: the PF owning the queue
5243  *	@vf: the VF owning the queue
5244  *	@eqid: egress queue id
5245  *
5246  *	Frees an offload egress queue.
5247  */
5248 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5249 		    unsigned int vf, unsigned int eqid)
5250 {
5251 	struct fw_eq_ofld_cmd c;
5252 
5253 	memset(&c, 0, sizeof(c));
5254 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5255 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5256 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5257 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5258 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5259 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5260 }
5261 
5262 /**
5263  *	t4_handle_fw_rpl - process a FW reply message
5264  *	@adap: the adapter
5265  *	@rpl: start of the FW message
5266  *
5267  *	Processes a FW message, such as link state change messages.
5268  */
5269 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5270 {
5271 	u8 opcode = *(const u8 *)rpl;
5272 	const struct fw_port_cmd *p = (const void *)rpl;
5273 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5274 
5275 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5276 		/* link/module state change message */
5277 		int speed = 0, fc = 0, i;
5278 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5279 		struct port_info *pi = NULL;
5280 		struct link_config *lc;
5281 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5282 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5283 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5284 
5285 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5286 			fc |= PAUSE_RX;
5287 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5288 			fc |= PAUSE_TX;
5289 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5290 			speed = SPEED_100;
5291 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5292 			speed = SPEED_1000;
5293 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5294 			speed = SPEED_10000;
5295 
5296 		for_each_port(adap, i) {
5297 			pi = adap2pinfo(adap, i);
5298 			if (pi->tx_chan == chan)
5299 				break;
5300 		}
5301 		lc = &pi->link_cfg;
5302 
5303 		if (link_ok != lc->link_ok || speed != lc->speed ||
5304 		    fc != lc->fc) {                    /* something changed */
5305 			lc->link_ok = link_ok;
5306 			lc->speed = speed;
5307 			lc->fc = fc;
5308 			t4_os_link_changed(adap, i, link_ok);
5309 		}
5310 		if (mod != pi->mod_type) {
5311 			pi->mod_type = mod;
5312 			t4_os_portmod_changed(adap, i);
5313 		}
5314 	} else {
5315 		CH_WARN_RATELIMIT(adap,
5316 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5317 		return -EINVAL;
5318 	}
5319 	return 0;
5320 }
5321 
5322 /**
5323  *	get_pci_mode - determine a card's PCI mode
5324  *	@adapter: the adapter
5325  *	@p: where to store the PCI settings
5326  *
5327  *	Determines a card's PCI mode and associated parameters, such as speed
5328  *	and width.
5329  */
5330 static void __devinit get_pci_mode(struct adapter *adapter,
5331 				   struct pci_params *p)
5332 {
5333 	u16 val;
5334 	u32 pcie_cap;
5335 
5336 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5337 	if (pcie_cap) {
5338 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5339 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5340 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5341 	}
5342 }
5343 
5344 /**
5345  *	init_link_config - initialize a link's SW state
5346  *	@lc: structure holding the link state
5347  *	@caps: link capabilities
5348  *
5349  *	Initializes the SW state maintained for each link, including the link's
5350  *	capabilities and default speed/flow-control/autonegotiation settings.
5351  */
5352 static void __devinit init_link_config(struct link_config *lc,
5353 				       unsigned int caps)
5354 {
5355 	lc->supported = caps;
5356 	lc->requested_speed = 0;
5357 	lc->speed = 0;
5358 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5359 	if (lc->supported & FW_PORT_CAP_ANEG) {
5360 		lc->advertising = lc->supported & ADVERT_MASK;
5361 		lc->autoneg = AUTONEG_ENABLE;
5362 		lc->requested_fc |= PAUSE_AUTONEG;
5363 	} else {
5364 		lc->advertising = 0;
5365 		lc->autoneg = AUTONEG_DISABLE;
5366 	}
5367 }
5368 
5369 static int __devinit get_flash_params(struct adapter *adapter)
5370 {
5371 	int ret;
5372 	u32 info = 0;
5373 
5374 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5375 	if (!ret)
5376 		ret = sf1_read(adapter, 3, 0, 1, &info);
5377 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5378 	if (ret < 0)
5379 		return ret;
5380 
5381 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5382 		return -EINVAL;
5383 	info >>= 16;                           /* log2 of size */
5384 	if (info >= 0x14 && info < 0x18)
5385 		adapter->params.sf_nsec = 1 << (info - 16);
5386 	else if (info == 0x18)
5387 		adapter->params.sf_nsec = 64;
5388 	else
5389 		return -EINVAL;
5390 	adapter->params.sf_size = 1 << info;
5391 	return 0;
5392 }
5393 
5394 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5395 						  u8 range)
5396 {
5397 	u16 val;
5398 	u32 pcie_cap;
5399 
5400 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5401 	if (pcie_cap) {
5402 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5403 		val &= 0xfff0;
5404 		val |= range;
5405 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5406 	}
5407 }
5408 
5409 /**
5410  *	t4_prep_adapter - prepare SW and HW for operation
5411  *	@adapter: the adapter
5412  *
5413  *	Initialize adapter SW state for the various HW modules, set initial
5414  *	values for some adapter tunables, and read the chip, flash, and VPD
5415  *	parameters the driver needs before it first talks to the firmware.
5416  *	No HW reset is performed here.
5417  */
5418 int __devinit t4_prep_adapter(struct adapter *adapter)
5419 {
5420 	int ret;
5421 	uint16_t device_id;
5422 	uint32_t pl_rev;
5423 
5424 	get_pci_mode(adapter, &adapter->params.pci);
5425 
5426 	pl_rev = t4_read_reg(adapter, A_PL_REV);
5427 	adapter->params.chipid = G_CHIPID(pl_rev);
5428 	adapter->params.rev = G_REV(pl_rev);
5429 	if (adapter->params.chipid == 0) {
5430 		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5431 		adapter->params.chipid = CHELSIO_T4;
5432 
5433 		/* T4A1 chip is not supported */
5434 		if (adapter->params.rev == 1) {
5435 			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5436 			return -EINVAL;
5437 		}
5438 	}
5439 	adapter->params.pci.vpd_cap_addr =
5440 	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5441 
5442 	ret = get_flash_params(adapter);
5443 	if (ret < 0)
5444 		return ret;
5445 
5446 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5447 	if (ret < 0)
5448 		return ret;
5449 
5450 	/* Cards with real ASICs have the chipid in the PCIe device id */
5451 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5452 	if (device_id >> 12 == adapter->params.chipid)
5453 		adapter->params.cim_la_size = CIMLA_SIZE;
5454 	else {
5455 		/* FPGA */
5456 		adapter->params.fpga = 1;
5457 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5458 	}
5459 
5460 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5461 
5462 	/*
5463 	 * Default port and clock for debugging in case we can't reach FW.
5464 	 */
5465 	adapter->params.nports = 1;
5466 	adapter->params.portvec = 1;
5467 	adapter->params.vpd.cclk = 50000;
5468 
5469 	/* Set PCIe completion timeout to the 4s-13s range (encoding 0xd). */
5470 	set_pcie_completion_timeout(adapter, 0xd);
5471 	return 0;
5472 }
5473 
5474 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5475 {
5476 	u8 addr[6];
5477 	int ret, i, j;
5478 	struct fw_port_cmd c;
5479 	unsigned int rss_size;
5480 	adapter_t *adap = p->adapter;
5481 
5482 	memset(&c, 0, sizeof(c));
5483 
5484 	for (i = 0, j = -1; i <= p->port_id; i++) {
5485 		do {
5486 			j++;
5487 		} while ((adap->params.portvec & (1 << j)) == 0);
5488 	}
5489 
5490 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5491 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5492 			       V_FW_PORT_CMD_PORTID(j));
5493 	c.action_to_len16 = htonl(
5494 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5495 		FW_LEN16(c));
5496 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5497 	if (ret)
5498 		return ret;
5499 
5500 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5501 	if (ret < 0)
5502 		return ret;
5503 
5504 	p->viid = ret;
5505 	p->tx_chan = j;
5506 	p->lport = j;
5507 	p->rss_size = rss_size;
5508 	t4_os_set_hw_addr(adap, p->port_id, addr);
5509 
5510 	ret = ntohl(c.u.info.lstatus_to_modtype);
5511 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5512 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5513 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5514 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5515 
5516 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5517 
5518 	return 0;
5519 }
5520