/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) pause("t4hw", (x) * hz / 1000)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
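
/*
 * Example (illustrative sketch only): poll a hypothetical status register
 * until a hypothetical "done" bit reads as 1, checking 10 times with 5 us
 * between checks.  A_EXAMPLE_STATUS and F_EXAMPLE_DONE are made-up names,
 * not real T4 registers.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, A_EXAMPLE_STATUS, F_EXAMPLE_DONE,
 *				1, 10, 5, &val) == 0)
 *		CH_ALERT(adap, "done, status %#x\n", val);
 */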

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
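
/*
 * Example (illustrative sketch only): update a 4-bit field at bits 7:4 of a
 * hypothetical register to the value 5 without disturbing the other bits.
 * A_EXAMPLE_CTRL is a made-up name.  Note that @val must already be shifted
 * into position within @mask; this function does no shifting of its own.
 *
 *	t4_set_reg_field(adap, A_EXAMPLE_CTRL, 0xf0, 5 << 4);
 */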

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: index of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
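
/*
 * Example (illustrative sketch only): write and read back four consecutive
 * indirect registers through a hypothetical address/data pair
 * A_EXAMPLE_ADDR/A_EXAMPLE_DATA (made-up names).
 *
 *	u32 wvals[4] = { 0, 1, 2, 3 }, rvals[4];
 *
 *	t4_write_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, wvals, 4, 0);
 *	t4_read_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, rvals, 4, 0);
 */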

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
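
/*
 * Example (illustrative sketch only): issue a RESET command to the firmware
 * through mailbox 0.  This assumes the fw_reset_cmd layout (op_to_write,
 * retval_len16) from firmware/t4fw_interface.h and the t4_wr_mbox()
 * convenience wrapper from common.h, which calls t4_wr_mbox_meat() with
 * sleep_ok set.
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *			      F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
 */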

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if (mtype == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
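
/*
 * Example (illustrative sketch only): read 256 bytes from offset 0x1000 of
 * EDC 0 into a word-aligned host buffer.  Remember the data comes back as a
 * raw big-endian byte stream; the caller does any integer conversions.
 *
 *	__be32 buf[64];
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
 */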

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            512
#define VPD_INFO_FLD_HDR_SIZE	3

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
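
/*
 * Worked example of the mapping above: with @fn = 2 and @sz = 0x400,
 * A = 0x800.  Physical address 0x200 is below 1K and maps to
 * 0x200 + (31 << 10) = 0x7e00; physical address 0x500 falls in [1K..1K+A)
 * and maps to EEPROMSIZE - 0x800 + 0x500 - 0x400 = EEPROMSIZE - 0x700.
 */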

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
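
/*
 * Example (illustrative sketch only): the usual update sequence is to drop
 * write protection, write a word, and re-enable protection.  0x7b0 is an
 * arbitrary aligned virtual address chosen for the example, not a
 * meaningful location.
 *
 *	u32 word;
 *
 *	if ((ret = t4_seeprom_wp(adapter, 0)) == 0 &&
 *	    (ret = t4_seeprom_write(adapter, 0x7b0, 0x12345678)) == 0)
 *		ret = t4_seeprom_wp(adapter, 1);
 *	if (!ret)
 *		ret = t4_seeprom_read(adapter, 0x7b0, &word);
 */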

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
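
/*
 * Example (illustrative sketch only): read the first 16 words of the FW
 * region as host-endian words, e.g. to inspect the image header.
 *
 *	u32 hdr[16];
 *
 *	ret = t4_read_flash(adapter, FLASH_FW_START, 16, hdr, 0);
 */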

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
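
/*
 * Example (illustrative sketch only): typical attach-time handling of the
 * three-way return value.  CH_WARN is assumed to exist alongside CH_ERR and
 * CH_ALERT in common.h.
 *
 *	ret = t4_check_fw_version(adapter);
 *	if (ret < 0)
 *		return ret;	/* unreadable or major version mismatch */
 *	if (ret > 0)
 *		CH_WARN(adapter, "minor/micro FW version mismatch\n");
 */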

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	addr = t4_flash_cfg_addr(adap);
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}


/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FLASH_FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FLASH_FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
	    FLASH_FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}
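
/*
 * Example (illustrative sketch only): the integrity check above relies on
 * the sum of all big-endian 32-bit words of a valid image, including the
 * checksum word in the header, being 0xffffffff.  A caller simply hands the
 * whole image over; the real version word is written last so a failed
 * download leaves a visibly bad version in flash.
 *
 *	ret = t4_load_fw(adap, fw_image, fw_image_len);
 *	if (ret)
 *		return ret;
 */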

/* BIOS boot header */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (includes header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;

enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* maximum boot image size */
};

/**
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash at which to write the image, in KB units
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, const u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	int ret, addr;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);
	if (ret)
		goto out;

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < CIM_NUM_OBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
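
/*
 * Example (illustrative sketch only): dump IBQ 0 into a word buffer.  This
 * assumes CIM_IBQ_SIZE counts 16-byte queue entries, hence the * 4 to get
 * the capacity in 32-bit words.
 *
 *	u32 ibq[CIM_IBQ_SIZE * 4];
 *	int n;
 *
 *	n = t4_read_cim_ibq(adap, 0, ibq, ARRAY_SIZE(ibq));
 *	if (n < 0)
 *		return n;	/* otherwise n words were read */
 */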

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
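
/*
 * Example (illustrative sketch only): read 4 words from the start of the
 * CIM queue-control region using the region bases defined above.
 *
 *	unsigned int vals[4];
 *
 *	ret = t4_cim_read(adap, CIM_QCTL_BASE, ARRAY_SIZE(vals), vals);
 */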

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)                    /* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
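
/*
 * Example (illustrative sketch only): request symmetric pause with
 * autonegotiation on port 0, then apply it.  This assumes the link_config
 * was initialized from the port's capabilities, that AUTONEG_ENABLE is
 * defined alongside AUTONEG_DISABLE, and that adap->mbox holds the PF's
 * mailbox index.
 *
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	lc->autoneg = AUTONEG_ENABLE;
 *	ret = t4_link_start(adap, adap->mbox, 0, lc);
 */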

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
					  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
1690 
1691 /*
1692  * TP interrupt handler.
1693  */
1694 static void tp_intr_handler(struct adapter *adapter)
1695 {
1696 	static struct intr_info tp_intr_info[] = {
1697 		{ 0x3fffffff, "TP parity error", -1, 1 },
1698 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1699 		{ 0 }
1700 	};
1701 
1702 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1703 		t4_fatal_err(adapter);
1704 }
1705 
1706 /*
1707  * SGE interrupt handler.
1708  */
1709 static void sge_intr_handler(struct adapter *adapter)
1710 {
1711 	u64 v;
1712 	u32 err;
1713 
1714 	static struct intr_info sge_intr_info[] = {
1715 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
1716 		  "SGE received CPL exceeding IQE size", -1, 1 },
1717 		{ F_ERR_INVALID_CIDX_INC,
1718 		  "SGE GTS CIDX increment too large", -1, 0 },
1719 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1720 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1721 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1722 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1723 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1724 		  0 },
1725 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1726 		  0 },
1727 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1728 		  0 },
1729 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1730 		  0 },
1731 		{ F_ERR_ING_CTXT_PRIO,
1732 		  "SGE too many priority ingress contexts", -1, 0 },
1733 		{ F_ERR_EGR_CTXT_PRIO,
1734 		  "SGE too many priority egress contexts", -1, 0 },
1735 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1736 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1737 		{ 0 }
1738 	};
1739 
1740 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1741 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1742 	if (v) {
1743 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
1744 			 (unsigned long long)v);
1745 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1746 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1747 	}
1748 
1749 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1750 
1751 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1752 	if (err & F_ERROR_QID_VALID) {
1753 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1754 		if (err & F_UNCAPTURED_ERROR)
1755 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
1756 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1757 			     F_UNCAPTURED_ERROR);
1758 	}
1759 
1760 	if (v != 0)
1761 		t4_fatal_err(adapter);
1762 }
1763 
1764 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1765 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1766 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1767 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1768 
1769 /*
1770  * CIM interrupt handler.
1771  */
1772 static void cim_intr_handler(struct adapter *adapter)
1773 {
1774 	static struct intr_info cim_intr_info[] = {
1775 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1776 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1777 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1778 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1779 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1780 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1781 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1782 		{ 0 }
1783 	};
1784 	static struct intr_info cim_upintr_info[] = {
1785 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1786 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1787 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
1788 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
1789 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1790 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1791 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1792 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1793 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1794 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1795 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1796 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1797 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1798 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1799 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1800 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1801 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
1802 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
1803 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1804 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1805 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
1806 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
1807 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1808 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1809 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
1810 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
1811 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
1812 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1813 		{ 0 }
1814 	};
1815 
1816 	int fat;
1817 
1818 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1819 				    cim_intr_info) +
1820 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1821 				    cim_upintr_info);
1822 	if (fat)
1823 		t4_fatal_err(adapter);
1824 }
1825 
1826 /*
1827  * ULP RX interrupt handler.
1828  */
1829 static void ulprx_intr_handler(struct adapter *adapter)
1830 {
1831 	static struct intr_info ulprx_intr_info[] = {
1832 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1833 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1834 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1835 		{ 0 }
1836 	};
1837 
1838 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
1839 		t4_fatal_err(adapter);
1840 }
1841 
1842 /*
1843  * ULP TX interrupt handler.
1844  */
1845 static void ulptx_intr_handler(struct adapter *adapter)
1846 {
1847 	static struct intr_info ulptx_intr_info[] = {
1848 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1849 		  0 },
1850 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1851 		  0 },
1852 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1853 		  0 },
1854 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1855 		  0 },
1856 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1857 		{ 0 }
1858 	};
1859 
1860 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
1861 		t4_fatal_err(adapter);
1862 }
1863 
1864 /*
1865  * PM TX interrupt handler.
1866  */
1867 static void pmtx_intr_handler(struct adapter *adapter)
1868 {
1869 	static struct intr_info pmtx_intr_info[] = {
1870 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1871 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1872 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1873 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1874 		{ 0xffffff0, "PMTX framing error", -1, 1 },
1875 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1876 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1877 		  1 },
1878 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1879 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
1880 		{ 0 }
1881 	};
1882 
1883 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1884 		t4_fatal_err(adapter);
1885 }
1886 
1887 /*
1888  * PM RX interrupt handler.
1889  */
1890 static void pmrx_intr_handler(struct adapter *adapter)
1891 {
1892 	static struct intr_info pmrx_intr_info[] = {
1893 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1894 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
1895 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1896 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
1897 		  1 },
1898 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1899 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
1900 		{ 0 }
1901 	};
1902 
1903 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
1904 		t4_fatal_err(adapter);
1905 }
1906 
1907 /*
1908  * CPL switch interrupt handler.
1909  */
1910 static void cplsw_intr_handler(struct adapter *adapter)
1911 {
1912 	static struct intr_info cplsw_intr_info[] = {
1913 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1914 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1915 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1916 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1917 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1918 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1919 		{ 0 }
1920 	};
1921 
1922 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
1923 		t4_fatal_err(adapter);
1924 }
1925 
1926 /*
1927  * LE interrupt handler.
1928  */
1929 static void le_intr_handler(struct adapter *adap)
1930 {
1931 	static struct intr_info le_intr_info[] = {
1932 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
1933 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
1934 		{ F_PARITYERR, "LE parity error", -1, 1 },
1935 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
1936 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
1937 		{ 0 }
1938 	};
1939 
1940 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
1941 		t4_fatal_err(adap);
1942 }
1943 
1944 /*
1945  * MPS interrupt handler.
1946  */
1947 static void mps_intr_handler(struct adapter *adapter)
1948 {
1949 	static struct intr_info mps_rx_intr_info[] = {
1950 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1951 		{ 0 }
1952 	};
1953 	static struct intr_info mps_tx_intr_info[] = {
1954 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
1955 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1956 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
1957 		  -1, 1 },
1958 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
1959 		  -1, 1 },
1960 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
1961 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1962 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
1963 		{ 0 }
1964 	};
1965 	static struct intr_info mps_trc_intr_info[] = {
1966 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
1967 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
1968 		  1 },
1969 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
1970 		{ 0 }
1971 	};
1972 	static struct intr_info mps_stat_sram_intr_info[] = {
1973 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1974 		{ 0 }
1975 	};
1976 	static struct intr_info mps_stat_tx_intr_info[] = {
1977 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1978 		{ 0 }
1979 	};
1980 	static struct intr_info mps_stat_rx_intr_info[] = {
1981 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1982 		{ 0 }
1983 	};
1984 	static struct intr_info mps_cls_intr_info[] = {
1985 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1986 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1987 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1988 		{ 0 }
1989 	};
1990 
1991 	int fat;
1992 
1993 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
1994 				    mps_rx_intr_info) +
1995 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
1996 				    mps_tx_intr_info) +
1997 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
1998 				    mps_trc_intr_info) +
1999 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2000 				    mps_stat_sram_intr_info) +
2001 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2002 				    mps_stat_tx_intr_info) +
2003 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2004 				    mps_stat_rx_intr_info) +
2005 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2006 				    mps_cls_intr_info);
2007 
2008 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2009 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2010 	if (fat)
2011 		t4_fatal_err(adapter);
2012 }
2013 
2014 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2015 
2016 /*
2017  * EDC/MC interrupt handler.
2018  */
2019 static void mem_intr_handler(struct adapter *adapter, int idx)
2020 {
2021 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2022 
2023 	unsigned int addr, cnt_addr, v;
2024 
2025 	if (idx <= MEM_EDC1) {
2026 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2027 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2028 	} else {
2029 		addr = A_MC_INT_CAUSE;
2030 		cnt_addr = A_MC_ECC_STATUS;
2031 	}
2032 
2033 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2034 	if (v & F_PERR_INT_CAUSE)
2035 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2036 	if (v & F_ECC_CE_INT_CAUSE) {
2037 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2038 
2039 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2040 		CH_WARN_RATELIMIT(adapter,
2041 				  "%u %s correctable ECC data error%s\n",
2042 				  cnt, name[idx], cnt > 1 ? "s" : "");
2043 	}
2044 	if (v & F_ECC_UE_INT_CAUSE)
2045 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2046 			 name[idx]);
2047 
2048 	t4_write_reg(adapter, addr, v);
2049 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2050 		t4_fatal_err(adapter);
2051 }
2052 
2053 /*
2054  * MA interrupt handler.
2055  */
2056 static void ma_intr_handler(struct adapter *adapter)
2057 {
2058 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2059 
2060 	if (status & F_MEM_PERR_INT_CAUSE)
2061 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2062 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2063 	if (status & F_MEM_WRAP_INT_CAUSE) {
2064 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2065 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2066 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2067 			 G_MEM_WRAP_ADDRESS(v) << 4);
2068 	}
2069 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2070 	t4_fatal_err(adapter);
2071 }
2072 
2073 /*
2074  * SMB interrupt handler.
2075  */
2076 static void smb_intr_handler(struct adapter *adap)
2077 {
2078 	static struct intr_info smb_intr_info[] = {
2079 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2080 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2081 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2082 		{ 0 }
2083 	};
2084 
2085 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2086 		t4_fatal_err(adap);
2087 }
2088 
2089 /*
2090  * NC-SI interrupt handler.
2091  */
2092 static void ncsi_intr_handler(struct adapter *adap)
2093 {
2094 	static struct intr_info ncsi_intr_info[] = {
2095 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2096 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2097 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2098 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2099 		{ 0 }
2100 	};
2101 
2102 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2103 		t4_fatal_err(adap);
2104 }
2105 
2106 /*
2107  * XGMAC interrupt handler.
2108  */
2109 static void xgmac_intr_handler(struct adapter *adap, int port)
2110 {
2111 	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2112 
2113 	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2114 	if (!v)
2115 		return;
2116 
2117 	if (v & F_TXFIFO_PRTY_ERR)
2118 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2119 	if (v & F_RXFIFO_PRTY_ERR)
2120 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2121 	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2122 	t4_fatal_err(adap);
2123 }
2124 
2125 /*
2126  * PL interrupt handler.
2127  */
2128 static void pl_intr_handler(struct adapter *adap)
2129 {
2130 	static struct intr_info pl_intr_info[] = {
2131 		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2132 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2133 		{ 0 }
2134 	};
2135 
2136 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2137 		t4_fatal_err(adap);
2138 }
2139 
2140 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2141 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2142 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2143 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2144 
2145 /**
2146  *	t4_slow_intr_handler - control path interrupt handler
2147  *	@adapter: the adapter
2148  *
2149  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2150  *	The designation 'slow' is because it involves register reads, while
2151  *	data interrupts typically don't involve any MMIOs.
2152  */
2153 int t4_slow_intr_handler(struct adapter *adapter)
2154 {
2155 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2156 
2157 	if (!(cause & GLBL_INTR_MASK))
2158 		return 0;
2159 	if (cause & F_CIM)
2160 		cim_intr_handler(adapter);
2161 	if (cause & F_MPS)
2162 		mps_intr_handler(adapter);
2163 	if (cause & F_NCSI)
2164 		ncsi_intr_handler(adapter);
2165 	if (cause & F_PL)
2166 		pl_intr_handler(adapter);
2167 	if (cause & F_SMB)
2168 		smb_intr_handler(adapter);
2169 	if (cause & F_XGMAC0)
2170 		xgmac_intr_handler(adapter, 0);
2171 	if (cause & F_XGMAC1)
2172 		xgmac_intr_handler(adapter, 1);
2173 	if (cause & F_XGMAC_KR0)
2174 		xgmac_intr_handler(adapter, 2);
2175 	if (cause & F_XGMAC_KR1)
2176 		xgmac_intr_handler(adapter, 3);
2177 	if (cause & F_PCIE)
2178 		pcie_intr_handler(adapter);
2179 	if (cause & F_MC)
2180 		mem_intr_handler(adapter, MEM_MC);
2181 	if (cause & F_EDC0)
2182 		mem_intr_handler(adapter, MEM_EDC0);
2183 	if (cause & F_EDC1)
2184 		mem_intr_handler(adapter, MEM_EDC1);
2185 	if (cause & F_LE)
2186 		le_intr_handler(adapter);
2187 	if (cause & F_TP)
2188 		tp_intr_handler(adapter);
2189 	if (cause & F_MA)
2190 		ma_intr_handler(adapter);
2191 	if (cause & F_PM_TX)
2192 		pmtx_intr_handler(adapter);
2193 	if (cause & F_PM_RX)
2194 		pmrx_intr_handler(adapter);
2195 	if (cause & F_ULP_RX)
2196 		ulprx_intr_handler(adapter);
2197 	if (cause & F_CPL_SWITCH)
2198 		cplsw_intr_handler(adapter);
2199 	if (cause & F_SGE)
2200 		sge_intr_handler(adapter);
2201 	if (cause & F_ULP_TX)
2202 		ulptx_intr_handler(adapter);
2203 
2204 	/* Clear the interrupts just processed for which we are the master. */
2205 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2206 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2207 	return 1;
2208 }
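
/*
 * Usage sketch (illustrative only; "adap" is the caller's adapter): the
 * PCI function that manages the global interrupts would typically drive
 * this from its interrupt service routine:
 *
 *	if (!t4_slow_intr_handler(adap))
 *		return;		(no slow-path event was pending)
 */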
2209 
2210 /**
2211  *	t4_intr_enable - enable interrupts
2212  *	@adapter: the adapter whose interrupts should be enabled
2213  *
2214  *	Enable PF-specific interrupts for the calling function and the top-level
2215  *	interrupt concentrator for global interrupts.  Interrupts are already
2216  *	enabled at each module,	here we just enable the roots of the interrupt
2217  *	hierarchies.
2218  *
2219  *	Note: this function should be called only when the driver manages
2220  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2221  *	function at a time should be doing this.
2222  */
2223 void t4_intr_enable(struct adapter *adapter)
2224 {
2225 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2226 
2227 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2228 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2229 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2230 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2231 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2232 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2233 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2234 		     F_EGRESS_SIZE_ERR);
2235 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2236 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2237 }
2238 
2239 /**
2240  *	t4_intr_disable - disable interrupts
2241  *	@adapter: the adapter whose interrupts should be disabled
2242  *
2243  *	Disable interrupts.  We only disable the top-level interrupt
2244  *	concentrators.  The caller must be a PCI function managing global
2245  *	interrupts.
2246  */
2247 void t4_intr_disable(struct adapter *adapter)
2248 {
2249 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2250 
2251 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2252 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2253 }
2254 
2255 /**
2256  *	t4_intr_clear - clear all interrupts
2257  *	@adapter: the adapter whose interrupts should be cleared
2258  *
2259  *	Clears all interrupts.  The caller must be a PCI function managing
2260  *	global interrupts.
2261  */
2262 void t4_intr_clear(struct adapter *adapter)
2263 {
2264 	static const unsigned int cause_reg[] = {
2265 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2266 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2267 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2268 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2269 		A_MC_INT_CAUSE,
2270 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2271 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2272 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2273 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2274 		A_TP_INT_CAUSE,
2275 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2276 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2277 		A_MPS_RX_PERR_INT_CAUSE,
2278 		A_CPL_INTR_CAUSE,
2279 		MYPF_REG(A_PL_PF_INT_CAUSE),
2280 		A_PL_PL_INT_CAUSE,
2281 		A_LE_DB_INT_CAUSE,
2282 	};
2283 
2284 	unsigned int i;
2285 
2286 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2287 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2288 
2289 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2290 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2291 }
2292 
2293 /**
2294  *	hash_mac_addr - return the hash value of a MAC address
2295  *	@addr: the 48-bit Ethernet MAC address
2296  *
2297  *	Hashes a MAC address according to the hash function used by HW inexact
2298  *	(hash) address matching.
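 *	Returns the 6-bit hash bucket index (0-63) for @addr.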
2299  */
2300 static int hash_mac_addr(const u8 *addr)
2301 {
2302 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2303 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2304 	a ^= b;
2305 	a ^= (a >> 12);
2306 	a ^= (a >> 6);
2307 	return a & 0x3f;
2308 }
2309 
2310 /**
2311  *	t4_config_rss_range - configure a portion of the RSS mapping table
2312  *	@adapter: the adapter
2313  *	@mbox: mbox to use for the FW command
2314  *	@viid: virtual interface whose RSS subtable is to be written
2315  *	@start: start entry in the table to write
2316  *	@n: how many table entries to write
2317  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2318  *	@nrspq: number of values in @rspq
2319  *
2320  *	Programs the selected part of the VI's RSS mapping table with the
2321  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2322  *	until the full table range is populated.
2323  *
2324  *	The caller must ensure the values in @rspq are in the range allowed for
2325  *	@viid.
2326  */
2327 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2328 			int start, int n, const u16 *rspq, unsigned int nrspq)
2329 {
2330 	int ret;
2331 	const u16 *rsp = rspq;
2332 	const u16 *rsp_end = rspq + nrspq;
2333 	struct fw_rss_ind_tbl_cmd cmd;
2334 
2335 	memset(&cmd, 0, sizeof(cmd));
2336 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2337 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2338 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2339 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2340 
2342 	/*
2343 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2344 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2345 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2346 	 * reserved.
2347 	 */
2348 	while (n > 0) {
2349 		int nq = min(n, 32);
2350 		int nq_packed = 0;
2351 		__be32 *qp = &cmd.iq0_to_iq2;
2352 
2353 		/*
2354 		 * Set up the firmware RSS command header to send the next
2355 		 * "nq" Ingress Queue IDs to the firmware.
2356 		 */
2357 		cmd.niqid = htons(nq);
2358 		cmd.startidx = htons(start);
2359 
2360 		/*
2361 		 * "nq" more done for the start of the next loop.
2362 		 */
2363 		start += nq;
2364 		n -= nq;
2365 
2366 		/*
2367 		 * While there are still Ingress Queue IDs to stuff into the
2368 		 * current firmware RSS command, retrieve them from the
2369 		 * Ingress Queue ID array and insert them into the command.
2370 		 */
2371 		while (nq > 0) {
2372 			/*
2373 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2374 			 * around the Ingress Queue ID array if necessary) and
2375 			 * insert them into the firmware RSS command at the
2376 			 * current 3-tuple position within the commad.
2377 			 */
2378 			u16 qbuf[3];
2379 			u16 *qbp = qbuf;
2380 			int nqbuf = min(3, nq);
2381 
2382 			nq -= nqbuf;
2383 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2384 			while (nqbuf && nq_packed < 32) {
2385 				nqbuf--;
2386 				nq_packed++;
2387 				*qbp++ = *rsp++;
2388 				if (rsp >= rsp_end)
2389 					rsp = rspq;
2390 			}
2391 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2392 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2393 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2394 		}
2395 
2396 		/*
2397 		 * Send this portion of the RSS table update to the firmware;
2398 		 * bail out on any errors.
2399 		 */
2400 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2401 		if (ret)
2402 			return ret;
2403 	}
2404 
2405 	return 0;
2406 }
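
/*
 * Usage sketch ("adap", "mbox", "viid" and the iq* IDs are the caller's):
 * spread a VI's 128-entry RSS slice across four ingress queues.  Since
 * @nrspq < @n here, the four values repeat until all 128 slots are
 * written:
 *
 *	const u16 rspq[] = { iq0, iq1, iq2, iq3 };
 *
 *	ret = t4_config_rss_range(adap, mbox, viid, 0, 128, rspq, 4);
 */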
2407 
2408 /**
2409  *	t4_config_glbl_rss - configure the global RSS mode
2410  *	@adapter: the adapter
2411  *	@mbox: mbox to use for the FW command
2412  *	@mode: global RSS mode
2413  *	@flags: mode-specific flags
2414  *
2415  *	Sets the global RSS mode.
2416  */
2417 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2418 		       unsigned int flags)
2419 {
2420 	struct fw_rss_glb_config_cmd c;
2421 
2422 	memset(&c, 0, sizeof(c));
2423 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2424 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2425 	c.retval_len16 = htonl(FW_LEN16(c));
2426 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2427 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2428 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2429 		c.u.basicvirtual.mode_pkd =
2430 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2431 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2432 	} else
2433 		return -EINVAL;
2434 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2435 }
2436 
2437 /**
2438  *	t4_config_vi_rss - configure per VI RSS settings
2439  *	@adapter: the adapter
2440  *	@mbox: mbox to use for the FW command
2441  *	@viid: the VI id
2442  *	@flags: RSS flags
2443  *	@defq: id of the default RSS queue for the VI.
2444  *
2445  *	Configures VI-specific RSS properties.
2446  */
2447 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2448 		     unsigned int flags, unsigned int defq)
2449 {
2450 	struct fw_rss_vi_config_cmd c;
2451 
2452 	memset(&c, 0, sizeof(c));
2453 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2454 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2455 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2456 	c.retval_len16 = htonl(FW_LEN16(c));
2457 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2458 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2459 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2460 }
2461 
2462 /* Read an RSS table row */
2463 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2464 {
2465 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2466 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2467 				   5, 0, val);
2468 }
2469 
2470 /**
2471  *	t4_read_rss - read the contents of the RSS mapping table
2472  *	@adapter: the adapter
2473  *	@map: holds the contents of the RSS mapping table
2474  *
2475  *	Reads the contents of the RSS hash->queue mapping table.
2476  */
2477 int t4_read_rss(struct adapter *adapter, u16 *map)
2478 {
2479 	u32 val;
2480 	int i, ret;
2481 
2482 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2483 		ret = rd_rss_row(adapter, i, &val);
2484 		if (ret)
2485 			return ret;
2486 		*map++ = G_LKPTBLQUEUE0(val);
2487 		*map++ = G_LKPTBLQUEUE1(val);
2488 	}
2489 	return 0;
2490 }
2491 
2492 /**
2493  *	t4_read_rss_key - read the global RSS key
2494  *	@adap: the adapter
2495  *	@key: 10-entry array holding the 320-bit RSS key
2496  *
2497  *	Reads the global 320-bit RSS key.
2498  */
2499 void t4_read_rss_key(struct adapter *adap, u32 *key)
2500 {
2501 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2502 			 A_TP_RSS_SECRET_KEY0);
2503 }
2504 
2505 /**
2506  *	t4_write_rss_key - program one of the RSS keys
2507  *	@adap: the adapter
2508  *	@key: 10-entry array holding the 320-bit RSS key
2509  *	@idx: which RSS key to write
2510  *
2511  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2512  *	0..15 the corresponding entry in the RSS key table is written,
2513  *	otherwise the global RSS key is written.
2514  */
2515 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2516 {
2517 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2518 			  A_TP_RSS_SECRET_KEY0);
2519 	if (idx >= 0 && idx < 16)
2520 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2521 			     V_KEYWRADDR(idx) | F_KEYWREN);
2522 }
2523 
2524 /**
2525  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2526  *	@adapter: the adapter
2527  *	@index: the entry in the PF RSS table to read
2528  *	@valp: where to store the returned value
2529  *
2530  *	Reads the PF RSS Configuration Table at the specified index and returns
2531  *	the value found there.
2532  */
2533 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2534 {
2535 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2536 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2537 }
2538 
2539 /**
2540  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2541  *	@adapter: the adapter
2542  *	@index: the entry in the PF RSS table to write
2543  *	@val: the value to store
2544  *
2545  *	Writes the PF RSS Configuration Table at the specified index with the
2546  *	specified value.
2547  */
2548 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2549 {
2550 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2551 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2552 }
2553 
2554 /**
2555  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2556  *	@adapter: the adapter
2557  *	@index: the entry in the VF RSS table to read
2558  *	@vfl: where to store the returned VFL
2559  *	@vfh: where to store the returned VFH
2560  *
2561  *	Reads the VF RSS Configuration Table at the specified index and returns
2562  *	the (VFL, VFH) values found there.
2563  */
2564 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2565 			   u32 *vfl, u32 *vfh)
2566 {
2567 	u32 vrt;
2568 
2569 	/*
2570 	 * Request that the index'th VF Table values be read into VFL/VFH.
2571 	 */
2572 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2573 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2574 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2575 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2576 
2577 	/*
2578 	 * Grab the VFL/VFH values ...
2579 	 */
2580 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2581 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2582 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2583 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2584 }
2585 
2586 /**
2587  *	t4_write_rss_vf_config - write VF RSS Configuration Table
2589  *	@adapter: the adapter
2590  *	@index: the entry in the VF RSS table to write
2591  *	@vfl: the VFL to store
2592  *	@vfh: the VFH to store
2593  *
2594  *	Writes the VF RSS Configuration Table at the specified index with the
2595  *	specified (VFL, VFH) values.
2596  */
2597 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2598 			    u32 vfl, u32 vfh)
2599 {
2600 	u32 vrt;
2601 
2602 	/*
2603 	 * Load up VFL/VFH with the values to be written ...
2604 	 */
2605 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2606 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
2607 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2608 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
2609 
2610 	/*
2611 	 * Write the VFL/VFH into the VF Table at index'th location.
2612 	 */
2613 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2614 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2615 	vrt |= V_VFWRADDR(index) | F_VFWREN;
2616 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2617 }
2618 
2619 /**
2620  *	t4_read_rss_pf_map - read PF RSS Map
2621  *	@adapter: the adapter
2622  *
2623  *	Reads the PF RSS Map register and returns its value.
2624  */
2625 u32 t4_read_rss_pf_map(struct adapter *adapter)
2626 {
2627 	u32 pfmap;
2628 
2629 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2630 			 &pfmap, 1, A_TP_RSS_PF_MAP);
2631 	return pfmap;
2632 }
2633 
2634 /**
2635  *	t4_write_rss_pf_map - write PF RSS Map
2636  *	@adapter: the adapter
2637  *	@pfmap: PF RSS Map value
2638  *
2639  *	Writes the specified value to the PF RSS Map register.
2640  */
2641 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2642 {
2643 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2644 			  &pfmap, 1, A_TP_RSS_PF_MAP);
2645 }
2646 
2647 /**
2648  *	t4_read_rss_pf_mask - read PF RSS Mask
2649  *	@adapter: the adapter
2650  *
2651  *	Reads the PF RSS Mask register and returns its value.
2652  */
2653 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2654 {
2655 	u32 pfmask;
2656 
2657 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2658 			 &pfmask, 1, A_TP_RSS_PF_MSK);
2659 	return pfmask;
2660 }
2661 
2662 /**
2663  *	t4_write_rss_pf_mask - write PF RSS Mask
2664  *	@adapter: the adapter
2665  *	@pfmask: PF RSS Mask value
2666  *
2667  *	Writes the specified value to the PF RSS Mask register.
2668  */
2669 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2670 {
2671 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2672 			  &pfmask, 1, A_TP_RSS_PF_MSK);
2673 }
2674 
2675 /**
2676  *	t4_set_filter_mode - configure the optional components of filter tuples
2677  *	@adap: the adapter
2678  *	@mode_map: a bitmap selecting which optional filter components to enable
2679  *
2680  *	Sets the filter mode by selecting the optional components to enable
2681  *	in filter tuples.  Returns 0 on success and a negative error if the
2682  *	requested mode needs more bits than are available for optional
2683  *	components.
2684  */
2685 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2686 {
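	/*
	 * Width in bits of each optional filter tuple component, indexed
	 * by its bit position from S_FCOE up to S_FRAGMENTATION.
	 */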
2687 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2688 
2689 	int i, nbits = 0;
2690 
2691 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2692 		if (mode_map & (1 << i))
2693 			nbits += width[i];
2694 	if (nbits > FILTER_OPT_LEN)
2695 		return -EINVAL;
2696 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2697 			  A_TP_VLAN_PRI_MAP);
2698 	return 0;
2699 }
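
/*
 * Usage sketch: the bit names below (F_VLAN, F_PROTOCOL) are assumed to be
 * the TP_VLAN_PRI_MAP field macros from t4_regs.h.  To match on VLAN and
 * IP protocol in filter tuples:
 *
 *	ret = t4_set_filter_mode(adap, F_VLAN | F_PROTOCOL);
 */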
2700 
2701 /**
2702  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2703  *	@adap: the adapter
2704  *	@v4: holds the TCP/IP counter values
2705  *	@v6: holds the TCP/IPv6 counter values
2706  *
2707  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2708  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2709  */
2710 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2711 			 struct tp_tcp_stats *v6)
2712 {
2713 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2714 
2715 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2716 #define STAT(x)     val[STAT_IDX(x)]
2717 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2718 
2719 	if (v4) {
2720 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2721 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2722 		v4->tcpOutRsts = STAT(OUT_RST);
2723 		v4->tcpInSegs  = STAT64(IN_SEG);
2724 		v4->tcpOutSegs = STAT64(OUT_SEG);
2725 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2726 	}
2727 	if (v6) {
2728 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2729 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2730 		v6->tcpOutRsts = STAT(OUT_RST);
2731 		v6->tcpInSegs  = STAT64(IN_SEG);
2732 		v6->tcpOutSegs = STAT64(OUT_SEG);
2733 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2734 	}
2735 #undef STAT64
2736 #undef STAT
2737 #undef STAT_IDX
2738 }
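
/*
 * Usage sketch: pass %NULL for the stats you don't need, e.g. to read
 * only the TCP/IP (v4) counters:
 *
 *	struct tp_tcp_stats v4;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, NULL);
 */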
2739 
2740 /**
2741  *	t4_tp_get_err_stats - read TP's error MIB counters
2742  *	@adap: the adapter
2743  *	@st: holds the counter values
2744  *
2745  *	Returns the values of TP's error counters.
2746  */
2747 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2748 {
2749 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2750 			 12, A_TP_MIB_MAC_IN_ERR_0);
2751 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2752 			 8, A_TP_MIB_TNL_CNG_DROP_0);
2753 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2754 			 4, A_TP_MIB_TNL_DROP_0);
2755 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2756 			 4, A_TP_MIB_OFD_VLN_DROP_0);
2757 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2758 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
2759 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2760 			 2, A_TP_MIB_OFD_ARP_DROP);
2761 }
2762 
2763 /**
2764  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
2765  *	@adap: the adapter
2766  *	@st: holds the counter values
2767  *
2768  *	Returns the values of TP's proxy counters.
2769  */
2770 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2771 {
2772 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2773 			 4, A_TP_MIB_TNL_LPBK_0);
2774 }
2775 
2776 /**
2777  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
2778  *	@adap: the adapter
2779  *	@st: holds the counter values
2780  *
2781  *	Returns the values of TP's CPL counters.
2782  */
2783 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2784 {
2785 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2786 			 8, A_TP_MIB_CPL_IN_REQ_0);
2787 }
2788 
2789 /**
2790  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2791  *	@adap: the adapter
2792  *	@st: holds the counter values
2793  *
2794  *	Returns the values of TP's RDMA counters.
2795  */
2796 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2797 {
2798 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2799 			 2, A_TP_MIB_RQE_DFR_MOD);
2800 }
2801 
2802 /**
2803  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2804  *	@adap: the adapter
2805  *	@idx: the port index
2806  *	@st: holds the counter values
2807  *
2808  *	Returns the values of TP's FCoE counters for the selected port.
2809  */
2810 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
2811 		       struct tp_fcoe_stats *st)
2812 {
2813 	u32 val[2];
2814 
2815 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
2816 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
2817 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
2818 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
2819 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2820 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
2821 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
2822 }
2823 
2824 /**
2825  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2826  *	@adap: the adapter
2827  *	@st: holds the counter values
2828  *
2829  *	Returns the values of TP's counters for non-TCP directly-placed packets.
2830  */
2831 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2832 {
2833 	u32 val[4];
2834 
2835 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2836 			 A_TP_MIB_USM_PKTS);
2837 	st->frames = val[0];
2838 	st->drops = val[1];
2839 	st->octets = ((u64)val[2] << 32) | val[3];
2840 }
2841 
2842 /**
2843  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2844  *	@adap: the adapter
2845  *	@mtus: where to store the MTU values
2846  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2847  *
2848  *	Reads the HW path MTU table.
2849  */
2850 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2851 {
2852 	u32 v;
2853 	int i;
2854 
2855 	for (i = 0; i < NMTUS; ++i) {
2856 		t4_write_reg(adap, A_TP_MTU_TABLE,
2857 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
2858 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
2859 		mtus[i] = G_MTUVALUE(v);
2860 		if (mtu_log)
2861 			mtu_log[i] = G_MTUWIDTH(v);
2862 	}
2863 }
2864 
2865 /**
2866  *	t4_read_cong_tbl - reads the congestion control table
2867  *	@adap: the adapter
2868  *	@incr: where to store the additive increment values
2869  *
2870  *	Reads the additive increments programmed into the HW congestion
2871  *	control table.
2872  */
2873 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2874 {
2875 	unsigned int mtu, w;
2876 
2877 	for (mtu = 0; mtu < NMTUS; ++mtu)
2878 		for (w = 0; w < NCCTRL_WIN; ++w) {
2879 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
2880 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
2881 			incr[mtu][w] = (u16)t4_read_reg(adap,
2882 						A_TP_CCTRL_TABLE) & 0x1fff;
2883 		}
2884 }
2885 
2886 /**
2887  *	t4_read_pace_tbl - read the pace table
2888  *	@adap: the adapter
2889  *	@pace_vals: holds the returned values
2890  *
2891  *	Returns the values of TP's pace table in microseconds.
2892  */
2893 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
2894 {
2895 	unsigned int i, v;
2896 
2897 	for (i = 0; i < NTX_SCHED; i++) {
2898 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2899 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
2900 		pace_vals[i] = dack_ticks_to_usec(adap, v);
2901 	}
2902 }
2903 
2904 /**
2905  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2906  *	@adap: the adapter
2907  *	@addr: the indirect TP register address
2908  *	@mask: specifies the field within the register to modify
2909  *	@val: new value for the field
2910  *
2911  *	Sets a field of an indirect TP register to the given value.
2912  */
2913 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2914 			    unsigned int mask, unsigned int val)
2915 {
2916 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
2917 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2918 	t4_write_reg(adap, A_TP_PIO_DATA, val);
2919 }
2920 
2921 /**
2922  *	init_cong_ctrl - initialize congestion control parameters
2923  *	@a: the alpha values for congestion control
2924  *	@b: the beta values for congestion control
2925  *
2926  *	Initialize the congestion control parameters.
2927  */
2928 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2929 {
2930 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2931 	a[9] = 2;
2932 	a[10] = 3;
2933 	a[11] = 4;
2934 	a[12] = 5;
2935 	a[13] = 6;
2936 	a[14] = 7;
2937 	a[15] = 8;
2938 	a[16] = 9;
2939 	a[17] = 10;
2940 	a[18] = 14;
2941 	a[19] = 17;
2942 	a[20] = 21;
2943 	a[21] = 25;
2944 	a[22] = 30;
2945 	a[23] = 35;
2946 	a[24] = 45;
2947 	a[25] = 60;
2948 	a[26] = 80;
2949 	a[27] = 100;
2950 	a[28] = 200;
2951 	a[29] = 300;
2952 	a[30] = 400;
2953 	a[31] = 500;
2954 
2955 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2956 	b[9] = b[10] = 1;
2957 	b[11] = b[12] = 2;
2958 	b[13] = b[14] = b[15] = b[16] = 3;
2959 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2960 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2961 	b[28] = b[29] = 6;
2962 	b[30] = b[31] = 7;
2963 }
2964 
2965 /* The minimum additive increment value for the congestion control table */
2966 #define CC_MIN_INCR 2U
2967 
2968 /**
2969  *	t4_load_mtus - write the MTU and congestion control HW tables
2970  *	@adap: the adapter
2971  *	@mtus: the values for the MTU table
2972  *	@alpha: the values for the congestion control alpha parameter
2973  *	@beta: the values for the congestion control beta parameter
2974  *
2975  *	Write the HW MTU table with the supplied MTUs and the high-speed
2976  *	congestion control table with the supplied alpha, beta, and MTUs.
2977  *	We write the two tables together because the additive increments
2978  *	depend on the MTUs.
2979  */
2980 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2981 		  const unsigned short *alpha, const unsigned short *beta)
2982 {
2983 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2984 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2985 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2986 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2987 	};
2988 
2989 	unsigned int i, w;
2990 
2991 	for (i = 0; i < NMTUS; ++i) {
2992 		unsigned int mtu = mtus[i];
2993 		unsigned int log2 = fls(mtu);
2994 
2995 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2996 			log2--;
2997 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
2998 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
2999 
3000 		for (w = 0; w < NCCTRL_WIN; ++w) {
3001 			unsigned int inc;
3002 
3003 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3004 				  CC_MIN_INCR);
3005 
3006 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3007 				     (w << 16) | (beta[w] << 13) | inc);
3008 		}
3009 	}
3010 }
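
/*
 * Usage sketch ("mtus" is a caller-supplied table of NMTUS MTU values):
 * the alpha/beta arrays can come from init_cong_ctrl() above:
 *
 *	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];
 *
 *	init_cong_ctrl(a, b);
 *	t4_load_mtus(adap, mtus, a, b);
 */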
3011 
3012 /**
3013  *	t4_set_pace_tbl - set the pace table
3014  *	@adap: the adapter
3015  *	@pace_vals: the pace values in microseconds
3016  *	@start: index of the first entry in the HW pace table to set
3017  *	@n: how many entries to set
3018  *
3019  *	Sets (a subset of the) HW pace table.
3020  */
3021 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3022 		     unsigned int start, unsigned int n)
3023 {
3024 	unsigned int vals[NTX_SCHED], i;
3025 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3026 
3027 	if (n > NTX_SCHED)
3028 		return -ERANGE;
3029 
3030 	/* convert values from us to dack ticks, rounding to closest value */
3031 	for (i = 0; i < n; i++, pace_vals++) {
3032 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3033 		if (vals[i] > 0x7ff)
3034 			return -ERANGE;
3035 		if (*pace_vals && vals[i] == 0)
3036 			return -ERANGE;
3037 	}
3038 	for (i = 0; i < n; i++, start++)
3039 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3040 	return 0;
3041 }
3042 
3043 /**
3044  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3045  *	@adap: the adapter
3046  *	@sched: the scheduler index
3047  *	@kbps: target rate in Kbps
3048  *
3049  *	Configure a Tx HW scheduler for the target rate.
3050  */
3051 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3052 {
3053 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3054 	unsigned int clk = adap->params.vpd.cclk * 1000;
3055 	unsigned int selected_cpt = 0, selected_bpt = 0;
3056 
3057 	if (kbps > 0) {
3058 		kbps *= 125;     /* Kbps -> bytes/s */
3059 		for (cpt = 1; cpt <= 255; cpt++) {
3060 			tps = clk / cpt;
3061 			bpt = (kbps + tps / 2) / tps;
3062 			if (bpt > 0 && bpt <= 255) {
3063 				v = bpt * tps;
3064 				delta = v >= kbps ? v - kbps : kbps - v;
3065 				if (delta < mindelta) {
3066 					mindelta = delta;
3067 					selected_cpt = cpt;
3068 					selected_bpt = bpt;
3069 				}
3070 			} else if (selected_cpt)
3071 				break;
3072 		}
3073 		if (!selected_cpt)
3074 			return -EINVAL;
3075 	}
3076 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3077 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3078 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3079 	if (sched & 1)
3080 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3081 	else
3082 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3083 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3084 	return 0;
3085 }
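
/*
 * Usage sketch (values illustrative): cap HW Tx scheduler 0 at 100 Mbps:
 *
 *	ret = t4_set_sched_bps(adap, 0, 100000);
 */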
3086 
3087 /**
3088  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3089  *	@adap: the adapter
3090  *	@sched: the scheduler index
3091  *	@ipg: the interpacket delay in tenths of nanoseconds
3092  *
3093  *	Set the interpacket delay for a HW packet rate scheduler.
3094  */
3095 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3096 {
3097 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3098 
3099 	/* convert ipg to nearest number of core clocks */
3100 	ipg *= core_ticks_per_usec(adap);
3101 	ipg = (ipg + 5000) / 10000;
3102 	if (ipg > M_TXTIMERSEPQ0)
3103 		return -EINVAL;
3104 
3105 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3106 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3107 	if (sched & 1)
3108 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3109 	else
3110 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3111 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3112 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3113 	return 0;
3114 }
3115 
3116 /**
3117  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3118  *	@adap: the adapter
3119  *	@sched: the scheduler index
3120  *	@kbps: where to store the rate in Kbps, may be %NULL
3121  *	@ipg: where to store the interpacket delay in tenths of ns, may be %NULL
3122  *
3123  *	Return the current configuration of a HW Tx scheduler.
3124  */
3125 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3126 		     unsigned int *ipg)
3127 {
3128 	unsigned int v, addr, bpt, cpt;
3129 
3130 	if (kbps) {
3131 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3132 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3133 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3134 		if (sched & 1)
3135 			v >>= 16;
3136 		bpt = (v >> 8) & 0xff;
3137 		cpt = v & 0xff;
3138 		if (!cpt)
3139 			*kbps = 0;        /* scheduler disabled */
3140 		else {
3141 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3142 			*kbps = (v * bpt) / 125;
3143 		}
3144 	}
3145 	if (ipg) {
3146 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3147 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3148 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3149 		if (sched & 1)
3150 			v >>= 16;
3151 		v &= 0xffff;
3152 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3153 	}
3154 }
3155 
3156 /*
3157  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3158  * clocks.  The formula is
3159  *
3160  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3161  *
3162  * which is equivalent to
3163  *
3164  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3165  */
3166 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3167 {
3168 	u64 v = bytes256 * adap->params.vpd.cclk;
3169 
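	/* 62.5 * v, computed without floating point */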
3170 	return v * 62 + v / 2;
3171 }
3172 
3173 /**
3174  *	t4_get_chan_txrate - get the current per channel Tx rates
3175  *	@adap: the adapter
3176  *	@nic_rate: rates for NIC traffic
3177  *	@ofld_rate: rates for offloaded traffic
3178  *
3179  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3180  *	for each channel.
3181  */
3182 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3183 {
3184 	u32 v;
3185 
3186 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3187 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3188 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3189 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3190 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3191 
3192 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3193 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3194 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3195 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3196 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3197 }
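
/*
 * Usage sketch: both arrays must have room for all four channels:
 *
 *	u64 nic[4], ofld[4];
 *
 *	t4_get_chan_txrate(adap, nic, ofld);
 */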
3198 
3199 /**
3200  *	t4_set_trace_filter - configure one of the tracing filters
3201  *	@adap: the adapter
3202  *	@tp: the desired trace filter parameters
3203  *	@idx: which filter to configure
3204  *	@enable: whether to enable or disable the filter
3205  *
3206  *	Configures one of the tracing filters available in HW.  If @enable is
3207  *	%0 @tp is not examined and may be %NULL.
3208  */
3209 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3210 			int enable)
3211 {
3212 	int i, ofst = idx * 4;
3213 	u32 data_reg, mask_reg, cfg;
3214 	u32 multitrc = F_TRCMULTIFILTER;
3215 
3216 	if (!enable) {
3217 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3218 		goto out;
3219 	}
3220 
3221 	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3222 	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
3223 	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
3224 		return -EINVAL;
3225 
3226 	if (tp->snap_len > 256) {            /* must be tracer 0 */
3227 		if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
3228 		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
3229 		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
3230 		    F_TFEN)
3231 			return -EINVAL;  /* other tracers are enabled */
3232 		multitrc = 0;
3233 	} else if (idx) {
3234 		i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
3235 		if (G_TFCAPTUREMAX(i) > 256 &&
3236 		    (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
3237 			return -EINVAL;
3238 	}
3239 
3240 	/* stop the tracer we'll be changing */
3241 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3242 
3243 	/* disable tracing globally if running in the wrong single/multi mode */
3244 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3245 	if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
3246 		t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
3247 		t4_read_reg(adap, A_MPS_TRC_CFG);                  /* flush */
3248 		msleep(1);
3249 		if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
3250 			return -ETIMEDOUT;
3251 	}
3252 	/*
3253 	 * At this point either the tracing is enabled and in the right mode or
3254 	 * disabled.
3255 	 */
3256 
3257 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3258 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3259 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3260 
3261 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3262 		t4_write_reg(adap, data_reg, tp->data[i]);
3263 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3264 	}
3265 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3266 		     V_TFCAPTUREMAX(tp->snap_len) |
3267 		     V_TFMINPKTSIZE(tp->min_len));
3268 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3269 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3270 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3271 
3272 	cfg &= ~F_TRCMULTIFILTER;
3273 	t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
3274 out:	t4_read_reg(adap, A_MPS_TRC_CFG);  /* flush */
3275 	return 0;
3276 }
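
/*
 * Usage sketch (field values are hypothetical): have tracer 0 snap the
 * first 128 bytes of every packet on port 0; the all-zero data/mask pair
 * makes the match a wildcard:
 *
 *	struct trace_params tp;
 *
 *	memset(&tp, 0, sizeof(tp));
 *	tp.snap_len = 128;
 *	tp.port = 0;
 *	ret = t4_set_trace_filter(adap, &tp, 0, 1);
 */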
3277 
3278 /**
3279  *	t4_get_trace_filter - query one of the tracing filters
3280  *	@adap: the adapter
3281  *	@tp: the current trace filter parameters
3282  *	@idx: which trace filter to query
3283  *	@enabled: set to non-zero if the filter is enabled
3284  *
3285  *	Returns the current settings of one of the HW tracing filters.
3286  */
3287 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3288 			 int *enabled)
3289 {
3290 	u32 ctla, ctlb;
3291 	int i, ofst = idx * 4;
3292 	u32 data_reg, mask_reg;
3293 
3294 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3295 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3296 
3297 	*enabled = !!(ctla & F_TFEN);
3298 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3299 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3300 	tp->skip_ofst = G_TFOFFSET(ctla);
3301 	tp->skip_len = G_TFLENGTH(ctla);
3302 	tp->invert = !!(ctla & F_TFINVERTMATCH);
3303 	tp->port = G_TFPORT(ctla);
3304 
3305 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3306 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3307 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3308 
3309 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3310 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3311 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3312 	}
3313 }
3314 
3315 /**
3316  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3317  *	@adap: the adapter
3318  *	@cnt: where to store the count statistics
3319  *	@cycles: where to store the cycle statistics
3320  *
3321  *	Returns performance statistics from PMTX.
3322  */
3323 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3324 {
3325 	int i;
3326 
3327 	for (i = 0; i < PM_NSTATS; i++) {
3328 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3329 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3330 		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3331 	}
3332 }
3333 
3334 /**
3335  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3336  *	@adap: the adapter
3337  *	@cnt: where to store the count statistics
3338  *	@cycles: where to store the cycle statistics
3339  *
3340  *	Returns performance statistics from PMRX.
3341  */
3342 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3343 {
3344 	int i;
3345 
3346 	for (i = 0; i < PM_NSTATS; i++) {
3347 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3348 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3349 		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3350 	}
3351 }
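
/*
 * Usage sketch for the two routines above (illustrative only): each fills
 * caller-provided arrays of PM_NSTATS entries.
 *
 *	u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
 *	u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
 *
 *	t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
 *	t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
 *
 * Note that each iteration selects a statistic through the STAT_CONFIG
 * register before reading it, so concurrent callers must not interleave.
 */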
3352 
3353 /**
3354  *	get_mps_bg_map - return the buffer groups associated with a port
3355  *	@adap: the adapter
3356  *	@idx: the port index
3357  *
3358  *	Returns a bitmap indicating which MPS buffer groups are associated
3359  *	with the given port.  Bit i is set if buffer group i is used by the
3360  *	port.
3361  */
3362 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3363 {
3364 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3365 
3366 	if (n == 0)
3367 		return idx == 0 ? 0xf : 0;
3368 	if (n == 1)
3369 		return idx < 2 ? (3 << (2 * idx)) : 0;
3370 	return 1 << idx;
3371 }
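
/*
 * For example, when the NUMPORTS field is 0 (a single-port configuration)
 * port 0 owns all four buffer groups (0xf); when it is 1 (two ports) port 0
 * gets groups 0-1 (0x3) and port 1 gets groups 2-3 (0xc); otherwise each
 * port maps 1:1 to the buffer group of the same index.
 */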
3372 
3373 /**
3374  *	t4_get_port_stats - collect port statistics
3375  *	@adap: the adapter
3376  *	@idx: the port index
3377  *	@p: the stats structure to fill
3378  *
3379  *	Collect statistics related to the given port from HW.
3380  */
3381 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3382 {
3383 	u32 bgmap = get_mps_bg_map(adap, idx);
3384 
3385 #define GET_STAT(name) \
3386 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3387 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3388 
3389 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3390 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3391 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3392 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3393 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3394 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3395 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3396 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3397 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3398 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3399 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3400 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3401 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3402 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3403 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3404 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3405 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3406 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3407 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3408 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3409 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3410 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3411 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3412 
3413 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3414 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3415 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3416 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3417 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3418 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3419 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3420 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3421 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3422 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3423 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3424 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3425 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3426 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3427 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3428 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3429 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3430 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3431 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3432 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3433 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3434 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3435 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3436 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3437 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3438 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3439 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3440 
3441 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3442 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3443 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3444 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3445 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3446 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3447 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3448 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3449 
3450 #undef GET_STAT
3451 #undef GET_STAT_COM
3452 }
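
/*
 * Usage sketch (illustrative only): a periodic statistics tick might refresh
 * a port's counters with
 *
 *	struct port_stats ps;
 *
 *	t4_get_port_stats(adap, pi->tx_chan, &ps);
 *
 * where "pi" is the port_info of interest.  The MPS counters are wide
 * hardware accumulators read with 64-bit accesses, so no software
 * accumulation between calls is required.
 */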
3453 
3454 /**
3455  *	t4_clr_port_stats - clear port statistics
3456  *	@adap: the adapter
3457  *	@idx: the port index
3458  *
3459  *	Clear HW statistics for the given port.
3460  */
3461 void t4_clr_port_stats(struct adapter *adap, int idx)
3462 {
3463 	unsigned int i;
3464 	u32 bgmap = get_mps_bg_map(adap, idx);
3465 
3466 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3467 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3468 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3469 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3470 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3471 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3472 	for (i = 0; i < 4; i++)
3473 		if (bgmap & (1 << i)) {
3474 			t4_write_reg(adap,
3475 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3476 			t4_write_reg(adap,
3477 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3478 		}
3479 }
3480 
3481 /**
3482  *	t4_get_lb_stats - collect loopback port statistics
3483  *	@adap: the adapter
3484  *	@idx: the loopback port index
3485  *	@p: the stats structure to fill
3486  *
3487  *	Return HW statistics for the given loopback port.
3488  */
3489 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3490 {
3491 	u32 bgmap = get_mps_bg_map(adap, idx);
3492 
3493 #define GET_STAT(name) \
3494 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3495 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3496 
3497 	p->octets           = GET_STAT(BYTES);
3498 	p->frames           = GET_STAT(FRAMES);
3499 	p->bcast_frames     = GET_STAT(BCAST);
3500 	p->mcast_frames     = GET_STAT(MCAST);
3501 	p->ucast_frames     = GET_STAT(UCAST);
3502 	p->error_frames     = GET_STAT(ERROR);
3503 
3504 	p->frames_64        = GET_STAT(64B);
3505 	p->frames_65_127    = GET_STAT(65B_127B);
3506 	p->frames_128_255   = GET_STAT(128B_255B);
3507 	p->frames_256_511   = GET_STAT(256B_511B);
3508 	p->frames_512_1023  = GET_STAT(512B_1023B);
3509 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3510 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3511 	p->drop             = t4_read_reg(adap, PORT_REG(idx,
3512 					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3513 
3514 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3515 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3516 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3517 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3518 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3519 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3520 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3521 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3522 
3523 #undef GET_STAT
3524 #undef GET_STAT_COM
3525 }
3526 
3527 /**
3528  *	t4_wol_magic_enable - enable/disable magic packet WoL
3529  *	@adap: the adapter
3530  *	@port: the physical port index
3531  *	@addr: MAC address expected in magic packets, %NULL to disable
3532  *
3533  *	Enables/disables magic packet wake-on-LAN for the selected port.
3534  */
3535 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3536 			 const u8 *addr)
3537 {
3538 	if (addr) {
3539 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3540 			     (addr[2] << 24) | (addr[3] << 16) |
3541 			     (addr[4] << 8) | addr[5]);
3542 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3543 			     (addr[0] << 8) | addr[1]);
3544 	}
3545 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3546 			 V_MAGICEN(addr != NULL));
3547 }
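
/*
 * Usage sketch (illustrative only; the address below is made up): arm and
 * later disarm magic-packet WoL on port 0.
 *
 *	static const u8 ea[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
 *
 *	t4_wol_magic_enable(adap, 0, ea);
 *	...
 *	t4_wol_magic_enable(adap, 0, NULL);
 */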
3548 
3549 /**
3550  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3551  *	@adap: the adapter
3552  *	@port: the physical port index
3553  *	@map: bitmap of which HW pattern filters to set
3554  *	@mask0: byte mask for bytes 0-63 of a packet
3555  *	@mask1: byte mask for bytes 64-127 of a packet
3556  *	@crc: Ethernet CRC for selected bytes
3557  *	@enable: enable/disable switch
3558  *
3559  *	Sets the pattern filters indicated in @map to mask out the bytes
3560  *	specified in @mask0/@mask1 in received packets and compare the CRC of
3561  *	the resulting packet against @crc.  If @enable is %true pattern-based
3562  *	WoL is enabled, otherwise disabled.
3563  */
3564 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3565 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3566 {
3567 	int i;
3568 
3569 	if (!enable) {
3570 		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3571 				 F_PATEN, 0);
3572 		return 0;
3573 	}
3574 	if (map > 0xff)
3575 		return -EINVAL;
3576 
3577 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3578 
3579 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3580 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3581 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3582 
3583 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3584 		if (!(map & 1))
3585 			continue;
3586 
3587 		/* write byte masks */
3588 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3589 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3590 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3591 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3592 			return -ETIMEDOUT;
3593 
3594 		/* write CRC */
3595 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3596 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3597 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3598 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3599 			return -ETIMEDOUT;
3600 	}
3601 #undef EPIO_REG
3602 
3603 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3604 	return 0;
3605 }
3606 
3607 /**
3608  *	t4_mk_filtdelwr - create a delete filter WR
3609  *	@ftid: the filter ID
3610  *	@wr: the filter work request to populate
3611  *	@qid: ingress queue to receive the delete notification
3612  *
3613  *	Creates a filter work request to delete the supplied filter.  If @qid is
3614  *	negative the delete notification is suppressed.
3615  */
3616 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3617 {
3618 	memset(wr, 0, sizeof(*wr));
3619 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3620 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3621 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3622 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3623 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3624 	if (qid >= 0)
3625 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3626 }
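
/*
 * Usage sketch (illustrative; the queue plumbing is driver-specific and
 * elided): a caller deleting filter "ftid" and steering the completion to
 * ingress queue "rsp_iq" would reserve space for a fw_filter_wr on a control
 * queue, then
 *
 *	t4_mk_filtdelwr(ftid, wr, rsp_iq);
 *
 * and ring that queue's doorbell.  Passing a negative qid suppresses the
 * completion message entirely.
 */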
3627 
3628 #define INIT_CMD(var, cmd, rd_wr) do { \
3629 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3630 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3631 	(var).retval_len16 = htonl(FW_LEN16(var)); \
3632 } while (0)
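
/*
 * For example, INIT_CMD(c, BYE, WRITE) expands (inside the usual
 * do { } while (0) wrapper) to:
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *
 * filling in the opcode/length header common to the simple FW commands below.
 */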
3633 
3634 /**
3635  *	t4_mdio_rd - read a PHY register through MDIO
3636  *	@adap: the adapter
3637  *	@mbox: mailbox to use for the FW command
3638  *	@phy_addr: the PHY address
3639  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3640  *	@reg: the register to read
3641  *	@valp: where to store the value
3642  *
3643  *	Issues a FW command through the given mailbox to read a PHY register.
3644  */
3645 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3646 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3647 {
3648 	int ret;
3649 	struct fw_ldst_cmd c;
3650 
3651 	memset(&c, 0, sizeof(c));
3652 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3653 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3654 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3655 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3656 				   V_FW_LDST_CMD_MMD(mmd));
3657 	c.u.mdio.raddr = htons(reg);
3658 
3659 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3660 	if (ret == 0)
3661 		*valp = ntohs(c.u.mdio.rval);
3662 	return ret;
3663 }
3664 
3665 /**
3666  *	t4_mdio_wr - write a PHY register through MDIO
3667  *	@adap: the adapter
3668  *	@mbox: mailbox to use for the FW command
3669  *	@phy_addr: the PHY address
3670  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3671  *	@reg: the register to write
3672  *	@val: the value to write
3673  *
3674  *	Issues a FW command through the given mailbox to write a PHY register.
3675  */
3676 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3677 	       unsigned int mmd, unsigned int reg, unsigned int val)
3678 {
3679 	struct fw_ldst_cmd c;
3680 
3681 	memset(&c, 0, sizeof(c));
3682 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3683 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3684 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3685 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3686 				   V_FW_LDST_CMD_MMD(mmd));
3687 	c.u.mdio.raddr = htons(reg);
3688 	c.u.mdio.rval = htons(val);
3689 
3690 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3691 }
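
/*
 * Usage sketch (illustrative only): read the PMA/PMD status register
 * (MMD 1, register 1) of a clause-45 PHY at address 0, using the PF's
 * mailbox "mbox":
 *
 *	unsigned int stat;
 *	int ret;
 *
 *	ret = t4_mdio_rd(adap, mbox, 0, 1, 1, &stat);
 *
 * On success the low 16 bits of "stat" hold the register value.  Clause-22
 * PHYs are addressed the same way with @mmd set to 0.
 */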
3692 
3693 /**
3694  *	t4_sge_ctxt_rd - read an SGE context through FW
3695  *	@adap: the adapter
3696  *	@mbox: mailbox to use for the FW command
3697  *	@cid: the context id
3698  *	@ctype: the context type
3699  *	@data: where to store the context data
3700  *
3701  *	Issues a FW command through the given mailbox to read an SGE context.
3702  */
3703 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3704 		   enum ctxt_type ctype, u32 *data)
3705 {
3706 	int ret;
3707 	struct fw_ldst_cmd c;
3708 
3709 	if (ctype == CTXT_EGRESS)
3710 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
3711 	else if (ctype == CTXT_INGRESS)
3712 		ret = FW_LDST_ADDRSPC_SGE_INGC;
3713 	else if (ctype == CTXT_FLM)
3714 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
3715 	else
3716 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
3717 
3718 	memset(&c, 0, sizeof(c));
3719 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3720 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3721 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3722 	c.u.idctxt.physid = htonl(cid);
3723 
3724 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3725 	if (ret == 0) {
3726 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
3727 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
3728 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
3729 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
3730 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
3731 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
3732 	}
3733 	return ret;
3734 }
3735 
3736 /**
3737  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3738  *	@adap: the adapter
3739  *	@cid: the context id
3740  *	@ctype: the context type
3741  *	@data: where to store the context data
3742  *
3743  *	Reads an SGE context directly, bypassing FW.  This is only for
3744  *	debugging when FW is unavailable.
3745  */
3746 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3747 		      u32 *data)
3748 {
3749 	int i, ret;
3750 
3751 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3752 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3753 	if (!ret)
3754 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3755 			*data++ = t4_read_reg(adap, i);
3756 	return ret;
3757 }
3758 
3759 /**
3760  *	t4_fw_hello - establish communication with FW
3761  *	@adap: the adapter
3762  *	@mbox: mailbox to use for the FW command
3763  *	@evt_mbox: mailbox to receive async FW events
3764  *	@master: specifies the caller's willingness to be the device master
3765  *	@state: returns the current device state
3766  *
3767  *	Issues a command to establish communication with FW.
3768  */
3769 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3770 		enum dev_master master, enum dev_state *state)
3771 {
3772 	int ret;
3773 	struct fw_hello_cmd c;
3774 	u32 v;
3775 	unsigned int master_mbox;
3776 	int retries = FW_CMD_HELLO_RETRIES;
3777 
3778 retry:
3779 	memset(&c, 0, sizeof(c));
3780 	INIT_CMD(c, HELLO, WRITE);
3781 	c.err_to_clearinit = htonl(
3782 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3783 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3784 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3785 			M_FW_HELLO_CMD_MBMASTER) |
3786 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3787 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3788 		F_FW_HELLO_CMD_CLEARINIT);
3789 
3790 	/*
3791 	 * Issue the HELLO command to the firmware.  If it's not successful
3792 	 * but indicates that we got a "busy" or "timeout" condition, retry
3793 	 * the HELLO until we exhaust our retry limit.
3794 	 */
3795 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3796 	if (ret != FW_SUCCESS) {
3797 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3798 			goto retry;
3799 		return ret;
3800 	}
3801 
3802 	v = ntohl(c.err_to_clearinit);
3803 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3804 	if (state) {
3805 		if (v & F_FW_HELLO_CMD_ERR)
3806 			*state = DEV_STATE_ERR;
3807 		else if (v & F_FW_HELLO_CMD_INIT)
3808 			*state = DEV_STATE_INIT;
3809 		else
3810 			*state = DEV_STATE_UNINIT;
3811 	}
3812 
3813 	/*
3814 	 * If we're not the Master PF then we need to wait around for the
3815 	 * Master PF Driver to finish setting up the adapter.
3816 	 *
3817 	 * Note that we also do this wait if we're a non-Master-capable PF and
3818 	 * there is no current Master PF; a Master PF may show up momentarily
3819 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
3820 	 * OS loads lots of different drivers rapidly at the same time).  In
3821 	 * this case, the Master PF returned by the firmware will be
3822 	 * M_PCIE_FW_MASTER so the test below will work ...
3823 	 */
3824 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
3825 	    master_mbox != mbox) {
3826 		int waiting = FW_CMD_HELLO_TIMEOUT;
3827 
3828 		/*
3829 		 * Wait for the firmware to either indicate an error or
3830 		 * initialized state.  If we see either of these we bail out
3831 		 * and report the issue to the caller.  If we exhaust the
3832 		 * "hello timeout" and we haven't exhausted our retries, try
3833 		 * again.  Otherwise bail with a timeout error.
3834 		 */
3835 		for (;;) {
3836 			u32 pcie_fw;
3837 
3838 			msleep(50);
3839 			waiting -= 50;
3840 
3841 			/*
3842 			 * If neither Error nor Initialized are indicated
3843 			 * by the firmware, keep waiting till we exhaust our
3844 			 * timeout ... and then retry if we haven't exhausted
3845 			 * our retries ...
3846 			 */
3847 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3848 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
3849 				if (waiting <= 0) {
3850 					if (retries-- > 0)
3851 						goto retry;
3852 
3853 					return -ETIMEDOUT;
3854 				}
3855 				continue;
3856 			}
3857 
3858 			/*
3859 			 * We have either an Error or Initialized condition;
3860 			 * report errors preferentially.
3861 			 */
3862 			if (state) {
3863 				if (pcie_fw & F_PCIE_FW_ERR)
3864 					*state = DEV_STATE_ERR;
3865 				else if (pcie_fw & F_PCIE_FW_INIT)
3866 					*state = DEV_STATE_INIT;
3867 			}
3868 
3869 			/*
3870 			 * If we arrived before a Master PF was selected and
3871 			 * there's now a valid Master PF, grab its identity
3872 			 * for our caller.
3873 			 */
3874 			if (master_mbox == M_PCIE_FW_MASTER &&
3875 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
3876 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3877 			break;
3878 		}
3879 	}
3880 
3881 	return master_mbox;
3882 }
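
/*
 * Usage sketch (illustrative only): a typical attach path negotiates
 * mastership and initializes the device only if it became master and the
 * firmware wasn't already initialized.
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (master < 0)
 *		return (master);
 *	if (master == mbox && state == DEV_STATE_UNINIT)
 *		(void) t4_fw_initialize(adap, mbox);	/- after configuring
 *
 * The non-negative return value is the master PF's mailbox, which equals
 * @mbox exactly when the caller was selected as master.
 */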
3883 
3884 /**
3885  *	t4_fw_bye - end communication with FW
3886  *	@adap: the adapter
3887  *	@mbox: mailbox to use for the FW command
3888  *
3889  *	Issues a command to terminate communication with FW.
3890  */
3891 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3892 {
3893 	struct fw_bye_cmd c;
3894 
3895 	memset(&c, 0, sizeof(c));
3896 	INIT_CMD(c, BYE, WRITE);
3897 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3898 }
3899 
3900 /**
3901  *	t4_fw_reset - issue a reset to FW
3902  *	@adap: the adapter
3903  *	@mbox: mailbox to use for the FW command
3904  *	@reset: specifies the type of reset to perform
3905  *
3906  *	Issues a reset command of the specified type to FW.
3907  */
3908 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3909 {
3910 	struct fw_reset_cmd c;
3911 
3912 	memset(&c, 0, sizeof(c));
3913 	INIT_CMD(c, RESET, WRITE);
3914 	c.val = htonl(reset);
3915 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3916 }
3917 
3918 /**
3919  *	t4_fw_initialize - ask FW to initialize the device
3920  *	@adap: the adapter
3921  *	@mbox: mailbox to use for the FW command
3922  *
3923  *	Issues a command to FW to partially initialize the device.  This
3924  *	performs initialization that generally doesn't depend on user input.
3925  */
3926 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3927 {
3928 	struct fw_initialize_cmd c;
3929 
3930 	memset(&c, 0, sizeof(c));
3931 	INIT_CMD(c, INITIALIZE, WRITE);
3932 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3933 }
3934 
3935 /**
3936  *	t4_query_params - query FW or device parameters
3937  *	@adap: the adapter
3938  *	@mbox: mailbox to use for the FW command
3939  *	@pf: the PF
3940  *	@vf: the VF
3941  *	@nparams: the number of parameters
3942  *	@params: the parameter names
3943  *	@val: the parameter values
3944  *
3945  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3946  *	queried at once.
3947  */
3948 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3949 		    unsigned int vf, unsigned int nparams, const u32 *params,
3950 		    u32 *val)
3951 {
3952 	int i, ret;
3953 	struct fw_params_cmd c;
3954 	__be32 *p = &c.param[0].mnem;
3955 
3956 	if (nparams > 7)
3957 		return -EINVAL;
3958 
3959 	memset(&c, 0, sizeof(c));
3960 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
3961 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
3962 			    V_FW_PARAMS_CMD_VFN(vf));
3963 	c.retval_len16 = htonl(FW_LEN16(c));
3964 
3965 	for (i = 0; i < nparams; i++, p += 2)
3966 		*p = htonl(*params++);
3967 
3968 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3969 	if (ret == 0)
3970 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3971 			*val++ = ntohl(*p);
3972 	return ret;
3973 }
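
/*
 * Usage sketch (illustrative only): query a single device parameter, e.g.
 * the firmware revision, assuming the usual mnemonic encoding from
 * t4fw_interface.h:
 *
 *	u32 param, fwrev;
 *	int ret;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
 *	ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &fwrev);
 *
 * t4_set_params below takes the same encoding plus a parallel array of
 * values to write.
 */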
3974 
3975 /**
3976  *	t4_set_params - sets FW or device parameters
3977  *	@adap: the adapter
3978  *	@mbox: mailbox to use for the FW command
3979  *	@pf: the PF
3980  *	@vf: the VF
3981  *	@nparams: the number of parameters
3982  *	@params: the parameter names
3983  *	@val: the parameter values
3984  *
3985  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3986  *	specified at once.
3987  */
3988 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3989 		  unsigned int vf, unsigned int nparams, const u32 *params,
3990 		  const u32 *val)
3991 {
3992 	struct fw_params_cmd c;
3993 	__be32 *p = &c.param[0].mnem;
3994 
3995 	if (nparams > 7)
3996 		return -EINVAL;
3997 
3998 	memset(&c, 0, sizeof(c));
3999 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4000 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4001 			    V_FW_PARAMS_CMD_VFN(vf));
4002 	c.retval_len16 = htonl(FW_LEN16(c));
4003 
4004 	while (nparams--) {
4005 		*p++ = htonl(*params++);
4006 		*p++ = htonl(*val++);
4007 	}
4008 
4009 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4010 }
4011 
4012 /**
4013  *	t4_cfg_pfvf - configure PF/VF resource limits
4014  *	@adap: the adapter
4015  *	@mbox: mailbox to use for the FW command
4016  *	@pf: the PF being configured
4017  *	@vf: the VF being configured
4018  *	@txq: the max number of egress queues
4019  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4020  *	@rxqi: the max number of interrupt-capable ingress queues
4021  *	@rxq: the max number of interruptless ingress queues
4022  *	@tc: the PCI traffic class
4023  *	@vi: the max number of virtual interfaces
4024  *	@cmask: the channel access rights mask for the PF/VF
4025  *	@pmask: the port access rights mask for the PF/VF
4026  *	@nexact: the maximum number of exact MPS filters
4027  *	@rcaps: read capabilities
4028  *	@wxcaps: write/execute capabilities
4029  *
4030  *	Configures resource limits and capabilities for a physical or virtual
4031  *	function.
4032  */
4033 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4034 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4035 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4036 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4037 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4038 {
4039 	struct fw_pfvf_cmd c;
4040 
4041 	memset(&c, 0, sizeof(c));
4042 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4043 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4044 			    V_FW_PFVF_CMD_VFN(vf));
4045 	c.retval_len16 = htonl(FW_LEN16(c));
4046 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4047 			       V_FW_PFVF_CMD_NIQ(rxq));
4048 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4049 			      V_FW_PFVF_CMD_PMASK(pmask) |
4050 			      V_FW_PFVF_CMD_NEQ(txq));
4051 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4052 				V_FW_PFVF_CMD_NEXACTF(nexact));
4053 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4054 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4055 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4056 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4057 }
4058 
4059 /**
4060  *	t4_alloc_vi - allocate a virtual interface
4061  *	@adap: the adapter
4062  *	@mbox: mailbox to use for the FW command
4063  *	@port: physical port associated with the VI
4064  *	@pf: the PF owning the VI
4065  *	@vf: the VF owning the VI
4066  *	@nmac: number of MAC addresses needed (1 to 5)
4067  *	@mac: the MAC addresses of the VI
4068  *	@rss_size: size of RSS table slice associated with this VI
4069  *
4070  *	Allocates a virtual interface for the given physical port.  If @mac is
4071  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4072  *	@mac should be large enough to hold @nmac Ethernet addresses; they are
4073  *	stored consecutively, so the space needed is @nmac * 6 bytes.
4074  *	Returns a negative error number or the non-negative VI id.
4075  */
4076 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4077 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4078 		unsigned int *rss_size)
4079 {
4080 	int ret;
4081 	struct fw_vi_cmd c;
4082 
4083 	memset(&c, 0, sizeof(c));
4084 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4085 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4086 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4087 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4088 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4089 	c.nmac = nmac - 1;
4090 
4091 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4092 	if (ret)
4093 		return ret;
4094 
4095 	if (mac) {
4096 		memcpy(mac, c.mac, sizeof(c.mac));
4097 		switch (nmac) {
4098 		case 5:
4099 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* FALLTHROUGH */
4100 		case 4:
4101 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* FALLTHROUGH */
4102 		case 3:
4103 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* FALLTHROUGH */
4104 		case 2:
4105 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4106 		}
4107 	}
4108 	if (rss_size)
4109 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
4110 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4111 }
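
/*
 * Usage sketch (illustrative only): allocate a VI on physical port 0 with
 * two MAC addresses.
 *
 *	u8 mac[2 * 6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, mbox, 0, pf, 0, 2, mac, &rss_size);
 *	if (viid < 0)
 *		return (viid);
 *
 * On success mac[0..5] and mac[6..11] hold the two FW-assigned addresses.
 */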
4112 
4113 /**
4114  *	t4_free_vi - free a virtual interface
4115  *	@adap: the adapter
4116  *	@mbox: mailbox to use for the FW command
4117  *	@pf: the PF owning the VI
4118  *	@vf: the VF owning the VI
4119  *	@viid: virtual interface identifier
4120  *
4121  *	Free a previously allocated virtual interface.
4122  */
4123 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4124 	       unsigned int vf, unsigned int viid)
4125 {
4126 	struct fw_vi_cmd c;
4127 
4128 	memset(&c, 0, sizeof(c));
4129 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4130 			    F_FW_CMD_REQUEST |
4131 			    F_FW_CMD_EXEC |
4132 			    V_FW_VI_CMD_PFN(pf) |
4133 			    V_FW_VI_CMD_VFN(vf));
4134 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4135 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4136 
4137 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4138 }
4139 
4140 /**
4141  *	t4_set_rxmode - set Rx properties of a virtual interface
4142  *	@adap: the adapter
4143  *	@mbox: mailbox to use for the FW command
4144  *	@viid: the VI id
4145  *	@mtu: the new MTU or -1
4146  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4147  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4148  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4149  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4150  *	@sleep_ok: if true we may sleep while awaiting command completion
4151  *
4152  *	Sets Rx properties of a virtual interface.
4153  */
4154 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4155 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4156 		  bool sleep_ok)
4157 {
4158 	struct fw_vi_rxmode_cmd c;
4159 
4160 	/* convert to FW values */
4161 	if (mtu < 0)
4162 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4163 	if (promisc < 0)
4164 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4165 	if (all_multi < 0)
4166 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4167 	if (bcast < 0)
4168 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4169 	if (vlanex < 0)
4170 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4171 
4172 	memset(&c, 0, sizeof(c));
4173 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4174 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4175 	c.retval_len16 = htonl(FW_LEN16(c));
4176 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4177 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4178 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4179 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4180 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4181 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4182 }
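
/*
 * Usage sketch (illustrative only): turn on promiscuous and all-multicast
 * reception while leaving the MTU, broadcast, and VLAN-extraction settings
 * untouched (-1 means "no change"):
 *
 *	(void) t4_set_rxmode(adap, mbox, viid, -1, 1, 1, -1, -1, true);
 */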
4183 
4184 /**
4185  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4186  *	@adap: the adapter
4187  *	@mbox: mailbox to use for the FW command
4188  *	@viid: the VI id
4189  *	@free: if true any existing filters for this VI id are first removed
4190  *	@naddr: the number of MAC addresses to allocate filters for
4191  *	@addr: the MAC address(es)
4192  *	@idx: where to store the index of each allocated filter
4193  *	@hash: pointer to hash address filter bitmap
4194  *	@sleep_ok: call is allowed to sleep
4195  *
4196  *	Allocates an exact-match filter for each of the supplied addresses and
4197  *	sets it to the corresponding address.  If @idx is not %NULL it should
4198  *	have at least @naddr entries, each of which will be set to the index of
4199  *	the filter allocated for the corresponding MAC address.  If a filter
4200  *	could not be allocated for an address its index is set to 0xffff.
4201  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4202  *	are hashed and recorded in the hash filter bitmap pointed at by @hash.
4203  *
4204  *	Returns a negative error number or the number of filters allocated.
4205  */
4206 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4207 		      unsigned int viid, bool free, unsigned int naddr,
4208 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4209 {
4210 	int offset, ret = 0;
4211 	struct fw_vi_mac_cmd c;
4212 	unsigned int nfilters = 0;
4213 	unsigned int rem = naddr;
4214 
4215 	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
4216 		return -EINVAL;
4217 
4218 	for (offset = 0; offset < naddr ; /**/) {
4219 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4220 					 ? rem
4221 					 : ARRAY_SIZE(c.u.exact));
4222 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4223 						     u.exact[fw_naddr]), 16);
4224 		struct fw_vi_mac_exact *p;
4225 		int i;
4226 
4227 		memset(&c, 0, sizeof(c));
4228 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4229 				     F_FW_CMD_REQUEST |
4230 				     F_FW_CMD_WRITE |
4231 				     V_FW_CMD_EXEC(free) |
4232 				     V_FW_VI_MAC_CMD_VIID(viid));
4233 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4234 					    V_FW_CMD_LEN16(len16));
4235 
4236 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4237 			p->valid_to_idx = htons(
4238 				F_FW_VI_MAC_CMD_VALID |
4239 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4240 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4241 		}
4242 
4243 		/*
4244 		 * It's okay if we run out of space in our MAC address arena.
4245 		 * Some of the addresses we submit may get stored so we need
4246 		 * to run through the reply to see what the results were ...
4247 		 */
4248 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4249 		if (ret && ret != -FW_ENOMEM)
4250 			break;
4251 
4252 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4253 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4254 
4255 			if (idx)
4256 				idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
4257 						 ? 0xffff
4258 						 : index);
4259 			if (index < FW_CLS_TCAM_NUM_ENTRIES)
4260 				nfilters++;
4261 			else if (hash)
4262 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4263 		}
4264 
4265 		free = false;
4266 		offset += fw_naddr;
4267 		rem -= fw_naddr;
4268 	}
4269 
4270 	if (ret == 0 || ret == -FW_ENOMEM)
4271 		ret = nfilters;
4272 	return ret;
4273 }
4274 
4275 /**
4276  *	t4_change_mac - modifies the exact-match filter for a MAC address
4277  *	@adap: the adapter
4278  *	@mbox: mailbox to use for the FW command
4279  *	@viid: the VI id
4280  *	@idx: index of existing filter for old value of MAC address, or -1
4281  *	@addr: the new MAC address value
4282  *	@persist: whether a new MAC allocation should be persistent
4283  *	@add_smt: if true also add the address to the HW SMT
4284  *
4285  *	Modifies an exact-match filter and sets it to the new MAC address if
4286  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4287  *	latter case the address is added persistently if @persist is %true.
4288  *
4289  *	Note that in general it is not possible to modify the value of a given
4290  *	filter, so the generic way to modify an address filter is to free the
4291  *	one being used by the old address value and allocate a new filter for
4292  *	the new address value.
4293  *
4294  *	Returns a negative error number or the index of the filter with the new
4295  *	MAC value.  Note that this index may differ from @idx.
4296  */
4297 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4298 		  int idx, const u8 *addr, bool persist, bool add_smt)
4299 {
4300 	int ret, mode;
4301 	struct fw_vi_mac_cmd c;
4302 	struct fw_vi_mac_exact *p = c.u.exact;
4303 
4304 	if (idx < 0)                             /* new allocation */
4305 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4306 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4307 
4308 	memset(&c, 0, sizeof(c));
4309 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4310 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4311 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4312 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4313 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4314 				V_FW_VI_MAC_CMD_IDX(idx));
4315 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4316 
4317 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4318 	if (ret == 0) {
4319 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4320 		if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
4321 			ret = -ENOMEM;
4322 	}
4323 	return ret;
4324 }
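
/*
 * Usage sketch (illustrative only): install a VI's primary unicast address
 * for the first time (@idx of -1 requests a new filter) and remember the
 * returned index for later updates.
 *
 *	int filt_idx;
 *
 *	filt_idx = t4_change_mac(adap, mbox, viid, -1, hw_addr, true, true);
 *	if (filt_idx < 0)
 *		return (filt_idx);
 */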
4325 
4326 /**
4327  *	t4_set_addr_hash - program the MAC inexact-match hash filter
4328  *	@adap: the adapter
4329  *	@mbox: mailbox to use for the FW command
4330  *	@viid: the VI id
4331  *	@ucast: whether the hash filter should also match unicast addresses
4332  *	@vec: the value to be written to the hash filter
4333  *	@sleep_ok: call is allowed to sleep
4334  *
4335  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4336  */
4337 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4338 		     bool ucast, u64 vec, bool sleep_ok)
4339 {
4340 	struct fw_vi_mac_cmd c;
4341 
4342 	memset(&c, 0, sizeof(c));
4343 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4344 			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4345 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4346 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4347 				    V_FW_CMD_LEN16(1));
4348 	c.u.hash.hashvec = cpu_to_be64(vec);
4349 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4350 }
4351 
4352 /**
4353  *	t4_enable_vi - enable/disable a virtual interface
4354  *	@adap: the adapter
4355  *	@mbox: mailbox to use for the FW command
4356  *	@viid: the VI id
4357  *	@rx_en: 1=enable Rx, 0=disable Rx
4358  *	@tx_en: 1=enable Tx, 0=disable Tx
4359  *
4360  *	Enables/disables a virtual interface.
4361  */
4362 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4363 		 bool rx_en, bool tx_en)
4364 {
4365 	struct fw_vi_enable_cmd c;
4366 
4367 	memset(&c, 0, sizeof(c));
4368 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4369 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4370 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4371 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4372 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4373 }
4374 
4375 /**
4376  *	t4_identify_port - identify a VI's port by blinking its LED
4377  *	@adap: the adapter
4378  *	@mbox: mailbox to use for the FW command
4379  *	@viid: the VI id
4380  *	@nblinks: how many times to blink LED at 2.5 Hz
4381  *
4382  *	Identifies a VI's port by blinking its LED.
4383  */
4384 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4385 		     unsigned int nblinks)
4386 {
4387 	struct fw_vi_enable_cmd c;
4388 
4389 	memset(&c, 0, sizeof(c));
4390 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4391 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4392 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4393 	c.blinkdur = htons(nblinks);
4394 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4395 }
4396 
4397 /**
4398  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4399  *	@adap: the adapter
4400  *	@mbox: mailbox to use for the FW command
4401  *	@start: %true to enable the queues, %false to disable them
4402  *	@pf: the PF owning the queues
4403  *	@vf: the VF owning the queues
4404  *	@iqid: ingress queue id
4405  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4406  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4407  *
4408  *	Starts or stops an ingress queue and its associated FLs, if any.
4409  */
4410 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4411 		     unsigned int pf, unsigned int vf, unsigned int iqid,
4412 		     unsigned int fl0id, unsigned int fl1id)
4413 {
4414 	struct fw_iq_cmd c;
4415 
4416 	memset(&c, 0, sizeof(c));
4417 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4418 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4419 			    V_FW_IQ_CMD_VFN(vf));
4420 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4421 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4422 	c.iqid = htons(iqid);
4423 	c.fl0id = htons(fl0id);
4424 	c.fl1id = htons(fl1id);
4425 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4426 }
4427 
4428 /**
4429  *	t4_iq_free - free an ingress queue and its FLs
4430  *	@adap: the adapter
4431  *	@mbox: mailbox to use for the FW command
4432  *	@pf: the PF owning the queues
4433  *	@vf: the VF owning the queues
4434  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4435  *	@iqid: ingress queue id
4436  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4437  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4438  *
4439  *	Frees an ingress queue and its associated FLs, if any.
4440  */
4441 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4442 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4443 	       unsigned int fl0id, unsigned int fl1id)
4444 {
4445 	struct fw_iq_cmd c;
4446 
4447 	memset(&c, 0, sizeof(c));
4448 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4449 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4450 			    V_FW_IQ_CMD_VFN(vf));
4451 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4452 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4453 	c.iqid = htons(iqid);
4454 	c.fl0id = htons(fl0id);
4455 	c.fl1id = htons(fl1id);
4456 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4457 }
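
/*
 * Usage sketch (illustrative only): free an ingress queue that has a single
 * attached free list, passing 0xffff for the missing FL1.
 *
 *	(void) t4_iq_free(adap, mbox, pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *			  iqid, fl0id, 0xffff);
 */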
4458 
4459 /**
4460  *	t4_eth_eq_free - free an Ethernet egress queue
4461  *	@adap: the adapter
4462  *	@mbox: mailbox to use for the FW command
4463  *	@pf: the PF owning the queue
4464  *	@vf: the VF owning the queue
4465  *	@eqid: egress queue id
4466  *
4467  *	Frees an Ethernet egress queue.
4468  */
4469 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4470 		   unsigned int vf, unsigned int eqid)
4471 {
4472 	struct fw_eq_eth_cmd c;
4473 
4474 	memset(&c, 0, sizeof(c));
4475 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4476 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4477 			    V_FW_EQ_ETH_CMD_VFN(vf));
4478 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4479 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4480 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4481 }
4482 
4483 /**
4484  *	t4_ctrl_eq_free - free a control egress queue
4485  *	@adap: the adapter
4486  *	@mbox: mailbox to use for the FW command
4487  *	@pf: the PF owning the queue
4488  *	@vf: the VF owning the queue
4489  *	@eqid: egress queue id
4490  *
4491  *	Frees a control egress queue.
4492  */
4493 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4494 		    unsigned int vf, unsigned int eqid)
4495 {
4496 	struct fw_eq_ctrl_cmd c;
4497 
4498 	memset(&c, 0, sizeof(c));
4499 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4500 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4501 			    V_FW_EQ_CTRL_CMD_VFN(vf));
4502 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4503 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4504 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4505 }
4506 
4507 /**
4508  *	t4_ofld_eq_free - free an offload egress queue
4509  *	@adap: the adapter
4510  *	@mbox: mailbox to use for the FW command
4511  *	@pf: the PF owning the queue
4512  *	@vf: the VF owning the queue
4513  *	@eqid: egress queue id
4514  *
4515  *	Frees an offload egress queue.
4516  */
4517 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4518 		    unsigned int vf, unsigned int eqid)
4519 {
4520 	struct fw_eq_ofld_cmd c;
4521 
4522 	memset(&c, 0, sizeof(c));
4523 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4524 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
4525 			    V_FW_EQ_OFLD_CMD_VFN(vf));
4526 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
4527 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
4528 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4529 }
4530 
4531 /**
4532  *	t4_handle_fw_rpl - process a FW reply message
4533  *	@adap: the adapter
4534  *	@rpl: start of the FW message
4535  *
4536  *	Processes a FW message, such as link state change messages.
4537  */
4538 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4539 {
4540 	u8 opcode = *(const u8 *)rpl;
4541 
4542 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
4543 		int speed = 0, fc = 0, i;
4544 		const struct fw_port_cmd *p = (const void *)rpl;
4545 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
4546 		struct port_info *pi = NULL;
4547 		struct link_config *lc;
4548 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
4549 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
4550 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
4551 
4552 		if (stat & F_FW_PORT_CMD_RXPAUSE)
4553 			fc |= PAUSE_RX;
4554 		if (stat & F_FW_PORT_CMD_TXPAUSE)
4555 			fc |= PAUSE_TX;
4556 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4557 			speed = SPEED_100;
4558 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4559 			speed = SPEED_1000;
4560 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4561 			speed = SPEED_10000;
4562 
4563 		for_each_port(adap, i) {
4564 			pi = adap2pinfo(adap, i);
4565 			if (pi->tx_chan == chan)
4566 				break;
4567 		}
4568 		lc = &pi->link_cfg;
4569 
4570 		if (link_ok != lc->link_ok || speed != lc->speed ||
4571 		    fc != lc->fc) {                    /* something changed */
4572 			lc->link_ok = link_ok;
4573 			lc->speed = speed;
4574 			lc->fc = fc;
4575 			t4_os_link_changed(adap, i, link_ok);
4576 		}
4577 		if (mod != pi->mod_type) {
4578 			pi->mod_type = mod;
4579 			t4_os_portmod_changed(adap, i);
4580 		}
4581 	}
4582 	return 0;
4583 }
4584 
4585 /**
4586  *	get_pci_mode - determine a card's PCI mode
4587  *	@adapter: the adapter
4588  *	@p: where to store the PCI settings
4589  *
4590  *	Determines a card's PCI mode and associated parameters, such as speed
4591  *	and width.
4592  */
4593 static void __devinit get_pci_mode(struct adapter *adapter,
4594 				   struct pci_params *p)
4595 {
4596 	u16 val;
4597 	u32 pcie_cap;
4598 
4599 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4600 	if (pcie_cap) {
4601 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
4602 		p->speed = val & PCI_EXP_LNKSTA_CLS;
4603 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
4604 	}
4605 }
4606 
4607 /**
4608  *	init_link_config - initialize a link's SW state
4609  *	@lc: structure holding the link state
4610  *	@caps: link capabilities
4611  *
4612  *	Initializes the SW state maintained for each link, including the link's
4613  *	capabilities and default speed/flow-control/autonegotiation settings.
4614  */
4615 static void __devinit init_link_config(struct link_config *lc,
4616 				       unsigned int caps)
4617 {
4618 	lc->supported = caps;
4619 	lc->requested_speed = 0;
4620 	lc->speed = 0;
4621 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4622 	if (lc->supported & FW_PORT_CAP_ANEG) {
4623 		lc->advertising = lc->supported & ADVERT_MASK;
4624 		lc->autoneg = AUTONEG_ENABLE;
4625 		lc->requested_fc |= PAUSE_AUTONEG;
4626 	} else {
4627 		lc->advertising = 0;
4628 		lc->autoneg = AUTONEG_DISABLE;
4629 	}
4630 }
4631 
4632 static int __devinit wait_dev_ready(struct adapter *adap)
4633 {
4634 	u32 whoami;
4635 
4636 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4637 
4638 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4639 		return 0;
4640 
4641 	msleep(500);
4642 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4643 	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
4644 		? 0 : -EIO);
4645 }
4646 
4647 static int __devinit get_flash_params(struct adapter *adapter)
4648 {
4649 	int ret;
4650 	u32 info = 0;
4651 
4652 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4653 	if (!ret)
4654 		ret = sf1_read(adapter, 3, 0, 1, &info);
4655 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
4656 	if (ret < 0)
4657 		return ret;
4658 
4659 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
4660 		return -EINVAL;
4661 	info >>= 16;                           /* log2 of size */
4662 	if (info >= 0x14 && info < 0x18)
4663 		adapter->params.sf_nsec = 1 << (info - 16);
4664 	else if (info == 0x18)
4665 		adapter->params.sf_nsec = 64;
4666 	else
4667 		return -EINVAL;
4668 	adapter->params.sf_size = 1 << info;
4669 	return 0;
4670 }
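
/*
 * For example, a Numonix density ID of 0x16 (log2 of a 4 MB part) yields
 * sf_nsec = 1 << (0x16 - 16) = 64 sectors and sf_size = 1 << 0x16 = 4 MB,
 * i.e. 64 KB per sector.
 */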
4671 
4672 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
4673 						  u8 range)
4674 {
4675 	u16 val;
4676 	u32 pcie_cap;
4677 
4678 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4679 	if (pcie_cap) {
4680 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4681 		val &= 0xfff0;
4682 		val |= range;
4683 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4684 	}
4685 }
4686 
4687 /**
4688  *	t4_prep_adapter - prepare SW and HW for operation
4689  *	@adapter: the adapter
4691  *
4692  *	Initialize adapter SW state for the various HW modules, set initial
4693  *	values for some adapter tunables, take PHYs out of reset, and
4694  *	initialize the MDIO interface.
4695  */
4696 int __devinit t4_prep_adapter(struct adapter *adapter)
4697 {
4698 	int ret;
4699 
4700 	ret = wait_dev_ready(adapter);
4701 	if (ret < 0)
4702 		return ret;
4703 
4704 	get_pci_mode(adapter, &adapter->params.pci);
4705 
4706 	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
4707 	adapter->params.pci.vpd_cap_addr =
4708 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4709 
4710 	ret = get_flash_params(adapter);
4711 	if (ret < 0)
4712 		return ret;
4713 
4714 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4715 	if (ret < 0)
4716 		return ret;
4717 
4718 	if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) {
4719 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
4720 	} else {
4721 		adapter->params.cim_la_size = CIMLA_SIZE;
4722 	}
4723 
4724 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4725 
4726 	/*
4727 	 * Default port and clock for debugging in case we can't reach FW.
4728 	 */
4729 	adapter->params.nports = 1;
4730 	adapter->params.portvec = 1;
4731 	adapter->params.vpd.cclk = 50000;
4732 
4733 	/* Set pci completion timeout value to 4 seconds. */
4734 	set_pcie_completion_timeout(adapter, 0xd);
4735 	return 0;
4736 }
4737 
4738 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
4739 {
4740 	u8 addr[6];
4741 	int ret, i, j;
4742 	struct fw_port_cmd c;
4743 	unsigned int rss_size;
4744 	adapter_t *adap = p->adapter;
4745 
4746 	memset(&c, 0, sizeof(c));
4747 
4748 	for (i = 0, j = -1; i <= p->port_id; i++) {
4749 		do {
4750 			j++;
4751 		} while ((adap->params.portvec & (1 << j)) == 0);
4752 	}
4753 
4754 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
4755 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
4756 			       V_FW_PORT_CMD_PORTID(j));
4757 	c.action_to_len16 = htonl(
4758 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4759 		FW_LEN16(c));
4760 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4761 	if (ret)
4762 		return ret;
4763 
4764 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4765 	if (ret < 0)
4766 		return ret;
4767 
4768 	p->viid = ret;
4769 	p->tx_chan = j;
4770 	p->lport = j;
4771 	p->rss_size = rss_size;
4772 	t4_os_set_hw_addr(adap, p->port_id, addr);
4773 
4774 	ret = ntohl(c.u.info.lstatus_to_modtype);
4775 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
4776 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
4777 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
4778 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
4779 
4780 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4781 
4782 	return 0;
4783 }
4784