xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision 955c8cbb4960e6cf3602de144b1b9154a5092968)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 
32 #include "common.h"
33 #include "t4_regs.h"
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
36 
37 #undef msleep
38 #define msleep(x) pause("t4hw", (x) * hz / 1000)
39 
40 /**
41  *	t4_wait_op_done_val - wait until an operation is completed
42  *	@adapter: the adapter performing the operation
43  *	@reg: the register to check for completion
44  *	@mask: a single-bit field within @reg that indicates completion
45  *	@polarity: the value of the field when the operation is completed
46  *	@attempts: number of check iterations
47  *	@delay: delay in usecs between iterations
48  *	@valp: where to store the value of the register at completion time
49  *
50  *	Wait until an operation is completed by checking a bit in a register
51  *	up to @attempts times.  If @valp is not NULL the value of the register
52  *	at the time it indicated completion is stored there.  Returns 0 if the
53  *	operation completes and -EAGAIN otherwise.
54  */
55 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 		        int polarity, int attempts, int delay, u32 *valp)
57 {
58 	while (1) {
59 		u32 val = t4_read_reg(adapter, reg);
60 
61 		if (!!(val & mask) == polarity) {
62 			if (valp)
63 				*valp = val;
64 			return 0;
65 		}
66 		if (--attempts == 0)
67 			return -EAGAIN;
68 		if (delay)
69 			udelay(delay);
70 	}
71 }
72 
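/*
 * Illustrative sketch (kept out of the build): a typical caller polls a
 * self-clearing "start" bit with t4_wait_op_done_val(), much as the BIST
 * helpers later in this file do.  The register and field names below are
 * ones this file already uses; the wrapper function itself is hypothetical.
 */
#if 0
static int example_wait_bist_done(struct adapter *adap)
{
	u32 val;

	/* Poll up to 10 times, 1 us apart, for F_START_BIST to read as 0. */
	if (t4_wait_op_done_val(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1,
				&val))
		return -EAGAIN;		/* timed out */

	/* val holds the register contents observed at completion time. */
	return 0;
}
#endif
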
73 /**
74  *	t4_set_reg_field - set a register field to a value
75  *	@adapter: the adapter to program
76  *	@addr: the register address
77  *	@mask: specifies the portion of the register to modify
78  *	@val: the new value for the register field
79  *
80  *	Sets a register field specified by the supplied mask to the
81  *	given value.
82  */
83 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
84 		      u32 val)
85 {
86 	u32 v = t4_read_reg(adapter, addr) & ~mask;
87 
88 	t4_write_reg(adapter, addr, v | val);
89 	(void) t4_read_reg(adapter, addr);      /* flush */
90 }
91 
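/*
 * Illustrative sketch (kept out of the build): updating a multi-bit field
 * with t4_set_reg_field() using the usual V_xxx(M_xxx) mask idiom from
 * t4_regs.h.  The new value must be shifted into place the same way the
 * mask is; the particular register and field here are only an example.
 */
#if 0
static void example_set_pktshift(struct adapter *adap, unsigned int shift)
{
	/* Clear the PKTSHIFT field, then set it to @shift. */
	t4_set_reg_field(adap, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
			 V_PKTSHIFT(shift));
}
#endif
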
92 /**
93  *	t4_read_indirect - read indirectly addressed registers
94  *	@adap: the adapter
95  *	@addr_reg: register holding the indirect address
96  *	@data_reg: register holding the value of the indirect register
97  *	@vals: where the read register values are stored
98  *	@nregs: how many indirect registers to read
99  *	@start_idx: index of first indirect register to read
100  *
101  *	Reads registers that are accessed indirectly through an address/data
102  *	register pair.
103  */
104 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
105 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
106 		      unsigned int start_idx)
107 {
108 	while (nregs--) {
109 		t4_write_reg(adap, addr_reg, start_idx);
110 		*vals++ = t4_read_reg(adap, data_reg);
111 		start_idx++;
112 	}
113 }
114 
115 /**
116  *	t4_write_indirect - write indirectly addressed registers
117  *	@adap: the adapter
118  *	@addr_reg: register holding the indirect addresses
119  *	@data_reg: register holding the value for the indirect registers
120  *	@vals: values to write
121  *	@nregs: how many indirect registers to write
122  *	@start_idx: index of the first indirect register to write
123  *
124  *	Writes a sequential block of registers that are accessed indirectly
125  *	through an address/data register pair.
126  */
127 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
128 		       unsigned int data_reg, const u32 *vals,
129 		       unsigned int nregs, unsigned int start_idx)
130 {
131 	while (nregs--) {
132 		t4_write_reg(adap, addr_reg, start_idx++);
133 		t4_write_reg(adap, data_reg, *vals++);
134 	}
135 }
136 
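/*
 * Illustrative sketch (kept out of the build): reading a block of
 * indirectly addressed registers through an address/data window.  The
 * A_TP_PIO_ADDR/A_TP_PIO_DATA pair is assumed here as the window; any
 * address/data register pair works the same way.
 */
#if 0
static void example_read_tp_pio(struct adapter *adap, u32 *buf,
				unsigned int nregs, unsigned int start)
{
	/* buf receives nregs words read from TP-space addresses start.. */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buf, nregs,
			 start);
}
#endif
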
137 /*
138  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
139  * mechanism.  This guarantees that we get the real value even if we're
140  * operating within a Virtual Machine and the Hypervisor is trapping our
141  * Configuration Space accesses.
142  */
143 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
144 {
145 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
146 		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
147 		     V_REGISTER(reg));
148 	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
149 }
150 
151 /*
152  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
153  */
154 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
155 			 u32 mbox_addr)
156 {
157 	for ( ; nflit; nflit--, mbox_addr += 8)
158 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
159 }
160 
161 /*
162  * Handle a FW assertion reported in a mailbox.
163  */
164 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
165 {
166 	struct fw_debug_cmd asrt;
167 
168 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
169 	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
170 		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
171 		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
172 }
173 
174 #define X_CIM_PF_NOACCESS 0xeeeeeeee
175 /**
176  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
177  *	@adap: the adapter
178  *	@mbox: index of the mailbox to use
179  *	@cmd: the command to write
180  *	@size: command length in bytes
181  *	@rpl: where to optionally store the reply
182  *	@sleep_ok: if true we may sleep while awaiting command completion
183  *
184  *	Sends the given command to FW through the selected mailbox and waits
185  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
186  *	store the FW's reply to the command.  The command and its optional
187  *	reply are of the same length.  Some FW commands like RESET and
188  *	INITIALIZE can take a considerable amount of time to execute.
189  *	@sleep_ok determines whether we may sleep while awaiting the response.
190  *	If sleeping is allowed we use progressive backoff; otherwise we spin.
191  *
192  *	The return value is 0 on success or a negative errno on failure.  A
193  *	failure can happen either because we are not able to execute the
194  *	command or FW executes it but signals an error.  In the latter case
195  *	the return value is the error code indicated by FW (negated).
196  */
197 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
198 		    void *rpl, bool sleep_ok)
199 {
200 	/*
201 	 * We delay in small increments at first in an effort to maintain
202 	 * responsiveness for simple, fast executing commands but then back
203 	 * off to larger delays to a maximum retry delay.
204 	 */
205 	static const int delay[] = {
206 		1, 1, 3, 5, 10, 10, 20, 50, 100
207 	};
208 
209 	u32 v;
210 	u64 res;
211 	int i, ms, delay_idx;
212 	const __be64 *p = cmd;
213 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
214 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
215 
216 	if ((size & 15) || size > MBOX_LEN)
217 		return -EINVAL;
218 
219 	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
220 	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
221 		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
222 
223 	if (v != X_MBOWNER_PL)
224 		return v ? -EBUSY : -ETIMEDOUT;
225 
226 	for (i = 0; i < size; i += 8, p++)
227 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
228 
229 	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
230 	t4_read_reg(adap, ctl_reg);          /* flush write */
231 
232 	delay_idx = 0;
233 	ms = delay[0];
234 
235 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
236 		if (sleep_ok) {
237 			ms = delay[delay_idx];  /* last element may repeat */
238 			if (delay_idx < ARRAY_SIZE(delay) - 1)
239 				delay_idx++;
240 			msleep(ms);
241 		} else
242 			mdelay(ms);
243 
244 		v = t4_read_reg(adap, ctl_reg);
245 		if (v == X_CIM_PF_NOACCESS)
246 			continue;
247 		if (G_MBOWNER(v) == X_MBOWNER_PL) {
248 			if (!(v & F_MBMSGVALID)) {
249 				t4_write_reg(adap, ctl_reg,
250 					     V_MBOWNER(X_MBOWNER_NONE));
251 				continue;
252 			}
253 
254 			res = t4_read_reg64(adap, data_reg);
255 			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
256 				fw_asrt(adap, data_reg);
257 				res = V_FW_CMD_RETVAL(EIO);
258 			} else if (rpl)
259 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
260 			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
261 			return -G_FW_CMD_RETVAL((int)res);
262 		}
263 	}
264 
265 	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
266 	       *(const u8 *)cmd, mbox);
267 	return -ETIMEDOUT;
268 }
269 
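/*
 * Illustrative sketch (kept out of the build): the usual mailbox calling
 * pattern.  Callers fill out a big-endian firmware command structure and
 * hand it to the t4_wr_mbox() wrapper around t4_wr_mbox_meat(); the reply,
 * if requested, comes back in a structure of the same size.  FW_RESET_CMD
 * and the fw_reset_cmd layout come from t4fw_interface.h; the F_PIORST
 * reset flavor chosen here is only an example.
 */
#if 0
static int example_fw_reset(struct adapter *adap, unsigned int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
			      F_FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	c.val = htonl(F_PIORST);

	/* Returns 0 on success or the FW's error code, negated. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
#endif
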
270 /**
271  *	t4_mc_read - read from MC through backdoor accesses
272  *	@adap: the adapter
273  *	@addr: address of first byte requested
274  *	@data: 64 bytes of data containing the requested address
275  *	@ecc: where to store the corresponding 64-bit ECC word
276  *
277  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
278  *	that covers the requested address @addr.  If @ecc is not %NULL it
279  *	is assigned the 64-bit ECC word for the read data.
280  */
281 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
282 {
283 	int i;
284 
285 	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
286 		return -EBUSY;
287 	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
288 	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
289 	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
290 	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
291 		     V_BIST_CMD_GAP(1));
292 	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
293 	if (i)
294 		return i;
295 
296 #define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
297 
298 	for (i = 15; i >= 0; i--)
299 		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
300 	if (ecc)
301 		*ecc = t4_read_reg64(adap, MC_DATA(16));
302 #undef MC_DATA
303 	return 0;
304 }
305 
306 /**
307  *	t4_edc_read - read from EDC through backdoor accesses
308  *	@adap: the adapter
309  *	@idx: which EDC to access
310  *	@addr: address of first byte requested
311  *	@data: 64 bytes of data containing the requested address
312  *	@ecc: where to store the corresponding 64-bit ECC word
313  *
314  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
315  *	that covers the requested address @addr.  If @ecc is not %NULL it
316  *	is assigned the 64-bit ECC word for the read data.
317  */
318 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
319 {
320 	int i;
321 
322 	idx *= EDC_STRIDE;
323 	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
324 		return -EBUSY;
325 	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
326 	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
327 	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
328 	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
329 		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
330 	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
331 	if (i)
332 		return i;
333 
334 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
335 
336 	for (i = 15; i >= 0; i--)
337 		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
338 	if (ecc)
339 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
340 #undef EDC_DATA
341 	return 0;
342 }
343 
344 /**
345  *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
346  *	@adap: the adapter
347  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
348  *	@addr: address within indicated memory type
349  *	@len: amount of memory to read
350  *	@buf: host memory buffer
351  *
352  *	Reads an [almost] arbitrary memory region in the firmware: the
353  *	firmware memory address, length and host buffer must be aligned on
354  *	32-bit boundaries.  The memory is returned as a raw byte sequence from
355  *	the firmware's memory.  If this memory contains data structures which
356  *	contain multi-byte integers, it's the caller's responsibility to
357  *	perform appropriate byte order conversions.
358  */
359 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
360 		__be32 *buf)
361 {
362 	u32 pos, start, end, offset;
363 	int ret;
364 
365 	/*
366 	 * Argument sanity checks ...
367 	 */
368 	if ((addr & 0x3) || (len & 0x3))
369 		return -EINVAL;
370 
371 	/*
372  *	The underlying EDC/MC read routines read 64 bytes at a time so we
373 	 * need to round down the start and round up the end.  We'll start
374 	 * copying out of the first line at (addr - start) a word at a time.
375 	 */
376 	start = addr & ~(64-1);
377 	end = (addr + len + 64-1) & ~(64-1);
378 	offset = (addr - start)/sizeof(__be32);
379 
380 	for (pos = start; pos < end; pos += 64, offset = 0) {
381 		__be32 data[16];
382 
383 		/*
384 		 * Read the chip's memory block and bail if there's an error.
385 		 */
386 		if (mtype == MEM_MC)
387 			ret = t4_mc_read(adap, pos, data, NULL);
388 		else
389 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
390 		if (ret)
391 			return ret;
392 
393 		/*
394 		 * Copy the data into the caller's memory buffer.
395 		 */
396 		while (offset < 16 && len > 0) {
397 			*buf++ = data[offset++];
398 			len -= sizeof(__be32);
399 		}
400 	}
401 
402 	return 0;
403 }
404 
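/*
 * Illustrative sketch (kept out of the build): fetching a small region of
 * EDC0 with t4_mem_read().  With addr = 0x104 and len = 8 the routine
 * reads the 64-byte line covering 0x100..0x13f and copies out two words
 * starting at word offset (0x104 - 0x100) / 4 = 1.
 */
#if 0
static int example_read_edc0(struct adapter *adap, __be32 *buf)
{
	/* 8 bytes from EDC0 at byte address 0x104; buf must hold 2 words. */
	return t4_mem_read(adap, MEM_EDC0, 0x104, 8, buf);
}
#endif
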
405 /*
406  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
407  * VPD-R header.
408  */
409 struct t4_vpd_hdr {
410 	u8  id_tag;
411 	u8  id_len[2];
412 	u8  id_data[ID_LEN];
413 	u8  vpdr_tag;
414 	u8  vpdr_len[2];
415 };
416 
417 /*
418  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
419  */
420 #define EEPROM_MAX_RD_POLL 40
421 #define EEPROM_MAX_WR_POLL 6
422 #define EEPROM_STAT_ADDR   0x7bfc
423 #define VPD_BASE           0x400
424 #define VPD_BASE_OLD       0
425 #define VPD_LEN            512
426 #define VPD_INFO_FLD_HDR_SIZE	3
427 
428 /**
429  *	t4_seeprom_read - read a serial EEPROM location
430  *	@adapter: adapter to read
431  *	@addr: EEPROM virtual address
432  *	@data: where to store the read data
433  *
434  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
435  *	VPD capability.  Note that this function must be called with a virtual
436  *	address.
437  */
438 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
439 {
440 	u16 val;
441 	int attempts = EEPROM_MAX_RD_POLL;
442 	unsigned int base = adapter->params.pci.vpd_cap_addr;
443 
444 	if (addr >= EEPROMVSIZE || (addr & 3))
445 		return -EINVAL;
446 
447 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
448 	do {
449 		udelay(10);
450 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
451 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
452 
453 	if (!(val & PCI_VPD_ADDR_F)) {
454 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
455 		return -EIO;
456 	}
457 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
458 	*data = le32_to_cpu(*data);
459 	return 0;
460 }
461 
462 /**
463  *	t4_seeprom_write - write a serial EEPROM location
464  *	@adapter: adapter to write
465  *	@addr: virtual EEPROM address
466  *	@data: value to write
467  *
468  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
469  *	VPD capability.  Note that this function must be called with a virtual
470  *	address.
471  */
472 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
473 {
474 	u16 val;
475 	int attempts = EEPROM_MAX_WR_POLL;
476 	unsigned int base = adapter->params.pci.vpd_cap_addr;
477 
478 	if (addr >= EEPROMVSIZE || (addr & 3))
479 		return -EINVAL;
480 
481 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
482 				 cpu_to_le32(data));
483 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
484 				 (u16)addr | PCI_VPD_ADDR_F);
485 	do {
486 		msleep(1);
487 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
488 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
489 
490 	if (val & PCI_VPD_ADDR_F) {
491 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
492 		return -EIO;
493 	}
494 	return 0;
495 }
496 
497 /**
498  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
499  *	@phys_addr: the physical EEPROM address
500  *	@fn: the PCI function number
501  *	@sz: size of function-specific area
502  *
503  *	Translate a physical EEPROM address to virtual.  The first 1K is
504  *	accessed through virtual addresses starting at 31K, the rest is
505  *	accessed through virtual addresses starting at 0.
506  *
507  *	The mapping is as follows:
508  *	[0..1K) -> [31K..32K)
509  *	[1K..1K+A) -> [ES-A..ES)
510  *	[1K+A..ES) -> [0..ES-A-1K)
511  *
512  *	where A = @fn * @sz, and ES = EEPROM size.
513  */
514 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
515 {
516 	fn *= sz;
517 	if (phys_addr < 1024)
518 		return phys_addr + (31 << 10);
519 	if (phys_addr < 1024 + fn)
520 		return EEPROMSIZE - fn + phys_addr - 1024;
521 	if (phys_addr < EEPROMSIZE)
522 		return phys_addr - 1024 - fn;
523 	return -EINVAL;
524 }
525 
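/*
 * Worked example (kept out of the build) of the mapping above with
 * fn = 1 and sz = 1024, so A = 1024; ES is the device's EEPROMSIZE.
 */
#if 0
static void example_eeprom_ptov(void)
{
	int v0 = t4_eeprom_ptov(0, 1, 1024);    /* -> 31744, the 31K window */
	int v1 = t4_eeprom_ptov(1024, 1, 1024); /* -> EEPROMSIZE - 1024 */
	int v2 = t4_eeprom_ptov(2048, 1, 1024); /* -> 0 */
}
#endif
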
526 /**
527  *	t4_seeprom_wp - enable/disable EEPROM write protection
528  *	@adapter: the adapter
529  *	@enable: whether to enable or disable write protection
530  *
531  *	Enables or disables write protection on the serial EEPROM.
532  */
533 int t4_seeprom_wp(struct adapter *adapter, int enable)
534 {
535 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
536 }
537 
538 /**
539  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
540  *	@v: Pointer to buffered vpd data structure
541  *	@kw: The keyword to search for
542  *
543  *	Returns the offset of the information field keyword's value in the
544  *	VPD, or -ENOENT if the keyword is not found.
545  */
546 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
547 {
548 	int i;
549 	unsigned int offset, len;
550 	const u8 *buf = &v->id_tag;
551 	const u8 *vpdr_len = &v->vpdr_tag;
552 	offset = sizeof(struct t4_vpd_hdr);
553 	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
554 
555 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
556 		return -ENOENT;
557 	}
558 
559 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
560 		if (memcmp(buf + i, kw, 2) == 0) {
561 			i += VPD_INFO_FLD_HDR_SIZE;
562 			return i;
563 		}
564 
565 		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
566 	}
567 
568 	return -ENOENT;
569 }
570 
571 
572 /**
573  *	get_vpd_params - read VPD parameters from VPD EEPROM
574  *	@adapter: adapter to read
575  *	@p: where to store the parameters
576  *
577  *	Reads card parameters stored in VPD EEPROM.
578  */
579 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
580 {
581 	int i, ret, addr;
582 	int ec, sn, pn, na;
583 	u8 vpd[VPD_LEN], csum;
584 	const struct t4_vpd_hdr *v;
585 
586 	/*
587 	 * Card information normally starts at VPD_BASE but early cards had
588 	 * it at 0.
589 	 */
590 	if ((ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd))) != 0)
		return ret;
591 	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
592 
593 	for (i = 0; i < sizeof(vpd); i += 4) {
594 		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
595 		if (ret)
596 			return ret;
597 	}
598 	v = (const struct t4_vpd_hdr *)vpd;
599 
600 #define FIND_VPD_KW(var, name) do { \
601 	var = get_vpd_keyword_val(v, name); \
602 	if (var < 0) { \
603 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
604 		return -EINVAL; \
605 	} \
606 } while (0)
607 
608 	FIND_VPD_KW(i, "RV");
609 	for (csum = 0; i >= 0; i--)
610 		csum += vpd[i];
611 
612 	if (csum) {
613 		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
614 		return -EINVAL;
615 	}
616 	FIND_VPD_KW(ec, "EC");
617 	FIND_VPD_KW(sn, "SN");
618 	FIND_VPD_KW(pn, "PN");
619 	FIND_VPD_KW(na, "NA");
620 #undef FIND_VPD_KW
621 
622 	memcpy(p->id, v->id_data, ID_LEN);
623 	strstrip(p->id);
624 	memcpy(p->ec, vpd + ec, EC_LEN);
625 	strstrip(p->ec);
626 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
627 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
628 	strstrip(p->sn);
629 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
630 	strstrip((char *)p->pn);
631 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
632 	strstrip((char *)p->na);
633 
634 	return 0;
635 }
636 
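/*
 * Illustrative sketch (kept out of the build): the "RV" checksum rule
 * that get_vpd_params() enforces above.  The RV keyword's value byte is
 * chosen so that all VPD bytes from offset 0 through the RV value sum to
 * zero modulo 256; a non-zero sum therefore means corruption.
 */
#if 0
static int example_vpd_csum_ok(const u8 *vpd, int rv_value_offset)
{
	u8 csum = 0;
	int i;

	for (i = rv_value_offset; i >= 0; i--)
		csum += vpd[i];
	return csum == 0;
}
#endif
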
637 /* serial flash and firmware constants and flash config file constants */
638 enum {
639 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
640 
641 	/* flash command opcodes */
642 	SF_PROG_PAGE    = 2,          /* program page */
643 	SF_WR_DISABLE   = 4,          /* disable writes */
644 	SF_RD_STATUS    = 5,          /* read status register */
645 	SF_WR_ENABLE    = 6,          /* enable writes */
646 	SF_RD_DATA_FAST = 0xb,        /* read flash */
647 	SF_RD_ID        = 0x9f,       /* read ID */
648 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
649 };
650 
651 /**
652  *	sf1_read - read data from the serial flash
653  *	@adapter: the adapter
654  *	@byte_cnt: number of bytes to read
655  *	@cont: whether another operation will be chained
656  *	@lock: whether to lock SF for PL access only
657  *	@valp: where to store the read data
658  *
659  *	Reads up to 4 bytes of data from the serial flash.  The location of
660  *	the read needs to be specified prior to calling this by issuing the
661  *	appropriate commands to the serial flash.
662  */
663 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
664 		    int lock, u32 *valp)
665 {
666 	int ret;
667 
668 	if (!byte_cnt || byte_cnt > 4)
669 		return -EINVAL;
670 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
671 		return -EBUSY;
672 	t4_write_reg(adapter, A_SF_OP,
673 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
674 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
675 	if (!ret)
676 		*valp = t4_read_reg(adapter, A_SF_DATA);
677 	return ret;
678 }
679 
680 /**
681  *	sf1_write - write data to the serial flash
682  *	@adapter: the adapter
683  *	@byte_cnt: number of bytes to write
684  *	@cont: whether another operation will be chained
685  *	@lock: whether to lock SF for PL access only
686  *	@val: value to write
687  *
688  *	Writes up to 4 bytes of data to the serial flash.  The location of
689  *	the write needs to be specified prior to calling this by issuing the
690  *	appropriate commands to the serial flash.
691  */
692 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
693 		     int lock, u32 val)
694 {
695 	if (!byte_cnt || byte_cnt > 4)
696 		return -EINVAL;
697 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
698 		return -EBUSY;
699 	t4_write_reg(adapter, A_SF_DATA, val);
700 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
701 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
702 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
703 }
704 
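/*
 * Illustrative sketch (kept out of the build): chaining sf1_write() and
 * sf1_read() to issue a Read-ID (SF_RD_ID) sequence.  @cont keeps the
 * flash selected between the opcode and data phases; clearing A_SF_OP at
 * the end releases the lock taken with @lock.  The 3-byte ID length is
 * an assumption about the part's JEDEC ID.
 */
#if 0
static int example_read_flash_id(struct adapter *adapter, u32 *id)
{
	int ret;

	ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);	/* opcode, chained */
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, id);	/* 3 ID bytes */
	t4_write_reg(adapter, A_SF_OP, 0);		/* unlock SF */
	return ret;
}
#endif
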
705 /**
706  *	flash_wait_op - wait for a flash operation to complete
707  *	@adapter: the adapter
708  *	@attempts: max number of polls of the status register
709  *	@delay: delay between polls in ms
710  *
711  *	Wait for a flash operation to complete by polling the status register.
712  */
713 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
714 {
715 	int ret;
716 	u32 status;
717 
718 	while (1) {
719 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
720 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
721 			return ret;
722 		if (!(status & 1))
723 			return 0;
724 		if (--attempts == 0)
725 			return -EAGAIN;
726 		if (delay)
727 			msleep(delay);
728 	}
729 }
730 
731 /**
732  *	t4_read_flash - read words from serial flash
733  *	@adapter: the adapter
734  *	@addr: the start address for the read
735  *	@nwords: how many 32-bit words to read
736  *	@data: where to store the read data
737  *	@byte_oriented: whether to store data as bytes or as words
738  *
739  *	Read the specified number of 32-bit words from the serial flash.
740  *	If @byte_oriented is set the read data is stored as a byte array
741  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
742  *	natural endianness.
743  */
744 int t4_read_flash(struct adapter *adapter, unsigned int addr,
745 		  unsigned int nwords, u32 *data, int byte_oriented)
746 {
747 	int ret;
748 
749 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
750 		return -EINVAL;
751 
752 	addr = swab32(addr) | SF_RD_DATA_FAST;
753 
754 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
755 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
756 		return ret;
757 
758 	for ( ; nwords; nwords--, data++) {
759 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
760 		if (nwords == 1)
761 			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
762 		if (ret)
763 			return ret;
764 		if (byte_oriented)
765 			*data = htonl(*data);
766 	}
767 	return 0;
768 }
769 
770 /**
771  *	t4_write_flash - write up to a page of data to the serial flash
772  *	@adapter: the adapter
773  *	@addr: the start address to write
774  *	@n: length of data to write in bytes
775  *	@data: the data to write
776  *	@byte_oriented: whether to store data as bytes or as words
777  *
778  *	Writes up to a page of data (256 bytes) to the serial flash starting
779  *	at the given address.  All the data must be written to the same page.
780  *	If @byte_oriented is set the write data is stored as a byte stream
781  *	(i.e., it matches what is on disk), otherwise in big-endian.
782  */
783 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
784 			  unsigned int n, const u8 *data, int byte_oriented)
785 {
786 	int ret;
787 	u32 buf[SF_PAGE_SIZE / 4];
788 	unsigned int i, c, left, val, offset = addr & 0xff;
789 
790 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
791 		return -EINVAL;
792 
793 	val = swab32(addr) | SF_PROG_PAGE;
794 
795 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
796 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
797 		goto unlock;
798 
799 	for (left = n; left; left -= c) {
800 		c = min(left, 4U);
801 		for (val = 0, i = 0; i < c; ++i)
802 			val = (val << 8) + *data++;
803 
804 		if (!byte_oriented)
805 			val = htonl(val);
806 
807 		ret = sf1_write(adapter, c, c != left, 1, val);
808 		if (ret)
809 			goto unlock;
810 	}
811 	ret = flash_wait_op(adapter, 8, 1);
812 	if (ret)
813 		goto unlock;
814 
815 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
816 
817 	/* Read the page to verify the write succeeded */
818 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
819 			    byte_oriented);
820 	if (ret)
821 		return ret;
822 
823 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
824 		CH_ERR(adapter, "failed to correctly write the flash page "
825 		       "at %#x\n", addr);
826 		return -EIO;
827 	}
828 	return 0;
829 
830 unlock:
831 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
832 	return ret;
833 }
834 
835 /**
836  *	t4_get_fw_version - read the firmware version
837  *	@adapter: the adapter
838  *	@vers: where to place the version
839  *
840  *	Reads the FW version from flash.
841  */
842 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
843 {
844 	return t4_read_flash(adapter,
845 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
846 			     vers, 0);
847 }
848 
849 /**
850  *	t4_get_tp_version - read the TP microcode version
851  *	@adapter: the adapter
852  *	@vers: where to place the version
853  *
854  *	Reads the TP microcode version from flash.
855  */
856 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
857 {
858 	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
859 							      tp_microcode_ver),
860 			     1, vers, 0);
861 }
862 
863 /**
864  *	t4_check_fw_version - check if the FW is compatible with this driver
865  *	@adapter: the adapter
866  *
867  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
868  *	if there's exact match, a negative error if the version could not be
869  *	read or there's a major version mismatch, and a positive value if the
870  *	expected major version is found but there's a minor version mismatch.
871  */
872 int t4_check_fw_version(struct adapter *adapter)
873 {
874 	int ret, major, minor, micro;
875 
876 	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
877 	if (!ret)
878 		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
879 	if (ret)
880 		return ret;
881 
882 	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
883 	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
884 	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
885 
886 	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
887 		CH_ERR(adapter, "card FW has major version %u, driver wants "
888 		       "%u\n", major, FW_VERSION_MAJOR);
889 		return -EINVAL;
890 	}
891 
892 	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
893 		return 0;                                   /* perfect match */
894 
895 	/* Minor/micro version mismatch.  Report it but often it's OK. */
896 	return 1;
897 }
898 
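/*
 * Illustrative sketch (kept out of the build): unpacking the 32-bit
 * version word cached by t4_check_fw_version(), using the same
 * G_FW_HDR_FW_VER_* accessors it uses above.
 */
#if 0
static void example_print_fw_version(struct adapter *adapter)
{
	u32 v = adapter->params.fw_vers;

	CH_ALERT(adapter, "firmware version %u.%u.%u\n",
		 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
		 G_FW_HDR_FW_VER_MICRO(v));
}
#endif
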
899 /**
900  *	t4_flash_erase_sectors - erase a range of flash sectors
901  *	@adapter: the adapter
902  *	@start: the first sector to erase
903  *	@end: the last sector to erase
904  *
905  *	Erases the sectors in the given inclusive range.
906  */
907 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
908 {
909 	int ret = 0;
910 
911 	while (start <= end) {
912 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
913 		    (ret = sf1_write(adapter, 4, 0, 1,
914 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
915 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
916 			CH_ERR(adapter, "erase of flash sector %d failed, "
917 			       "error %d\n", start, ret);
918 			break;
919 		}
920 		start++;
921 	}
922 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
923 	return ret;
924 }
925 
926 /**
927  *	t4_flash_cfg_addr - return the address of the flash configuration file
928  *	@adapter: the adapter
929  *
930  *	Return the address within the flash where the Firmware Configuration
931  *	File is stored.
932  */
933 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
934 {
935 	if (adapter->params.sf_size == 0x100000)
936 		return FLASH_FPGA_CFG_START;
937 	else
938 		return FLASH_CFG_START;
939 }
940 
941 /**
942  *	t4_load_cfg - download config file
943  *	@adap: the adapter
944  *	@cfg_data: the cfg text file to write
945  *	@size: text file size
946  *
947  *	Write the supplied config text file to the card's serial flash.
948  */
949 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
950 {
951 	int ret, i, n;
952 	unsigned int addr;
953 	unsigned int flash_cfg_start_sec;
954 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
955 
956 	addr = t4_flash_cfg_addr(adap);
957 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
958 
959 	if (size > FLASH_CFG_MAX_SIZE) {
960 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
961 		       FLASH_CFG_MAX_SIZE);
962 		return -EFBIG;
963 	}
964 
965 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
966 			 sf_sec_size);
967 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
968 				     flash_cfg_start_sec + i - 1);
969 	/*
970 	 * If size == 0 then we're simply erasing the FLASH sectors associated
971 	 * with the on-adapter Firmware Configuration File.
972 	 */
973 	if (ret || size == 0)
974 		goto out;
975 
976 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
977 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
978 		if ((size - i) < SF_PAGE_SIZE)
979 			n = size - i;
980 		else
981 			n = SF_PAGE_SIZE;
982 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
983 		if (ret)
984 			goto out;
985 
986 		addr += SF_PAGE_SIZE;
987 		cfg_data += SF_PAGE_SIZE;
988 	}
989 
990 out:
991 	if (ret)
992 		CH_ERR(adap, "config file %s failed %d\n",
993 		       (size == 0 ? "clear" : "download"), ret);
994 	return ret;
995 }
996 
997 
998 /**
999  *	t4_load_fw - download firmware
1000  *	@adap: the adapter
1001  *	@fw_data: the firmware image to write
1002  *	@size: image size
1003  *
1004  *	Write the supplied firmware image to the card's serial flash.
1005  */
1006 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1007 {
1008 	u32 csum;
1009 	int ret, addr;
1010 	unsigned int i;
1011 	u8 first_page[SF_PAGE_SIZE];
1012 	const u32 *p = (const u32 *)fw_data;
1013 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1014 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1015 
1016 	if (!size) {
1017 		CH_ERR(adap, "FW image has no data\n");
1018 		return -EINVAL;
1019 	}
1020 	if (size & 511) {
1021 		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1022 		return -EINVAL;
1023 	}
1024 	if (ntohs(hdr->len512) * 512 != size) {
1025 		CH_ERR(adap, "FW image size differs from size in FW header\n");
1026 		return -EINVAL;
1027 	}
1028 	if (size > FLASH_FW_MAX_SIZE) {
1029 		CH_ERR(adap, "FW image too large, max is %u bytes\n",
1030 		       FLASH_FW_MAX_SIZE);
1031 		return -EFBIG;
1032 	}
1033 
1034 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1035 		csum += ntohl(p[i]);
1036 
1037 	if (csum != 0xffffffff) {
1038 		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1039 		       csum);
1040 		return -EINVAL;
1041 	}
1042 
1043 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1044 	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1045 	    FLASH_FW_START_SEC + i - 1);
1046 	if (ret)
1047 		goto out;
1048 
1049 	/*
1050 	 * We write the correct version at the end so the driver can see a bad
1051 	 * version if the FW write fails.  Start by writing a copy of the
1052 	 * first page with a bad version.
1053 	 */
1054 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1055 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1056 	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
1057 	if (ret)
1058 		goto out;
1059 
1060 	addr = FLASH_FW_START;
1061 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1062 		addr += SF_PAGE_SIZE;
1063 		fw_data += SF_PAGE_SIZE;
1064 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1065 		if (ret)
1066 			goto out;
1067 	}
1068 
1069 	ret = t4_write_flash(adap,
1070 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1071 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1072 out:
1073 	if (ret)
1074 		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1075 	return ret;
1076 }
1077 
1078 /* BIOS boot headers */
1079 typedef struct pci_expansion_rom_header {
1080 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1081 	u8	reserved[22]; /* Reserved per processor Architecture data */
1082 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1083 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1084 
1085 /* Legacy PCI Expansion ROM Header */
1086 typedef struct legacy_pci_expansion_rom_header {
1087 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1088 	u8	size512; /* Current Image Size in units of 512 bytes */
1089 	u8	initentry_point[4];
1090 	u8	cksum; /* Checksum computed on the entire Image */
1091 	u8	reserved[16]; /* Reserved */
1092 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1093 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1094 
1095 /* EFI PCI Expansion ROM Header */
1096 typedef struct efi_pci_expansion_rom_header {
1097 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1098 	u8	initialization_size[2]; /* Units 512. Includes this header */
1099 	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1100 	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
1101 	u8	efi_machine_type[2]; /* Machine type from EFI image header */
1102 	u8	compression_type[2]; /* Compression type. */
1103 		/*
1104 		 * Compression type definition
1105 		 * 0x0: uncompressed
1106 		 * 0x1: Compressed
1107 		 * 0x2-0xFFFF: Reserved
1108 		 */
1109 	u8	reserved[8]; /* Reserved */
1110 	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
1111 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1112 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1113 
1114 /* PCI Data Structure Format */
1115 typedef struct pcir_data_structure { /* PCI Data Structure */
1116 	u8	signature[4]; /* Signature. The string "PCIR" */
1117 	u8	vendor_id[2]; /* Vendor Identification */
1118 	u8	device_id[2]; /* Device Identification */
1119 	u8	vital_product[2]; /* Pointer to Vital Product Data */
1120 	u8	length[2]; /* PCIR Data Structure Length */
1121 	u8	revision; /* PCIR Data Structure Revision */
1122 	u8	class_code[3]; /* Class Code */
1123 	u8	image_length[2]; /* Image Length. Multiple of 512B */
1124 	u8	code_revision[2]; /* Revision Level of Code/Data */
1125 	u8	code_type; /* Code Type. */
1126 		/*
1127 		 * PCI Expansion ROM Code Types
1128 		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1129 		 * 0x01: Open Firmware standard for PCI. FCODE
1130 		 * 0x02: Hewlett-Packard PA RISC. HP reserved
1131 		 * 0x03: EFI Image. EFI
1132 		 * 0x04-0xFF: Reserved.
1133 		 */
1134 	u8	indicator; /* Indicator. Identifies the last image in the ROM */
1135 	u8	reserved[2]; /* Reserved */
1136 } pcir_data_t; /* PCI_DATA_STRUCTURE */
1137 
1138 /* BOOT constants */
1139 enum {
1140 	BOOT_FLASH_BOOT_ADDR = 0x0, /* start address of boot image in flash */
1141 	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
1142 	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
1143 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1144 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* maximum boot image size: 1024 * 512B */
1145 	VENDOR_ID = 0x1425, /* Vendor ID */
1146 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1147 };
1148 
1149 /*
1150  *	modify_device_id - Modifies the device ID of the Boot BIOS image
1151  *	@device_id: the device ID to write.
1152  *	@boot_data: the boot image to modify.
1153  *
1154  *	Write the supplied device ID to the boot BIOS image.
1155  */
1156 static void modify_device_id(int device_id, u8 *boot_data)
1157 {
1158 	legacy_pci_exp_rom_header_t *header;
1159 	pcir_data_t *pcir_header;
1160 	u32 cur_header = 0;
1161 
1162 	/*
1163 	 * Loop through all chained images and change the device ID's
1164 	 */
1165 	while (1) {
1166 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1167 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
1168 		    le16_to_cpu(*(u16*)header->pcir_offset)];
1169 
1170 		/*
1171 		 * Only modify the Device ID if code type is Legacy or HP.
1172 		 * 0x00: Okay to modify
1173 		 * 0x01: FCODE. Do not modify
1174 		 * 0x03: Okay to modify
1175 		 * 0x04-0xFF: Do not modify
1176 		 */
1177 		if (pcir_header->code_type == 0x00) {
1178 			u8 csum = 0;
1179 			int i;
1180 
1181 			/*
1182 			 * Modify Device ID to match current adapter
1183 			 */
1184 			*(u16*) pcir_header->device_id = device_id;
1185 
1186 			/*
1187 			 * Set checksum temporarily to 0.
1188 			 * We will recalculate it later.
1189 			 */
1190 			header->cksum = 0x0;
1191 
1192 			/*
1193 			 * Calculate and update checksum
1194 			 */
1195 			for (i = 0; i < (header->size512 * 512); i++)
1196 				csum += (u8)boot_data[cur_header + i];
1197 
1198 			/*
1199 			 * Invert summed value to create the checksum
1200 			 * Writing new checksum value directly to the boot data
1201 			 */
1202 			boot_data[cur_header + 7] = -csum;
1203 
1204 		} else if (pcir_header->code_type == 0x03) {
1205 
1206 			/*
1207 			 * Modify Device ID to match current adapter
1208 			 */
1209 			*(u16*) pcir_header->device_id = device_id;
1210 
1211 		}
1212 
1213 
1214 		/*
1215 		 * Check indicator element to identify if this is the last
1216 		 * image in the ROM.
1217 		 */
1218 		if (pcir_header->indicator & 0x80)
1219 			break;
1220 
1221 		/*
1222 		 * Move header pointer up to the next image in the ROM.
1223 		 */
1224 		cur_header += header->size512 * 512;
1225 	}
1226 }
1227 
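/*
 * Illustrative sketch (kept out of the build): the legacy option-ROM
 * checksum rule modify_device_id() re-applies after patching a device
 * ID.  The header's cksum byte is set so that every byte of the image
 * sums to zero modulo 256.
 */
#if 0
static u8 example_rom_cksum(const u8 *image, unsigned int len)
{
	u8 csum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		csum += image[i];
	return -csum;	/* value to store in the header's cksum byte */
}
#endif
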
1228 /*
1229  *	t4_load_boot - download boot flash
1230  *	@adapter: the adapter
1231  *	@boot_data: the boot image to write
1232  *	@boot_addr: offset in flash to write boot_data
1233  *	@size: image size
1234  *
1235  *	Write the supplied boot image to the card's serial flash.
1236  *	The boot image has the following sections: a 28-byte header and the
1237  *	boot image.
1238  */
1239 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1240 		 unsigned int boot_addr, unsigned int size)
1241 {
1242 	pci_exp_rom_header_t *header;
1243 	int pcir_offset;
1244 	pcir_data_t *pcir_header;
1245 	int ret, addr;
1246 	uint16_t device_id;
1247 	unsigned int i;
1248 	unsigned int boot_sector = boot_addr * 1024;
1249 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1250 
1251 	/*
1252 	 * Make sure the boot image does not encroach on the firmware region
1253 	 */
1254 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1255 		CH_ERR(adap, "boot image encroaching on firmware region\n");
1256 		return -EFBIG;
1257 	}
1258 
1259 	/*
1260 	 * Number of sectors spanned
1261 	 */
1262 	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1263 			sf_sec_size);
1264 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1265 				     (boot_sector >> 16) + i - 1);
1266 
1267 	/*
1268 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1269 	 * with the on-adapter option ROM file
1270 	 */
1271 	if (ret || (size == 0))
1272 		goto out;
1273 
1274 	/* Get boot header */
1275 	header = (pci_exp_rom_header_t *)boot_data;
1276 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1277 	/* PCIR Data Structure */
1278 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1279 
1280 	/*
1281 	 * Perform some primitive sanity testing to avoid accidentally
1282 	 * writing garbage over the boot sectors.  We ought to check for
1283 	 * more but it's not worth it for now ...
1284 	 */
1285 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1286 		CH_ERR(adap, "boot image too small/large\n");
1287 		return -EFBIG;
1288 	}
1289 
1290 	/*
1291 	 * Check BOOT ROM header signature
1292 	 */
1293 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1294 		CH_ERR(adap, "Boot image missing signature\n");
1295 		return -EINVAL;
1296 	}
1297 
1298 	/*
1299 	 * Check PCI header signature
1300 	 */
1301 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1302 		CH_ERR(adap, "PCI header missing signature\n");
1303 		return -EINVAL;
1304 	}
1305 
1306 	/*
1307 	 * Check Vendor ID matches Chelsio ID
1308 	 */
1309 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1310 		CH_ERR(adap, "Vendor ID does not match Chelsio ID\n");
1311 		return -EINVAL;
1312 	}
1313 
1314 	/*
1315 	 * Retrieve adapter's device ID
1316 	 */
1317 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1318 	/* We want to deal with PF 0, so strip off the PF-4 indicator */
1319 	device_id = (device_id & 0xff) | 0x4000;
1320 
1321 	/*
1322 	 * Check PCIE Device ID
1323 	 */
1324 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1325 		/*
1326 		 * Change the device ID in the Boot BIOS image to match
1327 		 * the Device ID of the current adapter.
1328 		 */
1329 		modify_device_id(device_id, boot_data);
1330 	}
1331 
1332 	/*
1333 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1334 	 * we finish copying the rest of the boot image. This will ensure
1335 	 * that the BIOS boot header will only be written if the boot image
1336 	 * was written in full.
1337 	 */
1338 	addr = boot_sector;
1339 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1340 		addr += SF_PAGE_SIZE;
1341 		boot_data += SF_PAGE_SIZE;
1342 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1343 		if (ret)
1344 			goto out;
1345 	}
1346 
1347 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, (const u8 *)header, 0);
1348 
1349 out:
1350 	if (ret)
1351 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1352 	return ret;
1353 }
1354 
1355 /**
1356  *	t4_read_cimq_cfg - read CIM queue configuration
1357  *	@adap: the adapter
1358  *	@base: holds the queue base addresses in bytes
1359  *	@size: holds the queue sizes in bytes
1360  *	@thres: holds the queue full thresholds in bytes
1361  *
1362  *	Returns the current configuration of the CIM queues, starting with
1363  *	the IBQs, then the OBQs.
1364  */
1365 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1366 {
1367 	unsigned int i, v;
1368 
1369 	for (i = 0; i < CIM_NUM_IBQ; i++) {
1370 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1371 			     V_QUENUMSELECT(i));
1372 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1373 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1374 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1375 		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1376 	}
1377 	for (i = 0; i < CIM_NUM_OBQ; i++) {
1378 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1379 			     V_QUENUMSELECT(i));
1380 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1381 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1382 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1383 	}
1384 }
1385 
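/*
 * Illustrative sketch (kept out of the build): sizing the output arrays
 * for t4_read_cimq_cfg().  It fills CIM_NUM_IBQ + CIM_NUM_OBQ entries of
 * @base and @size but only CIM_NUM_IBQ entries of @thres, since OBQs
 * have no full threshold.
 */
#if 0
static void example_read_cimq_cfg(struct adapter *adap)
{
	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ];
	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ];
	u16 thres[CIM_NUM_IBQ];

	t4_read_cimq_cfg(adap, base, size, thres);
}
#endif
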
1386 /**
1387  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1388  *	@adap: the adapter
1389  *	@qid: the queue index
1390  *	@data: where to store the queue contents
1391  *	@n: capacity of @data in 32-bit words
1392  *
1393  *	Reads the contents of the selected CIM queue starting at address 0 up
1394  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1395  *	error and the number of 32-bit words actually read on success.
1396  */
1397 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1398 {
1399 	int i, err;
1400 	unsigned int addr;
1401 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1402 
1403 	if (qid > 5 || (n & 3))
1404 		return -EINVAL;
1405 
1406 	addr = qid * nwords;
1407 	if (n > nwords)
1408 		n = nwords;
1409 
1410 	for (i = 0; i < n; i++, addr++) {
1411 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1412 			     F_IBQDBGEN);
1413 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1414 				      2, 1);
1415 		if (err)
1416 			return err;
1417 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1418 	}
1419 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1420 	return i;
1421 }
1422 
1423 /**
1424  *	t4_read_cim_obq - read the contents of a CIM outbound queue
1425  *	@adap: the adapter
1426  *	@qid: the queue index
1427  *	@data: where to store the queue contents
1428  *	@n: capacity of @data in 32-bit words
1429  *
1430  *	Reads the contents of the selected CIM queue starting at address 0 up
1431  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1432  *	error and the number of 32-bit words actually read on success.
1433  */
1434 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1435 {
1436 	int i, err;
1437 	unsigned int addr, v, nwords;
1438 
1439 	if (qid > 5 || (n & 3))
1440 		return -EINVAL;
1441 
1442 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1443 		     V_QUENUMSELECT(qid));
1444 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1445 
1446 	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
1447 	nwords = G_CIMQSIZE(v) * 64;  /* same */
1448 	if (n > nwords)
1449 		n = nwords;
1450 
1451 	for (i = 0; i < n; i++, addr++) {
1452 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1453 			     F_OBQDBGEN);
1454 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1455 				      2, 1);
1456 		if (err)
1457 			return err;
1458 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1459 	}
1460 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1461 	return i;
1462 }
1463 
1464 enum {
1465 	CIM_QCTL_BASE     = 0,
1466 	CIM_CTL_BASE      = 0x2000,
1467 	CIM_PBT_ADDR_BASE = 0x2800,
1468 	CIM_PBT_LRF_BASE  = 0x3000,
1469 	CIM_PBT_DATA_BASE = 0x3800
1470 };
1471 
1472 /**
1473  *	t4_cim_read - read a block from CIM internal address space
1474  *	@adap: the adapter
1475  *	@addr: the start address within the CIM address space
1476  *	@n: number of words to read
1477  *	@valp: where to store the result
1478  *
1479  *	Reads a block of 4-byte words from the CIM internal address space.
1480  */
1481 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1482 		unsigned int *valp)
1483 {
1484 	int ret = 0;
1485 
1486 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1487 		return -EBUSY;
1488 
1489 	for ( ; !ret && n--; addr += 4) {
1490 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1491 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1492 				      0, 5, 2);
1493 		if (!ret)
1494 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1495 	}
1496 	return ret;
1497 }
1498 
1499 /**
1500  *	t4_cim_write - write a block into CIM internal address space
1501  *	@adap: the adapter
1502  *	@addr: the start address within the CIM address space
1503  *	@n: number of words to write
1504  *	@valp: set of values to write
1505  *
1506  *	Writes a block of 4-byte words into the CIM internal address space.
1507  */
1508 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1509 		 const unsigned int *valp)
1510 {
1511 	int ret = 0;
1512 
1513 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1514 		return -EBUSY;
1515 
1516 	for ( ; !ret && n--; addr += 4) {
1517 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1518 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1519 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1520 				      0, 5, 2);
1521 	}
1522 	return ret;
1523 }
1524 
1525 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1526 {
1527 	return t4_cim_write(adap, addr, 1, &val);
1528 }
1529 
1530 /**
1531  *	t4_cim_ctl_read - read a block from CIM control region
1532  *	@adap: the adapter
1533  *	@addr: the start address within the CIM control region
1534  *	@n: number of words to read
1535  *	@valp: where to store the result
1536  *
1537  *	Reads a block of 4-byte words from the CIM control region.
1538  */
1539 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1540 		    unsigned int *valp)
1541 {
1542 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1543 }
1544 
1545 /**
1546  *	t4_cim_read_la - read CIM LA capture buffer
1547  *	@adap: the adapter
1548  *	@la_buf: where to store the LA data
1549  *	@wrptr: the HW write pointer within the capture buffer
1550  *
1551  *	Reads the contents of the CIM LA buffer with the most recent entry at
1552  *	the end of the returned data and with the entry at @wrptr first.
1553  *	We try to leave the LA in the running state we find it in.
1554  */
1555 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1556 {
1557 	int i, ret;
1558 	unsigned int cfg, val, idx;
1559 
1560 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1561 	if (ret)
1562 		return ret;
1563 
1564 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1565 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1566 		if (ret)
1567 			return ret;
1568 	}
1569 
1570 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1571 	if (ret)
1572 		goto restart;
1573 
1574 	idx = G_UPDBGLAWRPTR(val);
1575 	if (wrptr)
1576 		*wrptr = idx;
1577 
1578 	for (i = 0; i < adap->params.cim_la_size; i++) {
1579 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1580 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1581 		if (ret)
1582 			break;
1583 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1584 		if (ret)
1585 			break;
1586 		if (val & F_UPDBGLARDEN) {
1587 			ret = -ETIMEDOUT;
1588 			break;
1589 		}
1590 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1591 		if (ret)
1592 			break;
1593 		idx = (idx + 1) & M_UPDBGLARDPTR;
1594 	}
1595 restart:
1596 	if (cfg & F_UPDBGLAEN) {
1597 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1598 				      cfg & ~F_UPDBGLARDEN);
1599 		if (!ret)
1600 			ret = r;
1601 	}
1602 	return ret;
1603 }
1604 
1605 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1606 			unsigned int *pif_req_wrptr,
1607 			unsigned int *pif_rsp_wrptr)
1608 {
1609 	int i, j;
1610 	u32 cfg, val, req, rsp;
1611 
1612 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1613 	if (cfg & F_LADBGEN)
1614 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1615 
1616 	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1617 	req = G_POLADBGWRPTR(val);
1618 	rsp = G_PILADBGWRPTR(val);
1619 	if (pif_req_wrptr)
1620 		*pif_req_wrptr = req;
1621 	if (pif_rsp_wrptr)
1622 		*pif_rsp_wrptr = rsp;
1623 
1624 	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1625 		for (j = 0; j < 6; j++) {
1626 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1627 				     V_PILADBGRDPTR(rsp));
1628 			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1629 			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1630 			req++;
1631 			rsp++;
1632 		}
1633 		req = (req + 2) & M_POLADBGRDPTR;
1634 		rsp = (rsp + 2) & M_PILADBGRDPTR;
1635 	}
1636 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1637 }
1638 
1639 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1640 {
1641 	u32 cfg;
1642 	int i, j, idx;
1643 
1644 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1645 	if (cfg & F_LADBGEN)
1646 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1647 
1648 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1649 		for (j = 0; j < 5; j++) {
1650 			idx = 8 * i + j;
1651 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1652 				     V_PILADBGRDPTR(idx));
1653 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1654 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1655 		}
1656 	}
1657 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1658 }
1659 
1660 /**
1661  *	t4_tp_read_la - read TP LA capture buffer
1662  *	@adap: the adapter
1663  *	@la_buf: where to store the LA data
1664  *	@wrptr: the HW write pointer within the capture buffer
1665  *
1666  *	Reads the contents of the TP LA buffer with the most recent entry at
1667  *	the end of the returned data and with the entry at @wrptr first.
1668  *	We leave the LA in the running state we find it in.
1669  */
1670 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1671 {
1672 	bool last_incomplete;
1673 	unsigned int i, cfg, val, idx;
1674 
1675 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1676 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1677 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1678 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1679 
1680 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1681 	idx = G_DBGLAWPTR(val);
1682 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1683 	if (last_incomplete)
1684 		idx = (idx + 1) & M_DBGLARPTR;
1685 	if (wrptr)
1686 		*wrptr = idx;
1687 
1688 	val &= 0xffff;
1689 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1690 	val |= adap->params.tp.la_mask;
1691 
1692 	for (i = 0; i < TPLA_SIZE; i++) {
1693 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1694 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1695 		idx = (idx + 1) & M_DBGLARPTR;
1696 	}
1697 
1698 	/* Wipe out last entry if it isn't valid */
1699 	if (last_incomplete)
1700 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1701 
1702 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1703 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1704 			     cfg | adap->params.tp.la_mask);
1705 }
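
#if 0
/*
 * Editorial sketch (not part of the driver): dumping the TP LA capture
 * read back by t4_tp_read_la().  The TPLA_SIZE entries come back oldest
 * first; kernel printf and a ~1KB stack buffer are assumed acceptable
 * for illustration.
 */
static void example_dump_tp_la(struct adapter *adap)
{
	u64 buf[TPLA_SIZE];
	unsigned int i, wrptr;

	t4_tp_read_la(adap, buf, &wrptr);
	for (i = 0; i < TPLA_SIZE; i++)
		printf("TP LA[%3u]: %#018jx\n", i, (uintmax_t)buf[i]);
}
#endif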
1706 
1707 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1708 {
1709 	unsigned int i, j;
1710 
1711 	for (i = 0; i < 8; i++) {
1712 		u32 *p = la_buf + i;
1713 
1714 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1715 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1716 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1717 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1718 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1719 	}
1720 }
1721 
1722 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1723 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1724 
1725 /**
1726  *	t4_link_start - apply link configuration to MAC/PHY
1727  *	@adap: the adapter
1728  *	@mbox: mbox to use for the FW command, @port: the port id
1729  *	@lc: the requested link configuration
1730  *
1731  *	Set up a port's MAC and PHY according to a desired link configuration.
1732  *	- If the PHY can auto-negotiate first decide what to advertise, then
1733  *	  enable/disable auto-negotiation as desired, and reset.
1734  *	- If the PHY does not auto-negotiate just reset it.
1735  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1736  *	  otherwise do it later based on the outcome of auto-negotiation.
1737  */
1738 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1739 		  struct link_config *lc)
1740 {
1741 	struct fw_port_cmd c;
1742 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1743 
1744 	lc->link_ok = 0;
1745 	if (lc->requested_fc & PAUSE_RX)
1746 		fc |= FW_PORT_CAP_FC_RX;
1747 	if (lc->requested_fc & PAUSE_TX)
1748 		fc |= FW_PORT_CAP_FC_TX;
1749 
1750 	memset(&c, 0, sizeof(c));
1751 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1752 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1753 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1754 				  FW_LEN16(c));
1755 
1756 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1757 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1758 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1759 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1760 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1761 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1762 	} else
1763 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1764 
1765 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1766 }
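
#if 0
/*
 * Editorial sketch: forcing a port to 10G with pause frames and no
 * autonegotiation.  Assumptions for illustration: adap->mbox holds this
 * PF's FW mailbox and a zeroed link_config is a valid starting point.
 */
static int example_force_10g(struct adapter *adap, unsigned int port)
{
	struct link_config lc;

	memset(&lc, 0, sizeof(lc));
	lc.supported = FW_PORT_CAP_SPEED_10G;	/* FW_PORT_CAP_ANEG absent */
	lc.requested_fc = PAUSE_RX | PAUSE_TX;
	return t4_link_start(adap, adap->mbox, port, &lc);
}
#endif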
1767 
1768 /**
1769  *	t4_restart_aneg - restart autonegotiation
1770  *	@adap: the adapter
1771  *	@mbox: mbox to use for the FW command
1772  *	@port: the port id
1773  *
1774  *	Restarts autonegotiation for the selected port.
1775  */
1776 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1777 {
1778 	struct fw_port_cmd c;
1779 
1780 	memset(&c, 0, sizeof(c));
1781 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1782 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1783 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1784 				  FW_LEN16(c));
1785 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1786 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1787 }
1788 
1789 struct intr_info {
1790 	unsigned int mask;       /* bits to check in interrupt status */
1791 	const char *msg;         /* message to print or NULL */
1792 	short stat_idx;          /* stat counter to increment or -1 */
1793 	unsigned short fatal;    /* whether the condition reported is fatal */
1794 };
1795 
1796 /**
1797  *	t4_handle_intr_status - table driven interrupt handler
1798  *	@adapter: the adapter that generated the interrupt
1799  *	@reg: the interrupt status register to process
1800  *	@acts: table of interrupt actions
1801  *
1802  *	A table driven interrupt handler that applies a set of masks to an
1803  *	interrupt status word and performs the corresponding actions if the
1804  *	interrupts described by the mask have occurred.  The actions include
1805  *	optionally emitting a warning or alert message.  The table is terminated
1806  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1807  *	conditions.
1808  */
1809 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1810 				 const struct intr_info *acts)
1811 {
1812 	int fatal = 0;
1813 	unsigned int mask = 0;
1814 	unsigned int status = t4_read_reg(adapter, reg);
1815 
1816 	for ( ; acts->mask; ++acts) {
1817 		if (!(status & acts->mask))
1818 			continue;
1819 		if (acts->fatal) {
1820 			fatal++;
1821 			CH_ALERT(adapter, "%s (0x%x)\n",
1822 				 acts->msg, status & acts->mask);
1823 		} else if (acts->msg)
1824 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1825 					  acts->msg, status & acts->mask);
1826 		mask |= acts->mask;
1827 	}
1828 	status &= mask;
1829 	if (status)                           /* clear processed interrupts */
1830 		t4_write_reg(adapter, reg, status);
1831 	return fatal;
1832 }
1833 
1834 /*
1835  * Interrupt handler for the PCIE module.
1836  */
1837 static void pcie_intr_handler(struct adapter *adapter)
1838 {
1839 	static struct intr_info sysbus_intr_info[] = {
1840 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1841 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1842 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1843 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1844 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1845 		{ 0 }
1846 	};
1847 	static struct intr_info pcie_port_intr_info[] = {
1848 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1849 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1850 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1851 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1852 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1853 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1854 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1855 		{ F_RDPE, "Rx data parity error", -1, 1 },
1856 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1857 		{ 0 }
1858 	};
1859 	static struct intr_info pcie_intr_info[] = {
1860 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1861 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1862 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1863 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1864 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1865 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1866 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1867 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1868 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1869 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1870 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1871 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1872 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1873 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1874 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1875 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1876 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1877 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1878 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1879 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1880 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
1881 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1882 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1883 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1884 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1885 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1886 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1887 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
1888 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
1889 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1890 		  0 },
1891 		{ 0 }
1892 	};
1893 
1894 	int fat;
1895 
1896 	fat = t4_handle_intr_status(adapter,
1897 				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1898 				    sysbus_intr_info) +
1899 	      t4_handle_intr_status(adapter,
1900 				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1901 				    pcie_port_intr_info) +
1902 	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1903 	if (fat)
1904 		t4_fatal_err(adapter);
1905 }
1906 
1907 /*
1908  * TP interrupt handler.
1909  */
1910 static void tp_intr_handler(struct adapter *adapter)
1911 {
1912 	static struct intr_info tp_intr_info[] = {
1913 		{ 0x3fffffff, "TP parity error", -1, 1 },
1914 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1915 		{ 0 }
1916 	};
1917 
1918 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1919 		t4_fatal_err(adapter);
1920 }
1921 
1922 /*
1923  * SGE interrupt handler.
1924  */
1925 static void sge_intr_handler(struct adapter *adapter)
1926 {
1927 	u64 v;
1928 	u32 err;
1929 
1930 	static struct intr_info sge_intr_info[] = {
1931 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
1932 		  "SGE received CPL exceeding IQE size", -1, 1 },
1933 		{ F_ERR_INVALID_CIDX_INC,
1934 		  "SGE GTS CIDX increment too large", -1, 0 },
1935 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1936 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1937 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1938 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1939 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1940 		  0 },
1941 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1942 		  0 },
1943 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1944 		  0 },
1945 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1946 		  0 },
1947 		{ F_ERR_ING_CTXT_PRIO,
1948 		  "SGE too many priority ingress contexts", -1, 0 },
1949 		{ F_ERR_EGR_CTXT_PRIO,
1950 		  "SGE too many priority egress contexts", -1, 0 },
1951 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1952 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1953 		{ 0 }
1954 	};
1955 
1956 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1957 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1958 	if (v) {
1959 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
1960 			 (unsigned long long)v);
1961 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1962 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1963 	}
1964 
1965 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1966 
1967 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1968 	if (err & F_ERROR_QID_VALID) {
1969 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1970 		if (err & F_UNCAPTURED_ERROR)
1971 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
1972 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1973 			     F_UNCAPTURED_ERROR);
1974 	}
1975 
1976 	if (v != 0)
1977 		t4_fatal_err(adapter);
1978 }
1979 
1980 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1981 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1982 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1983 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1984 
1985 /*
1986  * CIM interrupt handler.
1987  */
1988 static void cim_intr_handler(struct adapter *adapter)
1989 {
1990 	static struct intr_info cim_intr_info[] = {
1991 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1992 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1993 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1994 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1995 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1996 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1997 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1998 		{ 0 }
1999 	};
2000 	static struct intr_info cim_upintr_info[] = {
2001 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2002 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2003 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2004 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2005 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2006 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2007 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2008 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2009 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2010 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2011 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2012 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2013 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2014 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2015 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2016 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2017 		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2018 		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2019 		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2020 		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2021 		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2022 		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2023 		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2024 		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2025 		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2026 		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2027 		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2028 		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2029 		{ 0 }
2030 	};
2031 
2032 	int fat;
2033 
2034 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2035 				    cim_intr_info) +
2036 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2037 				    cim_upintr_info);
2038 	if (fat)
2039 		t4_fatal_err(adapter);
2040 }
2041 
2042 /*
2043  * ULP RX interrupt handler.
2044  */
2045 static void ulprx_intr_handler(struct adapter *adapter)
2046 {
2047 	static struct intr_info ulprx_intr_info[] = {
2048 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2049 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2050 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2051 		{ 0 }
2052 	};
2053 
2054 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2055 		t4_fatal_err(adapter);
2056 }
2057 
2058 /*
2059  * ULP TX interrupt handler.
2060  */
2061 static void ulptx_intr_handler(struct adapter *adapter)
2062 {
2063 	static struct intr_info ulptx_intr_info[] = {
2064 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2065 		  0 },
2066 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2067 		  0 },
2068 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2069 		  0 },
2070 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2071 		  0 },
2072 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2073 		{ 0 }
2074 	};
2075 
2076 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2077 		t4_fatal_err(adapter);
2078 }
2079 
2080 /*
2081  * PM TX interrupt handler.
2082  */
2083 static void pmtx_intr_handler(struct adapter *adapter)
2084 {
2085 	static struct intr_info pmtx_intr_info[] = {
2086 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2087 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2088 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2089 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2090 		{ 0xffffff0, "PMTX framing error", -1, 1 },
2091 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2092 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2093 		  1 },
2094 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2095 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2096 		{ 0 }
2097 	};
2098 
2099 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2100 		t4_fatal_err(adapter);
2101 }
2102 
2103 /*
2104  * PM RX interrupt handler.
2105  */
2106 static void pmrx_intr_handler(struct adapter *adapter)
2107 {
2108 	static struct intr_info pmrx_intr_info[] = {
2109 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2110 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2111 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2112 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2113 		  1 },
2114 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2115 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2116 		{ 0 }
2117 	};
2118 
2119 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2120 		t4_fatal_err(adapter);
2121 }
2122 
2123 /*
2124  * CPL switch interrupt handler.
2125  */
2126 static void cplsw_intr_handler(struct adapter *adapter)
2127 {
2128 	static struct intr_info cplsw_intr_info[] = {
2129 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2130 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2131 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2132 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2133 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2134 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2135 		{ 0 }
2136 	};
2137 
2138 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2139 		t4_fatal_err(adapter);
2140 }
2141 
2142 /*
2143  * LE interrupt handler.
2144  */
2145 static void le_intr_handler(struct adapter *adap)
2146 {
2147 	static struct intr_info le_intr_info[] = {
2148 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2149 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2150 		{ F_PARITYERR, "LE parity error", -1, 1 },
2151 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2152 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2153 		{ 0 }
2154 	};
2155 
2156 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2157 		t4_fatal_err(adap);
2158 }
2159 
2160 /*
2161  * MPS interrupt handler.
2162  */
2163 static void mps_intr_handler(struct adapter *adapter)
2164 {
2165 	static struct intr_info mps_rx_intr_info[] = {
2166 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2167 		{ 0 }
2168 	};
2169 	static struct intr_info mps_tx_intr_info[] = {
2170 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2171 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2172 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2173 		  -1, 1 },
2174 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2175 		  -1, 1 },
2176 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2177 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2178 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2179 		{ 0 }
2180 	};
2181 	static struct intr_info mps_trc_intr_info[] = {
2182 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2183 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2184 		  1 },
2185 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2186 		{ 0 }
2187 	};
2188 	static struct intr_info mps_stat_sram_intr_info[] = {
2189 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2190 		{ 0 }
2191 	};
2192 	static struct intr_info mps_stat_tx_intr_info[] = {
2193 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2194 		{ 0 }
2195 	};
2196 	static struct intr_info mps_stat_rx_intr_info[] = {
2197 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2198 		{ 0 }
2199 	};
2200 	static struct intr_info mps_cls_intr_info[] = {
2201 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2202 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2203 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2204 		{ 0 }
2205 	};
2206 
2207 	int fat;
2208 
2209 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2210 				    mps_rx_intr_info) +
2211 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2212 				    mps_tx_intr_info) +
2213 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2214 				    mps_trc_intr_info) +
2215 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2216 				    mps_stat_sram_intr_info) +
2217 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2218 				    mps_stat_tx_intr_info) +
2219 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2220 				    mps_stat_rx_intr_info) +
2221 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2222 				    mps_cls_intr_info);
2223 
2224 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2225 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2226 	if (fat)
2227 		t4_fatal_err(adapter);
2228 }
2229 
2230 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2231 
2232 /*
2233  * EDC/MC interrupt handler.
2234  */
2235 static void mem_intr_handler(struct adapter *adapter, int idx)
2236 {
2237 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2238 
2239 	unsigned int addr, cnt_addr, v;
2240 
2241 	if (idx <= MEM_EDC1) {
2242 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2243 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2244 	} else {
2245 		addr = A_MC_INT_CAUSE;
2246 		cnt_addr = A_MC_ECC_STATUS;
2247 	}
2248 
2249 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2250 	if (v & F_PERR_INT_CAUSE)
2251 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2252 	if (v & F_ECC_CE_INT_CAUSE) {
2253 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2254 
2255 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2256 		CH_WARN_RATELIMIT(adapter,
2257 				  "%u %s correctable ECC data error%s\n",
2258 				  cnt, name[idx], cnt > 1 ? "s" : "");
2259 	}
2260 	if (v & F_ECC_UE_INT_CAUSE)
2261 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2262 			 name[idx]);
2263 
2264 	t4_write_reg(adapter, addr, v);
2265 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2266 		t4_fatal_err(adapter);
2267 }
2268 
2269 /*
2270  * MA interrupt handler.
2271  */
2272 static void ma_intr_handler(struct adapter *adapter)
2273 {
2274 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2275 
2276 	if (status & F_MEM_PERR_INT_CAUSE)
2277 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2278 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2279 	if (status & F_MEM_WRAP_INT_CAUSE) {
2280 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2281 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2282 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2283 			 G_MEM_WRAP_ADDRESS(v) << 4);
2284 	}
2285 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2286 	t4_fatal_err(adapter);
2287 }
2288 
2289 /*
2290  * SMB interrupt handler.
2291  */
2292 static void smb_intr_handler(struct adapter *adap)
2293 {
2294 	static struct intr_info smb_intr_info[] = {
2295 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2296 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2297 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2298 		{ 0 }
2299 	};
2300 
2301 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2302 		t4_fatal_err(adap);
2303 }
2304 
2305 /*
2306  * NC-SI interrupt handler.
2307  */
2308 static void ncsi_intr_handler(struct adapter *adap)
2309 {
2310 	static struct intr_info ncsi_intr_info[] = {
2311 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2312 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2313 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2314 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2315 		{ 0 }
2316 	};
2317 
2318 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2319 		t4_fatal_err(adap);
2320 }
2321 
2322 /*
2323  * XGMAC interrupt handler.
2324  */
2325 static void xgmac_intr_handler(struct adapter *adap, int port)
2326 {
2327 	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2328 
2329 	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2330 	if (!v)
2331 		return;
2332 
2333 	if (v & F_TXFIFO_PRTY_ERR)
2334 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2335 	if (v & F_RXFIFO_PRTY_ERR)
2336 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2337 	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2338 	t4_fatal_err(adap);
2339 }
2340 
2341 /*
2342  * PL interrupt handler.
2343  */
2344 static void pl_intr_handler(struct adapter *adap)
2345 {
2346 	static struct intr_info pl_intr_info[] = {
2347 		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2348 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2349 		{ 0 }
2350 	};
2351 
2352 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2353 		t4_fatal_err(adap);
2354 }
2355 
2356 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2357 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2358 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2359 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2360 
2361 /**
2362  *	t4_slow_intr_handler - control path interrupt handler
2363  *	@adapter: the adapter
2364  *
2365  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2366  *	The designation 'slow' reflects the register reads involved, while
2367  *	data interrupts typically don't involve any MMIOs.
2368  */
2369 int t4_slow_intr_handler(struct adapter *adapter)
2370 {
2371 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2372 
2373 	if (!(cause & GLBL_INTR_MASK))
2374 		return 0;
2375 	if (cause & F_CIM)
2376 		cim_intr_handler(adapter);
2377 	if (cause & F_MPS)
2378 		mps_intr_handler(adapter);
2379 	if (cause & F_NCSI)
2380 		ncsi_intr_handler(adapter);
2381 	if (cause & F_PL)
2382 		pl_intr_handler(adapter);
2383 	if (cause & F_SMB)
2384 		smb_intr_handler(adapter);
2385 	if (cause & F_XGMAC0)
2386 		xgmac_intr_handler(adapter, 0);
2387 	if (cause & F_XGMAC1)
2388 		xgmac_intr_handler(adapter, 1);
2389 	if (cause & F_XGMAC_KR0)
2390 		xgmac_intr_handler(adapter, 2);
2391 	if (cause & F_XGMAC_KR1)
2392 		xgmac_intr_handler(adapter, 3);
2393 	if (cause & F_PCIE)
2394 		pcie_intr_handler(adapter);
2395 	if (cause & F_MC)
2396 		mem_intr_handler(adapter, MEM_MC);
2397 	if (cause & F_EDC0)
2398 		mem_intr_handler(adapter, MEM_EDC0);
2399 	if (cause & F_EDC1)
2400 		mem_intr_handler(adapter, MEM_EDC1);
2401 	if (cause & F_LE)
2402 		le_intr_handler(adapter);
2403 	if (cause & F_TP)
2404 		tp_intr_handler(adapter);
2405 	if (cause & F_MA)
2406 		ma_intr_handler(adapter);
2407 	if (cause & F_PM_TX)
2408 		pmtx_intr_handler(adapter);
2409 	if (cause & F_PM_RX)
2410 		pmrx_intr_handler(adapter);
2411 	if (cause & F_ULP_RX)
2412 		ulprx_intr_handler(adapter);
2413 	if (cause & F_CPL_SWITCH)
2414 		cplsw_intr_handler(adapter);
2415 	if (cause & F_SGE)
2416 		sge_intr_handler(adapter);
2417 	if (cause & F_ULP_TX)
2418 		ulptx_intr_handler(adapter);
2419 
2420 	/* Clear the interrupts just processed for which we are the master. */
2421 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2422 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2423 	return 1;
2424 }
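
#if 0
/*
 * Editorial sketch: where t4_slow_intr_handler() typically sits in a
 * PF interrupt routine; the data-path processing is elided.
 */
static void example_intr(void *arg)
{
	struct adapter *adap = arg;

	(void) t4_slow_intr_handler(adap);	/* error/control events */
	/* ... then service the SGE data queues ... */
}
#endif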
2425 
2426 /**
2427  *	t4_intr_enable - enable interrupts
2428  *	@adapter: the adapter whose interrupts should be enabled
2429  *
2430  *	Enable PF-specific interrupts for the calling function and the top-level
2431  *	interrupt concentrator for global interrupts.  Interrupts are already
2432  *	enabled at each module,	here we just enable the roots of the interrupt
2433  *	hierarchies.
2434  *
2435  *	Note: this function should be called only when the driver manages
2436  *	non-PF-specific interrupts from the various HW modules.  Only one PCI
2437  *	function at a time should be doing this.
2438  */
2439 void t4_intr_enable(struct adapter *adapter)
2440 {
2441 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2442 
2443 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2444 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2445 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2446 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2447 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2448 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2449 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2450 		     F_EGRESS_SIZE_ERR);
2451 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2452 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2453 }
2454 
2455 /**
2456  *	t4_intr_disable - disable interrupts
2457  *	@adapter: the adapter whose interrupts should be disabled
2458  *
2459  *	Disable interrupts.  We only disable the top-level interrupt
2460  *	concentrators.  The caller must be a PCI function managing global
2461  *	interrupts.
2462  */
2463 void t4_intr_disable(struct adapter *adapter)
2464 {
2465 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2466 
2467 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2468 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2469 }
2470 
2471 /**
2472  *	t4_intr_clear - clear all interrupts
2473  *	@adapter: the adapter whose interrupts should be cleared
2474  *
2475  *	Clears all interrupts.  The caller must be a PCI function managing
2476  *	global interrupts.
2477  */
2478 void t4_intr_clear(struct adapter *adapter)
2479 {
2480 	static const unsigned int cause_reg[] = {
2481 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2482 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2483 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2484 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2485 		A_MC_INT_CAUSE,
2486 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2487 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2488 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2489 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2490 		A_TP_INT_CAUSE,
2491 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2492 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2493 		A_MPS_RX_PERR_INT_CAUSE,
2494 		A_CPL_INTR_CAUSE,
2495 		MYPF_REG(A_PL_PF_INT_CAUSE),
2496 		A_PL_PL_INT_CAUSE,
2497 		A_LE_DB_INT_CAUSE,
2498 	};
2499 
2500 	unsigned int i;
2501 
2502 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2503 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2504 
2505 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2506 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2507 }
2508 
2509 /**
2510  *	hash_mac_addr - return the hash value of a MAC address
2511  *	@addr: the 48-bit Ethernet MAC address
2512  *
2513  *	Hashes a MAC address according to the hash function used by HW inexact
2514  *	(hash) address matching.
2515  */
2516 static int hash_mac_addr(const u8 *addr)
2517 {
2518 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2519 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2520 	a ^= b;
2521 	a ^= (a >> 12);
2522 	a ^= (a >> 6);
2523 	return a & 0x3f;
2524 }
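
#if 0
/*
 * Editorial sketch: the 6-bit result of hash_mac_addr() selects one of
 * the 64 bits in the HW inexact-match hash vector.  The multicast MAC
 * below is illustrative.
 */
static u64 example_mcast_hash_vec(void)
{
	static const u8 mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	return 1ULL << hash_mac_addr(mac);
}
#endif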
2525 
2526 /**
2527  *	t4_config_rss_range - configure a portion of the RSS mapping table
2528  *	@adapter: the adapter
2529  *	@mbox: mbox to use for the FW command
2530  *	@viid: virtual interface whose RSS subtable is to be written
2531  *	@start: start entry in the table to write
2532  *	@n: how many table entries to write
2533  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2534  *	@nrspq: number of values in @rspq
2535  *
2536  *	Programs the selected part of the VI's RSS mapping table with the
2537  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2538  *	until the full table range is populated.
2539  *
2540  *	The caller must ensure the values in @rspq are in the range allowed for
2541  *	@viid.
2542  */
2543 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2544 			int start, int n, const u16 *rspq, unsigned int nrspq)
2545 {
2546 	int ret;
2547 	const u16 *rsp = rspq;
2548 	const u16 *rsp_end = rspq + nrspq;
2549 	struct fw_rss_ind_tbl_cmd cmd;
2550 
2551 	memset(&cmd, 0, sizeof(cmd));
2552 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2553 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2554 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2555 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2556 
2557 
2558 	/*
2559 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2560 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2561 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2562 	 * reserved.
2563 	 */
2564 	while (n > 0) {
2565 		int nq = min(n, 32);
2566 		int nq_packed = 0;
2567 		__be32 *qp = &cmd.iq0_to_iq2;
2568 
2569 		/*
2570 		 * Set up the firmware RSS command header to send the next
2571 		 * "nq" Ingress Queue IDs to the firmware.
2572 		 */
2573 		cmd.niqid = htons(nq);
2574 		cmd.startidx = htons(start);
2575 
2576 		/*
2577 		 * Advance bookkeeping past the "nq" IDs this command will write.
2578 		 */
2579 		start += nq;
2580 		n -= nq;
2581 
2582 		/*
2583 		 * While there are still Ingress Queue IDs to stuff into the
2584 		 * current firmware RSS command, retrieve them from the
2585 		 * Ingress Queue ID array and insert them into the command.
2586 		 */
2587 		while (nq > 0) {
2588 			/*
2589 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2590 			 * around the Ingress Queue ID array if necessary) and
2591 			 * insert them into the firmware RSS command at the
2592 			 * current 3-tuple position within the commad.
2593 			 * current 3-tuple position within the command.
2594 			u16 qbuf[3];
2595 			u16 *qbp = qbuf;
2596 			int nqbuf = min(3, nq);
2597 
2598 			nq -= nqbuf;
2599 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2600 			while (nqbuf && nq_packed < 32) {
2601 				nqbuf--;
2602 				nq_packed++;
2603 				*qbp++ = *rsp++;
2604 				if (rsp >= rsp_end)
2605 					rsp = rspq;
2606 			}
2607 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2608 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2609 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2610 		}
2611 
2612 		/*
2613 		 * Send this portion of the RSS table update to the firmware;
2614 		 * bail out on any errors.
2615 		 */
2616 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2617 		if (ret)
2618 			return ret;
2619 	}
2620 
2621 	return 0;
2622 }
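
#if 0
/*
 * Editorial sketch: replicating four ingress queues across a VI's RSS
 * indirection table.  Assumptions for illustration: a 128-entry table,
 * queue IDs 64..67, and adap->mbox as this PF's FW mailbox.
 */
static int example_rss_range(struct adapter *adap, unsigned int viid)
{
	static const u16 rspq[] = { 64, 65, 66, 67 };

	return t4_config_rss_range(adap, adap->mbox, viid, 0, 128, rspq,
				   ARRAY_SIZE(rspq));
}
#endif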
2623 
2624 /**
2625  *	t4_config_glbl_rss - configure the global RSS mode
2626  *	@adapter: the adapter
2627  *	@mbox: mbox to use for the FW command
2628  *	@mode: global RSS mode
2629  *	@flags: mode-specific flags
2630  *
2631  *	Sets the global RSS mode.
2632  */
2633 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2634 		       unsigned int flags)
2635 {
2636 	struct fw_rss_glb_config_cmd c;
2637 
2638 	memset(&c, 0, sizeof(c));
2639 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2640 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2641 	c.retval_len16 = htonl(FW_LEN16(c));
2642 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2643 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2644 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2645 		c.u.basicvirtual.mode_pkd =
2646 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2647 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2648 	} else
2649 		return -EINVAL;
2650 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2651 }
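
#if 0
/*
 * Editorial sketch: selecting the "basic virtual" global RSS mode.  The
 * F_FW_RSS_GLB_CONFIG_CMD_* flags come from the FW interface header and
 * are an illustrative, not prescribed, combination.
 */
static int example_glbl_rss(struct adapter *adap)
{
	return t4_config_glbl_rss(adap, adap->mbox,
				  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				  F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				  F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
}
#endif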
2652 
2653 /**
2654  *	t4_config_vi_rss - configure per VI RSS settings
2655  *	@adapter: the adapter
2656  *	@mbox: mbox to use for the FW command
2657  *	@viid: the VI id
2658  *	@flags: RSS flags
2659  *	@defq: id of the default RSS queue for the VI.
2660  *
2661  *	Configures VI-specific RSS properties.
2662  */
2663 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2664 		     unsigned int flags, unsigned int defq)
2665 {
2666 	struct fw_rss_vi_config_cmd c;
2667 
2668 	memset(&c, 0, sizeof(c));
2669 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2670 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2671 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2672 	c.retval_len16 = htonl(FW_LEN16(c));
2673 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2674 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2675 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2676 }
2677 
2678 /* Read an RSS table row */
2679 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2680 {
2681 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2682 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2683 				   5, 0, val);
2684 }
2685 
2686 /**
2687  *	t4_read_rss - read the contents of the RSS mapping table
2688  *	@adapter: the adapter
2689  *	@map: holds the contents of the RSS mapping table
2690  *
2691  *	Reads the contents of the RSS hash->queue mapping table.
2692  */
2693 int t4_read_rss(struct adapter *adapter, u16 *map)
2694 {
2695 	u32 val;
2696 	int i, ret;
2697 
2698 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2699 		ret = rd_rss_row(adapter, i, &val);
2700 		if (ret)
2701 			return ret;
2702 		*map++ = G_LKPTBLQUEUE0(val);
2703 		*map++ = G_LKPTBLQUEUE1(val);
2704 	}
2705 	return 0;
2706 }
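
#if 0
/*
 * Editorial sketch: dumping the hash->queue table returned by
 * t4_read_rss().  The map buffer is static because RSS_NENTRIES u16
 * entries are too large for a kernel stack frame.
 */
static int example_dump_rss(struct adapter *adap)
{
	static u16 map[RSS_NENTRIES];
	int i, ret;

	ret = t4_read_rss(adap, map);
	if (ret == 0)
		for (i = 0; i < RSS_NENTRIES; i++)
			printf("RSS[%4d] -> IQ %u\n", i, map[i]);
	return ret;
}
#endif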
2707 
2708 /**
2709  *	t4_read_rss_key - read the global RSS key
2710  *	@adap: the adapter
2711  *	@key: 10-entry array holding the 320-bit RSS key
2712  *
2713  *	Reads the global 320-bit RSS key.
2714  */
2715 void t4_read_rss_key(struct adapter *adap, u32 *key)
2716 {
2717 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2718 			 A_TP_RSS_SECRET_KEY0);
2719 }
2720 
2721 /**
2722  *	t4_write_rss_key - program one of the RSS keys
2723  *	@adap: the adapter
2724  *	@key: 10-entry array holding the 320-bit RSS key
2725  *	@idx: which RSS key to write
2726  *
2727  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2728  *	0..15 the corresponding entry in the RSS key table is written,
2729  *	otherwise the global RSS key is written.
2730  */
2731 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2732 {
2733 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2734 			  A_TP_RSS_SECRET_KEY0);
2735 	if (idx >= 0 && idx < 16)
2736 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2737 			     V_KEYWRADDR(idx) | F_KEYWREN);
2738 }
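
#if 0
/*
 * Editorial sketch: loading the global 320-bit RSS secret key; an idx
 * outside 0..15 leaves the key table alone and writes the global key.
 * The repeated pattern below is placeholder key material only.
 */
static void example_rss_key(struct adapter *adap)
{
	static const u32 key[10] = {
		0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a,
		0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a, 0x6d5a6d5a
	};

	t4_write_rss_key(adap, key, -1);
}
#endif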
2739 
2740 /**
2741  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2742  *	@adapter: the adapter
2743  *	@index: the entry in the PF RSS table to read
2744  *	@valp: where to store the returned value
2745  *
2746  *	Reads the PF RSS Configuration Table at the specified index and returns
2747  *	the value found there.
2748  */
2749 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2750 {
2751 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2752 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2753 }
2754 
2755 /**
2756  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2757  *	@adapter: the adapter
2758  *	@index: the entry in the PF RSS table to write
2759  *	@val: the value to store
2760  *
2761  *	Writes the PF RSS Configuration Table at the specified index with the
2762  *	specified value.
2763  */
2764 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2765 {
2766 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2767 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2768 }
2769 
2770 /**
2771  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2772  *	@adapter: the adapter
2773  *	@index: the entry in the VF RSS table to read
2774  *	@vfl: where to store the returned VFL
2775  *	@vfh: where to store the returned VFH
2776  *
2777  *	Reads the VF RSS Configuration Table at the specified index and returns
2778  *	the (VFL, VFH) values found there.
2779  */
2780 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2781 			   u32 *vfl, u32 *vfh)
2782 {
2783 	u32 vrt;
2784 
2785 	/*
2786 	 * Request that the index'th VF Table values be read into VFL/VFH.
2787 	 */
2788 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2789 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2790 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2791 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2792 
2793 	/*
2794 	 * Grab the VFL/VFH values ...
2795 	 */
2796 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2797 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2798 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2799 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2800 }
2801 
2802 /**
2803  *	t4_write_rss_vf_config - write VF RSS Configuration Table
2804  *
2805  *	@adapter: the adapter
2806  *	@index: the entry in the VF RSS table to write
2807  *	@vfl: the VFL to store
2808  *	@vfh: the VFH to store
2809  *
2810  *	Writes the VF RSS Configuration Table at the specified index with the
2811  *	specified (VFL, VFH) values.
2812  */
2813 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2814 			    u32 vfl, u32 vfh)
2815 {
2816 	u32 vrt;
2817 
2818 	/*
2819 	 * Load up VFL/VFH with the values to be written ...
2820 	 */
2821 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2822 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
2823 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2824 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
2825 
2826 	/*
2827 	 * Write the VFL/VFH into the VF Table at index'th location.
2828 	 * Write the VFL/VFH into the VF Table at the index'th location.
2829 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2830 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2831 	vrt |= V_VFWRADDR(index) | F_VFWREN;
2832 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2833 }
2834 
2835 /**
2836  *	t4_read_rss_pf_map - read PF RSS Map
2837  *	@adapter: the adapter
2838  *
2839  *	Reads the PF RSS Map register and returns its value.
2840  */
2841 u32 t4_read_rss_pf_map(struct adapter *adapter)
2842 {
2843 	u32 pfmap;
2844 
2845 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2846 			 &pfmap, 1, A_TP_RSS_PF_MAP);
2847 	return pfmap;
2848 }
2849 
2850 /**
2851  *	t4_write_rss_pf_map - write PF RSS Map
2852  *	@adapter: the adapter
2853  *	@pfmap: PF RSS Map value
2854  *
2855  *	Writes the specified value to the PF RSS Map register.
2856  */
2857 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2858 {
2859 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2860 			  &pfmap, 1, A_TP_RSS_PF_MAP);
2861 }
2862 
2863 /**
2864  *	t4_read_rss_pf_mask - read PF RSS Mask
2865  *	@adapter: the adapter
2866  *
2867  *	Reads the PF RSS Mask register and returns its value.
2868  */
2869 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2870 {
2871 	u32 pfmask;
2872 
2873 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2874 			 &pfmask, 1, A_TP_RSS_PF_MSK);
2875 	return pfmask;
2876 }
2877 
2878 /**
2879  *	t4_write_rss_pf_mask - write PF RSS Mask
2880  *	@adapter: the adapter
2881  *	@pfmask: PF RSS Mask value
2882  *
2883  *	Writes the specified value to the PF RSS Mask register.
2884  */
2885 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2886 {
2887 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2888 			  &pfmask, 1, A_TP_RSS_PF_MSK);
2889 }
2890 
2891 /**
2892  *	t4_set_filter_mode - configure the optional components of filter tuples
2893  *	@adap: the adapter
2894  *	@mode_map: a bitmap selecting which optional filter components to enable
2895  *
2896  *	Sets the filter mode by selecting the optional components to enable
2897  *	in filter tuples.  Returns 0 on success and a negative error if the
2898  *	requested mode needs more bits than are available for optional
2899  *	components.
2900  */
2901 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2902 {
2903 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2904 
2905 	int i, nbits = 0;
2906 
2907 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2908 		if (mode_map & (1 << i))
2909 			nbits += width[i];
2910 	if (nbits > FILTER_OPT_LEN)
2911 		return -EINVAL;
2912 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2913 			  A_TP_VLAN_PRI_MAP);
2914 	return 0;
2915 }
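
#if 0
/*
 * Editorial sketch: enabling port, VLAN, and protocol as the optional
 * filter-tuple components.  The S_PORT/S_VLAN/S_PROTOCOL shifts are
 * assumed to be the TP_VLAN_PRI_MAP field definitions from t4_regs.h,
 * like S_FCOE above.
 */
static int example_filter_mode(struct adapter *adap)
{
	return t4_set_filter_mode(adap,
	    (1 << S_PORT) | (1 << S_VLAN) | (1 << S_PROTOCOL));
}
#endif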
2916 
2917 /**
2918  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2919  *	@adap: the adapter
2920  *	@v4: holds the TCP/IP counter values
2921  *	@v6: holds the TCP/IPv6 counter values
2922  *
2923  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2924  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2925  */
2926 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2927 			 struct tp_tcp_stats *v6)
2928 {
2929 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2930 
2931 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2932 #define STAT(x)     val[STAT_IDX(x)]
2933 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2934 
2935 	if (v4) {
2936 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2937 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2938 		v4->tcpOutRsts = STAT(OUT_RST);
2939 		v4->tcpInSegs  = STAT64(IN_SEG);
2940 		v4->tcpOutSegs = STAT64(OUT_SEG);
2941 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2942 	}
2943 	if (v6) {
2944 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2945 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2946 		v6->tcpOutRsts = STAT(OUT_RST);
2947 		v6->tcpInSegs  = STAT64(IN_SEG);
2948 		v6->tcpOutSegs = STAT64(OUT_SEG);
2949 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2950 	}
2951 #undef STAT64
2952 #undef STAT
2953 #undef STAT_IDX
2954 }
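
#if 0
/*
 * Editorial sketch: reading only the IPv4 TCP MIB counters; a NULL @v6
 * skips the IPv6 block, as documented above.
 */
static void example_tcp_stats(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL);
	printf("tcpOutSegs %ju, retransmits %ju\n",
	    (uintmax_t)v4.tcpOutSegs, (uintmax_t)v4.tcpRetransSegs);
}
#endif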
2955 
2956 /**
2957  *	t4_tp_get_err_stats - read TP's error MIB counters
2958  *	@adap: the adapter
2959  *	@st: holds the counter values
2960  *
2961  *	Returns the values of TP's error counters.
2962  */
2963 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2964 {
2965 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2966 			 12, A_TP_MIB_MAC_IN_ERR_0);
2967 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2968 			 8, A_TP_MIB_TNL_CNG_DROP_0);
2969 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2970 			 4, A_TP_MIB_TNL_DROP_0);
2971 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2972 			 4, A_TP_MIB_OFD_VLN_DROP_0);
2973 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2974 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
2975 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2976 			 2, A_TP_MIB_OFD_ARP_DROP);
2977 }
2978 
2979 /**
2980  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
2981  *	@adap: the adapter
2982  *	@st: holds the counter values
2983  *
2984  *	Returns the values of TP's proxy counters.
2985  */
2986 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2987 {
2988 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2989 			 4, A_TP_MIB_TNL_LPBK_0);
2990 }
2991 
2992 /**
2993  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
2994  *	@adap: the adapter
2995  *	@st: holds the counter values
2996  *
2997  *	Returns the values of TP's CPL counters.
2998  */
2999 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3000 {
3001 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3002 			 8, A_TP_MIB_CPL_IN_REQ_0);
3003 }
3004 
3005 /**
3006  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3007  *	@adap: the adapter
3008  *	@st: holds the counter values
3009  *
3010  *	Returns the values of TP's RDMA counters.
3011  */
3012 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3013 {
3014 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3015 			 2, A_TP_MIB_RQE_DFR_MOD);
3016 }
3017 
3018 /**
3019  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3020  *	@adap: the adapter
3021  *	@idx: the port index
3022  *	@st: holds the counter values
3023  *
3024  *	Returns the values of TP's FCoE counters for the selected port.
3025  */
3026 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3027 		       struct tp_fcoe_stats *st)
3028 {
3029 	u32 val[2];
3030 
3031 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3032 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3033 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3034 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3035 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3036 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3037 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3038 }
3039 
3040 /**
3041  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3042  *	@adap: the adapter
3043  *	@st: holds the counter values
3044  *
3045  *	Returns the values of TP's counters for non-TCP directly-placed packets.
3046  */
3047 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3048 {
3049 	u32 val[4];
3050 
3051 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3052 			 A_TP_MIB_USM_PKTS);
3053 	st->frames = val[0];
3054 	st->drops = val[1];
3055 	st->octets = ((u64)val[2] << 32) | val[3];
3056 }
3057 
3058 /**
3059  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3060  *	@adap: the adapter
3061  *	@mtus: where to store the MTU values
3062  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3063  *
3064  *	Reads the HW path MTU table.
3065  */
3066 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3067 {
3068 	u32 v;
3069 	int i;
3070 
3071 	for (i = 0; i < NMTUS; ++i) {
3072 		t4_write_reg(adap, A_TP_MTU_TABLE,
3073 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3074 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3075 		mtus[i] = G_MTUVALUE(v);
3076 		if (mtu_log)
3077 			mtu_log[i] = G_MTUWIDTH(v);
3078 	}
3079 }
3080 
3081 /**
3082  *	t4_read_cong_tbl - reads the congestion control table
3083  *	@adap: the adapter
3084  *	@incr: where to store the alpha values
3085  *
3086  *	Reads the additive increments programmed into the HW congestion
3087  *	control table.
3088  */
3089 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3090 {
3091 	unsigned int mtu, w;
3092 
3093 	for (mtu = 0; mtu < NMTUS; ++mtu)
3094 		for (w = 0; w < NCCTRL_WIN; ++w) {
3095 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3096 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3097 			incr[mtu][w] = (u16)t4_read_reg(adap,
3098 						A_TP_CCTRL_TABLE) & 0x1fff;
3099 		}
3100 }
3101 
3102 /**
3103  *	t4_read_pace_tbl - read the pace table
3104  *	@adap: the adapter
3105  *	@pace_vals: holds the returned values
3106  *
3107  *	Returns the values of TP's pace table in microseconds.
3108  */
3109 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3110 {
3111 	unsigned int i, v;
3112 
3113 	for (i = 0; i < NTX_SCHED; i++) {
3114 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3115 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3116 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3117 	}
3118 }
3119 
3120 /**
3121  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3122  *	@adap: the adapter
3123  *	@addr: the indirect TP register address
3124  *	@mask: specifies the field within the register to modify
3125  *	@val: new value for the field
3126  *
3127  *	Sets a field of an indirect TP register to the given value.
3128  */
3129 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3130 			    unsigned int mask, unsigned int val)
3131 {
3132 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3133 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3134 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3135 }
3136 
3137 /**
3138  *	init_cong_ctrl - initialize congestion control parameters
3139  *	@a: the alpha values for congestion control
3140  *	@b: the beta values for congestion control
3141  *
3142  *	Initialize the congestion control parameters.
3143  */
3144 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3145 {
3146 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3147 	a[9] = 2;
3148 	a[10] = 3;
3149 	a[11] = 4;
3150 	a[12] = 5;
3151 	a[13] = 6;
3152 	a[14] = 7;
3153 	a[15] = 8;
3154 	a[16] = 9;
3155 	a[17] = 10;
3156 	a[18] = 14;
3157 	a[19] = 17;
3158 	a[20] = 21;
3159 	a[21] = 25;
3160 	a[22] = 30;
3161 	a[23] = 35;
3162 	a[24] = 45;
3163 	a[25] = 60;
3164 	a[26] = 80;
3165 	a[27] = 100;
3166 	a[28] = 200;
3167 	a[29] = 300;
3168 	a[30] = 400;
3169 	a[31] = 500;
3170 
3171 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3172 	b[9] = b[10] = 1;
3173 	b[11] = b[12] = 2;
3174 	b[13] = b[14] = b[15] = b[16] = 3;
3175 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3176 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3177 	b[28] = b[29] = 6;
3178 	b[30] = b[31] = 7;
3179 }
3180 
3181 /* The minimum additive increment value for the congestion control table */
3182 #define CC_MIN_INCR 2U
3183 
3184 /**
3185  *	t4_load_mtus - write the MTU and congestion control HW tables
3186  *	@adap: the adapter
3187  *	@mtus: the values for the MTU table
3188  *	@alpha: the values for the congestion control alpha parameter
3189  *	@beta: the values for the congestion control beta parameter
3190  *
3191  *	Write the HW MTU table with the supplied MTUs and the high-speed
3192  *	congestion control table with the supplied alpha, beta, and MTUs.
3193  *	We write the two tables together because the additive increments
3194  *	depend on the MTUs.
3195  */
3196 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3197 		  const unsigned short *alpha, const unsigned short *beta)
3198 {
3199 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3200 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3201 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3202 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3203 	};
3204 
3205 	unsigned int i, w;
3206 
3207 	for (i = 0; i < NMTUS; ++i) {
3208 		unsigned int mtu = mtus[i];
3209 		unsigned int log2 = fls(mtu);
3210 
3211 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3212 			log2--;
3213 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3214 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3215 
3216 		for (w = 0; w < NCCTRL_WIN; ++w) {
3217 			unsigned int inc;
3218 
3219 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3220 				  CC_MIN_INCR);
3221 
3222 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3223 				     (w << 16) | (beta[w] << 13) | inc);
3224 		}
3225 	}
3226 }
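
#if 0
/*
 * Editorial sketch: loading a customary 16-entry MTU ladder along with
 * the default congestion-control parameters.  The MTU values are
 * illustrative; init_cong_ctrl() fills the NCCTRL_WIN-sized alpha and
 * beta arrays consumed by t4_load_mtus().
 */
static void example_load_mtus(struct adapter *adap)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048,
		4096, 4352, 8192, 9000, 9600
	};
	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];

	init_cong_ctrl(a, b);
	t4_load_mtus(adap, mtus, a, b);
}
#endif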
3227 
3228 /**
3229  *	t4_set_pace_tbl - set the pace table
3230  *	@adap: the adapter
3231  *	@pace_vals: the pace values in microseconds
3232  *	@start: index of the first entry in the HW pace table to set
3233  *	@n: how many entries to set
3234  *
3235  *	Sets (a subset of the) HW pace table.
3236  */
3237 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3238 		     unsigned int start, unsigned int n)
3239 {
3240 	unsigned int vals[NTX_SCHED], i;
3241 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3242 
3243 	if (n > NTX_SCHED)
3244 		return -ERANGE;
3245 
3246 	/* convert values from us to dack ticks, rounding to closest value */
3247 	for (i = 0; i < n; i++, pace_vals++) {
3248 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3249 		if (vals[i] > 0x7ff)
3250 			return -ERANGE;
3251 		if (*pace_vals && vals[i] == 0)
3252 			return -ERANGE;
3253 	}
3254 	for (i = 0; i < n; i++, start++)
3255 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3256 	return 0;
3257 }
3258 
3259 /**
3260  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3261  *	@adap: the adapter
3262  *	@sched: the scheduler index
3263  *	@kbps: target rate in Kbps
3264  *
3265  *	Configure a Tx HW scheduler for the target rate.
3266  */
3267 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3268 {
3269 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3270 	unsigned int clk = adap->params.vpd.cclk * 1000;
3271 	unsigned int selected_cpt = 0, selected_bpt = 0;
3272 
3273 	if (kbps > 0) {
3274 		kbps *= 125;     /* -> bytes */
3275 		for (cpt = 1; cpt <= 255; cpt++) {
3276 			tps = clk / cpt;
3277 			bpt = (kbps + tps / 2) / tps;
3278 			if (bpt > 0 && bpt <= 255) {
3279 				v = bpt * tps;
3280 				delta = v >= kbps ? v - kbps : kbps - v;
3281 				if (delta < mindelta) {
3282 					mindelta = delta;
3283 					selected_cpt = cpt;
3284 					selected_bpt = bpt;
3285 				}
3286 			} else if (selected_cpt)
3287 				break;
3288 		}
3289 		if (!selected_cpt)
3290 			return -EINVAL;
3291 	}
3292 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3293 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3294 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3295 	if (sched & 1)
3296 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3297 	else
3298 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3299 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3300 	return 0;
3301 }
3302 
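/*
 * Illustrative sketch: limit Tx scheduler 0 to 1 Gb/s.  Internally the
 * routine searches for a clocks-per-tick (cpt) / bytes-per-tick (bpt)
 * pair whose rate bpt * cclk / cpt comes closest to the target.
 */
static inline int t4_set_sched_1g_example(struct adapter *adap)
{
	return t4_set_sched_bps(adap, 0, 1000000);	/* 1000000 Kbps */
}
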
3303 /**
3304  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3305  *	@adap: the adapter
3306  *	@sched: the scheduler index
3307  *	@ipg: the interpacket delay in tenths of nanoseconds
3308  *
3309  *	Set the interpacket delay for a HW packet rate scheduler.
3310  */
3311 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3312 {
3313 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3314 
3315 	/* convert ipg to nearest number of core clocks */
3316 	ipg *= core_ticks_per_usec(adap);
3317 	ipg = (ipg + 5000) / 10000;
3318 	if (ipg > M_TXTIMERSEPQ0)
3319 		return -EINVAL;
3320 
3321 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3322 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3323 	if (sched & 1)
3324 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3325 	else
3326 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3327 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3328 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3329 	return 0;
3330 }
3331 
3332 /**
3333  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3334  *	@adap: the adapter
3335  *	@sched: the scheduler index
3336  *	@kbps: the transmit rate in Kbps
3337  *	@ipg: the interpacket delay in tenths of nanoseconds
3338  *
3339  *	Return the current configuration of a HW Tx scheduler.
3340  */
3341 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3342 		     unsigned int *ipg)
3343 {
3344 	unsigned int v, addr, bpt, cpt;
3345 
3346 	if (kbps) {
3347 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3348 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3349 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3350 		if (sched & 1)
3351 			v >>= 16;
3352 		bpt = (v >> 8) & 0xff;
3353 		cpt = v & 0xff;
3354 		if (!cpt)
3355 			*kbps = 0;        /* scheduler disabled */
3356 		else {
3357 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3358 			*kbps = (v * bpt) / 125;
3359 		}
3360 	}
3361 	if (ipg) {
3362 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3363 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3364 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3365 		if (sched & 1)
3366 			v >>= 16;
3367 		v &= 0xffff;
3368 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3369 	}
3370 }
3371 
3372 /*
3373  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3374  * clocks.  The formula is
3375  *
3376  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3377  *
3378  * which is equivalent to
3379  *
3380  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3381  */
3382 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3383 {
3384 	u64 v = bytes256 * adap->params.vpd.cclk;
3385 
3386 	return v * 62 + v / 2;	/* = 62.5 * v, using integer arithmetic */
3387 }
3388 
3389 /**
3390  *	t4_get_chan_txrate - get the current per channel Tx rates
3391  *	@adap: the adapter
3392  *	@nic_rate: rates for NIC traffic
3393  *	@ofld_rate: rates for offloaded traffic
3394  *
3395  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3396  *	for each channel.
3397  */
3398 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3399 {
3400 	u32 v;
3401 
3402 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3403 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3404 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3405 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3406 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3407 
3408 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3409 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3410 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3411 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3412 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3413 }
3414 
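/*
 * Illustrative sketch: snapshot the per-channel Tx rates.  Both output
 * arrays must have room for all four channels.
 */
static inline void t4_read_chan_txrates_example(struct adapter *adap)
{
	u64 nic[4], ofld[4];

	t4_get_chan_txrate(adap, nic, ofld);
	/* nic[i] and ofld[i] now hold channel i's rate in bytes/s. */
}
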
3415 /**
3416  *	t4_set_trace_filter - configure one of the tracing filters
3417  *	@adap: the adapter
3418  *	@tp: the desired trace filter parameters
3419  *	@idx: which filter to configure
3420  *	@enable: whether to enable or disable the filter
3421  *
3422  *	Configures one of the tracing filters available in HW.  If @enable is
3423  *	%0 @tp is not examined and may be %NULL.  The user is responsible for
3424  *	selecting single or multiple trace mode by writing to the A_MPS_TRC_CFG
3425  *	register, e.g. with the "cxgbtool iface reg reg_addr=val" command.  See
3426  *	t4_sniffer/docs/readme.txt for a complete description of how to set up
3427  *	tracing on T4.
3428  */
3429 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3430 			int enable)
3431 {
3432 	int i, ofst = idx * 4;
3433 	u32 data_reg, mask_reg, cfg;
3434 	u32 multitrc = F_TRCMULTIFILTER;
3435 
3436 	if (!enable) {
3437 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3438 		return 0;
3439 	}
3440 
3441 	/*
3442 	 * TODO - After T4 data book is updated, specify the exact
3443 	 * section below.
3444 	 *
3445 	 * See T4 data book - MPS section for a complete description
3446 	 * of the below if..else handling of A_MPS_TRC_CFG register
3447 	 * value.
3448 	 */
3449 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3450 	if (cfg & F_TRCMULTIFILTER) {
3451 		/*
3452 		 * If multiple tracers are enabled, then maximum
3453 		 * capture size is 2.5KB (FIFO size of a single channel)
3454 		 * minus 2 flits for CPL_TRACE_PKT header.
3455 		 */
3456 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3457 			return -EINVAL;
3458 	} else {
3460 		/*
3461 		 * If multiple tracers are disabled, to avoid deadlocks
3462 		 * maximum packet capture size of 9600 bytes is recommended.
3463 		 * Also in this mode, only trace0 can be enabled and running.
3464 		 */
3465 		multitrc = 0;
3466 		if (tp->snap_len > 9600 || idx)
3467 			return -EINVAL;
3468 	}
3469 
3470 	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3471 	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3472 		return -EINVAL;
3473 
3474 	/* stop the tracer we'll be changing */
3475 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3476 
3477 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3478 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3479 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3480 
3481 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3482 		t4_write_reg(adap, data_reg, tp->data[i]);
3483 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3484 	}
3485 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3486 		     V_TFCAPTUREMAX(tp->snap_len) |
3487 		     V_TFMINPKTSIZE(tp->min_len));
3488 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3489 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3490 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3491 
3492 	return 0;
3493 }
3494 
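/*
 * Illustrative sketch: capture the first 128 bytes of every packet
 * received on port 0 with tracer 0.  An all-zero tp.mask leaves every
 * match byte a don't-care (the hardware takes the inverted mask), so the
 * filter matches all traffic.
 */
static inline int t4_trace_port0_example(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));
	tp.snap_len = 128;	/* bytes to capture per packet */
	tp.min_len = 0;		/* no minimum packet size */
	tp.port = 0;		/* ingress traffic on port 0 */
	return t4_set_trace_filter(adap, &tp, 0, 1);
}
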
3495 /**
3496  *	t4_get_trace_filter - query one of the tracing filters
3497  *	@adap: the adapter
3498  *	@tp: the current trace filter parameters
3499  *	@idx: which trace filter to query
3500  *	@enabled: non-zero if the filter is enabled
3501  *
3502  *	Returns the current settings of one of the HW tracing filters.
3503  */
3504 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3505 			 int *enabled)
3506 {
3507 	u32 ctla, ctlb;
3508 	int i, ofst = idx * 4;
3509 	u32 data_reg, mask_reg;
3510 
3511 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3512 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3513 
3514 	*enabled = !!(ctla & F_TFEN);
3515 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3516 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3517 	tp->skip_ofst = G_TFOFFSET(ctla);
3518 	tp->skip_len = G_TFLENGTH(ctla);
3519 	tp->invert = !!(ctla & F_TFINVERTMATCH);
3520 	tp->port = G_TFPORT(ctla);
3521 
3522 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3523 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3524 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3525 
3526 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3527 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3528 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3529 	}
3530 }
3531 
3532 /**
3533  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3534  *	@adap: the adapter
3535  *	@cnt: where to store the count statistics
3536  *	@cycles: where to store the cycle statistics
3537  *
3538  *	Returns performance statistics from PMTX.
3539  */
3540 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3541 {
3542 	int i;
3543 
3544 	for (i = 0; i < PM_NSTATS; i++) {
3545 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3546 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3547 		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3548 	}
3549 }
3550 
3551 /**
3552  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3553  *	@adap: the adapter
3554  *	@cnt: where to store the count statistics
3555  *	@cycles: where to store the cycle statistics
3556  *
3557  *	Returns performance statistics from PMRX.
3558  */
3559 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3560 {
3561 	int i;
3562 
3563 	for (i = 0; i < PM_NSTATS; i++) {
3564 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3565 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3566 		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3567 	}
3568 }
3569 
3570 /**
3571  *	get_mps_bg_map - return the buffer groups associated with a port
3572  *	@adap: the adapter
3573  *	@idx: the port index
3574  *
3575  *	Returns a bitmap indicating which MPS buffer groups are associated
3576  *	with the given port.  Bit i is set if buffer group i is used by the
3577  *	port.
3578  */
3579 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3580 {
3581 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3582 
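	/*
	 * NUMPORTS appears to act as log2(#ports): 0 -> 1 port owning all
	 * four buffer groups, 1 -> 2 ports with two buffer groups each,
	 * otherwise 4 ports with one buffer group apiece.
	 */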
3583 	if (n == 0)
3584 		return idx == 0 ? 0xf : 0;
3585 	if (n == 1)
3586 		return idx < 2 ? (3 << (2 * idx)) : 0;
3587 	return 1 << idx;
3588 }
3589 
3590 /**
3591  *	t4_get_port_stats_offset - collect port stats relative to a previous
3592  *				   snapshot
3593  *	@adap: the adapter
3594  *	@idx: the port index
3595  *	@stats: current stats to fill
3596  *	@offset: previous stats snapshot
3597  */
3598 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3599 		struct port_stats *stats,
3600 		struct port_stats *offset)
3601 {
3602 	u64 *s, *o;
3603 	int i;
3604 
3605 	t4_get_port_stats(adap, idx, stats);
3606 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3607 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
3608 			i++, s++, o++)
3609 		*s -= *o;
3610 }
3611 
3612 /**
3613  *	t4_get_port_stats - collect port statistics
3614  *	@adap: the adapter
3615  *	@idx: the port index
3616  *	@p: the stats structure to fill
3617  *
3618  *	Collect statistics related to the given port from HW.
3619  */
3620 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3621 {
3622 	u32 bgmap = get_mps_bg_map(adap, idx);
3623 
3624 #define GET_STAT(name) \
3625 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3626 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3627 
3628 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3629 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3630 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3631 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3632 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3633 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3634 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3635 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3636 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3637 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3638 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3639 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3640 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3641 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3642 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3643 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3644 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3645 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3646 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3647 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3648 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3649 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3650 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3651 
3652 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3653 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3654 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3655 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3656 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3657 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3658 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3659 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3660 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3661 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3662 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3663 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3664 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3665 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3666 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3667 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3668 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3669 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3670 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3671 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3672 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3673 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3674 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3675 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3676 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3677 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3678 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3679 
3680 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3681 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3682 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3683 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3684 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3685 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3686 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3687 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3688 
3689 #undef GET_STAT
3690 #undef GET_STAT_COM
3691 }
3692 
3693 /**
3694  *	t4_clr_port_stats - clear port statistics
3695  *	@adap: the adapter
3696  *	@idx: the port index
3697  *
3698  *	Clear HW statistics for the given port.
3699  */
3700 void t4_clr_port_stats(struct adapter *adap, int idx)
3701 {
3702 	unsigned int i;
3703 	u32 bgmap = get_mps_bg_map(adap, idx);
3704 
3705 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3706 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3707 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3708 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3709 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3710 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3711 	for (i = 0; i < 4; i++)
3712 		if (bgmap & (1 << i)) {
3713 			t4_write_reg(adap,
3714 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3715 			t4_write_reg(adap,
3716 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3717 		}
3718 }
3719 
3720 /**
3721  *	t4_get_lb_stats - collect loopback port statistics
3722  *	@adap: the adapter
3723  *	@idx: the loopback port index
3724  *	@p: the stats structure to fill
3725  *
3726  *	Return HW statistics for the given loopback port.
3727  */
3728 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3729 {
3730 	u32 bgmap = get_mps_bg_map(adap, idx);
3731 
3732 #define GET_STAT(name) \
3733 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3734 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3735 
3736 	p->octets           = GET_STAT(BYTES);
3737 	p->frames           = GET_STAT(FRAMES);
3738 	p->bcast_frames     = GET_STAT(BCAST);
3739 	p->mcast_frames     = GET_STAT(MCAST);
3740 	p->ucast_frames     = GET_STAT(UCAST);
3741 	p->error_frames     = GET_STAT(ERROR);
3742 
3743 	p->frames_64        = GET_STAT(64B);
3744 	p->frames_65_127    = GET_STAT(65B_127B);
3745 	p->frames_128_255   = GET_STAT(128B_255B);
3746 	p->frames_256_511   = GET_STAT(256B_511B);
3747 	p->frames_512_1023  = GET_STAT(512B_1023B);
3748 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3749 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3750 	p->drop             = t4_read_reg(adap, PORT_REG(idx,
3751 					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3752 
3753 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3754 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3755 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3756 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3757 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3758 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3759 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3760 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3761 
3762 #undef GET_STAT
3763 #undef GET_STAT_COM
3764 }
3765 
3766 /**
3767  *	t4_wol_magic_enable - enable/disable magic packet WoL
3768  *	@adap: the adapter
3769  *	@port: the physical port index
3770  *	@addr: MAC address expected in magic packets, %NULL to disable
3771  *
3772  *	Enables/disables magic packet wake-on-LAN for the selected port.
3773  */
3774 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3775 			 const u8 *addr)
3776 {
3777 	if (addr) {
3778 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3779 			     (addr[2] << 24) | (addr[3] << 16) |
3780 			     (addr[4] << 8) | addr[5]);
3781 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3782 			     (addr[0] << 8) | addr[1]);
3783 	}
3784 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3785 			 V_MAGICEN(addr != NULL));
3786 }
3787 
3788 /**
3789  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3790  *	@adap: the adapter
3791  *	@port: the physical port index
3792  *	@map: bitmap of which HW pattern filters to set
3793  *	@mask0: byte mask for bytes 0-63 of a packet
3794  *	@mask1: byte mask for bytes 64-127 of a packet
3795  *	@crc: Ethernet CRC for selected bytes
3796  *	@enable: enable/disable switch
3797  *
3798  *	Sets the pattern filters indicated in @map to mask out the bytes
3799  *	specified in @mask0/@mask1 in received packets and compare the CRC of
3800  *	the resulting packet against @crc.  If @enable is %true pattern-based
3801  *	WoL is enabled, otherwise disabled.
3802  */
3803 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3804 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3805 {
3806 	int i;
3807 
3808 	if (!enable) {
3809 		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3810 				 F_PATEN, 0);
3811 		return 0;
3812 	}
3813 	if (map > 0xff)
3814 		return -EINVAL;
3815 
3816 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3817 
3818 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3819 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3820 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3821 
3822 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3823 		if (!(map & 1))
3824 			continue;
3825 
3826 		/* write byte masks */
3827 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3828 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3829 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3830 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3831 			return -ETIMEDOUT;
3832 
3833 		/* write CRC */
3834 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3835 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3836 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3837 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3838 			return -ETIMEDOUT;
3839 	}
3840 #undef EPIO_REG
3841 
3842 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3843 	return 0;
3844 }
3845 
3846 /**
3847  *	t4_mk_filtdelwr - create a delete filter WR
3848  *	@ftid: the filter ID
3849  *	@wr: the filter work request to populate
3850  *	@qid: ingress queue to receive the delete notification
3851  *
3852  *	Creates a filter work request to delete the supplied filter.  If @qid is
3853  *	negative the delete notification is suppressed.
3854  */
3855 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3856 {
3857 	memset(wr, 0, sizeof(*wr));
3858 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3859 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3860 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3861 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3862 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3863 	if (qid >= 0)
3864 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3865 }
3866 
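/*
 * Illustrative sketch: populate a delete-filter work request for filter
 * @ftid with the completion notification suppressed (negative qid).
 * Submitting the WR to the chip is up to the caller and is not shown.
 */
static inline void t4_mk_filtdelwr_quiet_example(unsigned int ftid,
    struct fw_filter_wr *wr)
{
	t4_mk_filtdelwr(ftid, wr, -1);
}
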
3867 #define INIT_CMD(var, cmd, rd_wr) do { \
3868 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3869 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3870 	(var).retval_len16 = htonl(FW_LEN16(var)); \
3871 } while (0)
3872 
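/**
 *	t4_fwaddrspace_write - write a value into the firmware's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Writes a 32-bit word into the firmware address space through an
 *	FW_LDST command.
 */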
3873 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
3874 {
3875 	struct fw_ldst_cmd c;
3876 
3877 	memset(&c, 0, sizeof(c));
3878 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3879 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
3880 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3881 	c.u.addrval.addr = htonl(addr);
3882 	c.u.addrval.val = htonl(val);
3883 
3884 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3885 }
3886 
3887 /**
3888  *	t4_i2c_rd - read a byte from an i2c addressable device
3889  *	@adap: the adapter
3890  *	@mbox: mailbox to use for the FW command
3891  *	@port_id: the port id
3892  *	@dev_addr: the i2c device address
3893  *	@offset: the byte offset to read from
3894  *	@valp: where to store the value
3895  */
3896 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
3897 	       u8 dev_addr, u8 offset, u8 *valp)
3898 {
3899 	int ret;
3900 	struct fw_ldst_cmd c;
3901 
3902 	memset(&c, 0, sizeof(c));
3903 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3904 		F_FW_CMD_READ |
3905 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
3906 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3907 	c.u.i2c.pid_pkd = V_FW_LDST_CMD_PID(port_id);
3908 	c.u.i2c.base = dev_addr;
3909 	c.u.i2c.boffset = offset;
3910 
3911 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3912 	if (ret == 0)
3913 		*valp = c.u.i2c.data;
3914 	return ret;
3915 }
3916 
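/*
 * Illustrative sketch, assuming an SFP+ module whose EEPROM sits at the
 * standard i2c address 0xa0: read the module identifier byte at offset 0.
 */
static inline int t4_read_sfp_id_example(struct adapter *adap,
    unsigned int mbox, unsigned int port_id, u8 *id)
{
	return t4_i2c_rd(adap, mbox, port_id, 0xa0, 0, id);
}
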
3917 /**
3918  *	t4_mdio_rd - read a PHY register through MDIO
3919  *	@adap: the adapter
3920  *	@mbox: mailbox to use for the FW command
3921  *	@phy_addr: the PHY address
3922  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3923  *	@reg: the register to read
3924  *	@valp: where to store the value
3925  *
3926  *	Issues a FW command through the given mailbox to read a PHY register.
3927  */
3928 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3929 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3930 {
3931 	int ret;
3932 	struct fw_ldst_cmd c;
3933 
3934 	memset(&c, 0, sizeof(c));
3935 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3936 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3937 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3938 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3939 				   V_FW_LDST_CMD_MMD(mmd));
3940 	c.u.mdio.raddr = htons(reg);
3941 
3942 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3943 	if (ret == 0)
3944 		*valp = ntohs(c.u.mdio.rval);
3945 	return ret;
3946 }
3947 
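/*
 * Illustrative sketch: read the two PHY identifier registers (clause 45
 * registers 2 and 3 in the PMA/PMD MMD, i.e. MMD 1) of the PHY at
 * @phy_addr.  The register/MMD numbers follow IEEE 802.3 and are an
 * assumption about the attached PHY.
 */
static inline int t4_read_phy_id_example(struct adapter *adap,
    unsigned int mbox, unsigned int phy_addr, u32 *phy_id)
{
	unsigned int id1, id2;
	int ret;

	ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 2, &id1);
	if (ret == 0)
		ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 3, &id2);
	if (ret == 0)
		*phy_id = (id1 << 16) | id2;
	return ret;
}
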
3948 /**
3949  *	t4_mdio_wr - write a PHY register through MDIO
3950  *	@adap: the adapter
3951  *	@mbox: mailbox to use for the FW command
3952  *	@phy_addr: the PHY address
3953  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3954  *	@reg: the register to write
3955  *	@valp: value to write
3956  *
3957  *	Issues a FW command through the given mailbox to write a PHY register.
3958  */
3959 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3960 	       unsigned int mmd, unsigned int reg, unsigned int val)
3961 {
3962 	struct fw_ldst_cmd c;
3963 
3964 	memset(&c, 0, sizeof(c));
3965 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3966 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3967 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3968 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3969 				   V_FW_LDST_CMD_MMD(mmd));
3970 	c.u.mdio.raddr = htons(reg);
3971 	c.u.mdio.rval = htons(val);
3972 
3973 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3974 }
3975 
3976 /**
3977  *	t4_sge_ctxt_flush - flush the SGE context cache
3978  *	@adap: the adapter
3979  *	@mbox: mailbox to use for the FW command
3980  *
3981  *	Issues a FW command through the given mailbox to flush the
3982  *	SGE context cache.
3983  */
3984 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
3985 {
3986 	int ret;
3987 	struct fw_ldst_cmd c;
3988 
3989 	memset(&c, 0, sizeof(c));
3990 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3991 			F_FW_CMD_READ |
3992 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
3993 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3994 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
3995 
3996 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3997 	return ret;
3998 }
3999 
4000 /**
4001  *	t4_sge_ctxt_rd - read an SGE context through FW
4002  *	@adap: the adapter
4003  *	@mbox: mailbox to use for the FW command
4004  *	@cid: the context id
4005  *	@ctype: the context type
4006  *	@data: where to store the context data
4007  *
4008  *	Issues a FW command through the given mailbox to read an SGE context.
4009  */
4010 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4011 		   enum ctxt_type ctype, u32 *data)
4012 {
4013 	int ret;
4014 	struct fw_ldst_cmd c;
4015 
4016 	if (ctype == CTXT_EGRESS)
4017 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4018 	else if (ctype == CTXT_INGRESS)
4019 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4020 	else if (ctype == CTXT_FLM)
4021 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4022 	else
4023 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4024 
4025 	memset(&c, 0, sizeof(c));
4026 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4027 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4028 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4029 	c.u.idctxt.physid = htonl(cid);
4030 
4031 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4032 	if (ret == 0) {
4033 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4034 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4035 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4036 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4037 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4038 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4039 	}
4040 	return ret;
4041 }
4042 
4043 /**
4044  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4045  *	@adap: the adapter
4046  *	@cid: the context id
4047  *	@ctype: the context type
4048  *	@data: where to store the context data
4049  *
4050  *	Reads an SGE context directly, bypassing FW.  This is only for
4051  *	debugging when FW is unavailable.
4052  */
4053 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4054 		      u32 *data)
4055 {
4056 	int i, ret;
4057 
4058 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4059 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4060 	if (!ret)
4061 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4062 			*data++ = t4_read_reg(adap, i);
4063 	return ret;
4064 }
4065 
4066 /**
4067  *	t4_fw_hello - establish communication with FW
4068  *	@adap: the adapter
4069  *	@mbox: mailbox to use for the FW command
4070  *	@evt_mbox: mailbox to receive async FW events
4071  *	@master: specifies the caller's willingness to be the device master
4072  *	@state: returns the current device state (if non-NULL)
4073  *
4074  *	Issues a command to establish communication with FW.  Returns either
4075  *	an error (negative integer) or the mailbox of the Master PF.
4076  */
4077 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4078 		enum dev_master master, enum dev_state *state)
4079 {
4080 	int ret;
4081 	struct fw_hello_cmd c;
4082 	u32 v;
4083 	unsigned int master_mbox;
4084 	int retries = FW_CMD_HELLO_RETRIES;
4085 
4086 retry:
4087 	memset(&c, 0, sizeof(c));
4088 	INIT_CMD(c, HELLO, WRITE);
4089 	c.err_to_clearinit = htonl(
4090 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4091 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4092 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4093 			M_FW_HELLO_CMD_MBMASTER) |
4094 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4095 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4096 		F_FW_HELLO_CMD_CLEARINIT);
4097 
4098 	/*
4099 	 * Issue the HELLO command to the firmware.  If it's not successful
4100 	 * but indicates that we got a "busy" or "timeout" condition, retry
4101 	 * the HELLO until we exhaust our retry limit.
4102 	 */
4103 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4104 	if (ret != FW_SUCCESS) {
4105 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4106 			goto retry;
4107 		return ret;
4108 	}
4109 
4110 	v = ntohl(c.err_to_clearinit);
4111 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4112 	if (state) {
4113 		if (v & F_FW_HELLO_CMD_ERR)
4114 			*state = DEV_STATE_ERR;
4115 		else if (v & F_FW_HELLO_CMD_INIT)
4116 			*state = DEV_STATE_INIT;
4117 		else
4118 			*state = DEV_STATE_UNINIT;
4119 	}
4120 
4121 	/*
4122 	 * If we're not the Master PF then we need to wait around for the
4123 	 * Master PF Driver to finish setting up the adapter.
4124 	 *
4125 	 * Note that we also do this wait if we're a non-Master-capable PF and
4126 	 * there is no current Master PF; a Master PF may show up momentarily
4127 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4128 	 * OS loads lots of different drivers rapidly at the same time).  In
4129 	 * this case, the Master PF returned by the firmware will be
4130 	 * M_PCIE_FW_MASTER so the test below will work ...
4131 	 */
4132 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4133 	    master_mbox != mbox) {
4134 		int waiting = FW_CMD_HELLO_TIMEOUT;
4135 
4136 		/*
4137 		 * Wait for the firmware to either indicate an error or
4138 		 * initialized state.  If we see either of these we bail out
4139 		 * and report the issue to the caller.  If we exhaust the
4140 		 * "hello timeout" and we haven't exhausted our retries, try
4141 		 * again.  Otherwise bail with a timeout error.
4142 		 */
4143 		for (;;) {
4144 			u32 pcie_fw;
4145 
4146 			msleep(50);
4147 			waiting -= 50;
4148 
4149 			/*
4150 			 * If neither Error nor Initialized is indicated
4151 			 * by the firmware, keep waiting till we exhaust our
4152 			 * timeout ... and then retry if we haven't exhausted
4153 			 * our retries ...
4154 			 */
4155 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4156 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4157 				if (waiting <= 0) {
4158 					if (retries-- > 0)
4159 						goto retry;
4160 
4161 					return -ETIMEDOUT;
4162 				}
4163 				continue;
4164 			}
4165 
4166 			/*
4167 			 * We have either an Error or Initialized condition;
4168 			 * report errors preferentially.
4169 			 */
4170 			if (state) {
4171 				if (pcie_fw & F_PCIE_FW_ERR)
4172 					*state = DEV_STATE_ERR;
4173 				else if (pcie_fw & F_PCIE_FW_INIT)
4174 					*state = DEV_STATE_INIT;
4175 			}
4176 
4177 			/*
4178 			 * If we arrived before a Master PF was selected and
4179 			 * there's now a valid Master PF, grab its identity
4180 			 * for our caller.
4181 			 */
4182 			if (master_mbox == M_PCIE_FW_MASTER &&
4183 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4184 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4185 			break;
4186 		}
4187 	}
4188 
4189 	return master_mbox;
4190 }
4191 
4192 /**
4193  *	t4_fw_bye - end communication with FW
4194  *	@adap: the adapter
4195  *	@mbox: mailbox to use for the FW command
4196  *
4197  *	Issues a command to terminate communication with FW.
4198  */
4199 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4200 {
4201 	struct fw_bye_cmd c;
4202 
4203 	memset(&c, 0, sizeof(c));
4204 	INIT_CMD(c, BYE, WRITE);
4205 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4206 }
4207 
4208 /**
4209  *	t4_fw_reset - issue a reset to FW
4210  *	@adap: the adapter
4211  *	@mbox: mailbox to use for the FW command
4212  *	@reset: specifies the type of reset to perform
4213  *
4214  *	Issues a reset command of the specified type to FW.
4215  */
4216 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4217 {
4218 	struct fw_reset_cmd c;
4219 
4220 	memset(&c, 0, sizeof(c));
4221 	INIT_CMD(c, RESET, WRITE);
4222 	c.val = htonl(reset);
4223 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4224 }
4225 
4226 /**
4227  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4228  *	@adap: the adapter
4229  *	@mbox: mailbox to use for the FW RESET command (if desired)
4230  *	@force: force uP into RESET even if FW RESET command fails
4231  *
4232  *	Issues a RESET command to firmware (if desired) with a HALT indication
4233  *	and then puts the microprocessor into RESET state.  The RESET command
4234  *	will only be issued if a legitimate mailbox is provided (mbox <=
4235  *	M_PCIE_FW_MASTER).
4236  *
4237  *	This is generally used in order for the host to safely manipulate the
4238  *	adapter without fear of conflicting with whatever the firmware might
4239  *	be doing.  The only way out of this state is to RESTART the firmware
4240  *	...
4241  */
4242 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4243 {
4244 	int ret = 0;
4245 
4246 	/*
4247 	 * If a legitimate mailbox is provided, issue a RESET command
4248 	 * with a HALT indication.
4249 	 */
4250 	if (mbox <= M_PCIE_FW_MASTER) {
4251 		struct fw_reset_cmd c;
4252 
4253 		memset(&c, 0, sizeof(c));
4254 		INIT_CMD(c, RESET, WRITE);
4255 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4256 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4257 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4258 	}
4259 
4260 	/*
4261 	 * Normally we won't complete the operation if the firmware RESET
4262 	 * command fails but if our caller insists we'll go ahead and put the
4263 	 * uP into RESET.  This can be useful if the firmware is hung or even
4264 	 * missing ...  We'll have to take the risk of putting the uP into
4265 	 * RESET without the cooperation of firmware in that case.
4266 	 *
4267 	 * We also force the firmware's HALT flag to be on in case we bypassed
4268 	 * the firmware RESET command above or we're dealing with old firmware
4269 	 * which doesn't have the HALT capability.  This will serve as a flag
4270 	 * for the incoming firmware to know that it's coming out of a HALT
4271 	 * rather than a RESET ... if it's new enough to understand that ...
4272 	 */
4273 	if (ret == 0 || force) {
4274 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4275 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4276 	}
4277 
4278 	/*
4279 	 * And we always return the result of the firmware RESET command
4280 	 * even when we force the uP into RESET ...
4281 	 */
4282 	return ret;
4283 }
4284 
4285 /**
4286  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4287  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
4288  *	@reset: if we want to do a RESET to restart things
4289  *
4290  *	Restart firmware previously halted by t4_fw_halt().  On successful
4291  *	return the previous PF Master remains as the new PF Master and there
4292  *	is no need to issue a new HELLO command, etc.
4293  *
4294  *	We do this in two ways:
4295  *
4296  *	 1. If we're dealing with newer firmware we'll simply want to take
4297  *	    the chip's microprocessor out of RESET.  This will cause the
4298  *	    firmware to start up from its start vector.  And then we'll loop
4299  *	    until the firmware indicates it's started again (PCIE_FW.HALT
4300  *	    reset to 0) or we timeout.
4301  *
4302  *	 2. If we're dealing with older firmware then we'll need to RESET
4303  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4304  *	    flag and automatically RESET itself on startup.
4305  */
4306 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4307 {
4308 	if (reset) {
4309 		/*
4310 		 * Since we're directing the RESET instead of the firmware
4311 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4312 		 * bit.
4313 		 */
4314 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4315 
4316 		/*
4317 		 * If we've been given a valid mailbox, first try to get the
4318 		 * firmware to do the RESET.  If that works, great and we can
4319 		 * return success.  Otherwise, if we haven't been given a
4320 		 * valid mailbox or the RESET command failed, fall back to
4321 		 * hitting the chip with a hammer.
4322 		 */
4323 		if (mbox <= M_PCIE_FW_MASTER) {
4324 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4325 			msleep(100);
4326 			if (t4_fw_reset(adap, mbox,
4327 					F_PIORST | F_PIORSTMODE) == 0)
4328 				return 0;
4329 		}
4330 
4331 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4332 		msleep(2000);
4333 	} else {
4334 		int ms;
4335 
4336 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4337 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4338 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4339 				return FW_SUCCESS;
4340 			msleep(100);
4341 			ms += 100;
4342 		}
4343 		return -ETIMEDOUT;
4344 	}
4345 	return 0;
4346 }
4347 
4348 /**
4349  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4350  *	@adap: the adapter
4351  *	@mbox: mailbox to use for the FW RESET command (if desired)
4352  *	@fw_data: the firmware image to write
4353  *	@size: image size
4354  *	@force: force upgrade even if firmware doesn't cooperate
4355  *
4356  *	Perform all of the steps necessary for upgrading an adapter's
4357  *	firmware image.  Normally this requires the cooperation of the
4358  *	existing firmware in order to halt all existing activities
4359  *	but if an invalid mailbox token is passed in we skip that step
4360  *	(though we'll still put the adapter microprocessor into RESET in
4361  *	that case).
4362  *
4363  *	On successful return the new firmware will have been loaded and
4364  *	the adapter will have been fully RESET losing all previous setup
4365  *	state.  On unsuccessful return the adapter may be completely hosed ...
4366  *	positive errno indicates that the adapter is ~probably~ intact, a
4367  *	negative errno indicates that things are looking bad ...
4368  */
4369 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4370 		  const u8 *fw_data, unsigned int size, int force)
4371 {
4372 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4373 	int reset, ret;
4374 
4375 	ret = t4_fw_halt(adap, mbox, force);
4376 	if (ret < 0 && !force)
4377 		return ret;
4378 
4379 	ret = t4_load_fw(adap, fw_data, size);
4380 	if (ret < 0)
4381 		return ret;
4382 
4383 	/*
4384 	 * Older versions of the firmware don't understand the new
4385 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4386 	 * restart.  So for newly loaded older firmware we'll have to do the
4387 	 * RESET for it so it starts up on a clean slate.  We can tell if
4388 	 * the newly loaded firmware will handle this right by checking
4389 	 * its header flags to see if it advertises the capability.
4390 	 */
4391 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4392 	return t4_fw_restart(adap, mbox, reset);
4393 }
4394 
4395 /**
4396  *	t4_fw_initialize - ask FW to initialize the device
4397  *	@adap: the adapter
4398  *	@mbox: mailbox to use for the FW command
4399  *
4400  *	Issues a command to FW to partially initialize the device.  This
4401  *	performs initialization that generally doesn't depend on user input.
4402  */
4403 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4404 {
4405 	struct fw_initialize_cmd c;
4406 
4407 	memset(&c, 0, sizeof(c));
4408 	INIT_CMD(c, INITIALIZE, WRITE);
4409 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4410 }
4411 
4412 /**
4413  *	t4_query_params - query FW or device parameters
4414  *	@adap: the adapter
4415  *	@mbox: mailbox to use for the FW command
4416  *	@pf: the PF
4417  *	@vf: the VF
4418  *	@nparams: the number of parameters
4419  *	@params: the parameter names
4420  *	@val: the parameter values
4421  *
4422  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4423  *	queried at once.
4424  */
4425 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4426 		    unsigned int vf, unsigned int nparams, const u32 *params,
4427 		    u32 *val)
4428 {
4429 	int i, ret;
4430 	struct fw_params_cmd c;
4431 	__be32 *p = &c.param[0].mnem;
4432 
4433 	if (nparams > 7)
4434 		return -EINVAL;
4435 
4436 	memset(&c, 0, sizeof(c));
4437 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4438 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4439 			    V_FW_PARAMS_CMD_VFN(vf));
4440 	c.retval_len16 = htonl(FW_LEN16(c));
4441 
4442 	for (i = 0; i < nparams; i++, p += 2)
4443 		*p = htonl(*params++);
4444 
4445 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4446 	if (ret == 0)
4447 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4448 			*val++ = ntohl(*p);
4449 	return ret;
4450 }
4451 
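/*
 * Illustrative sketch: read the running firmware's revision with a
 * single-parameter query.  The mnemonic encoding (device parameter
 * "FWREV") follows the FW_PARAMS definitions in t4fw_interface.h.
 */
static inline int t4_query_fw_rev_example(struct adapter *adap,
    unsigned int mbox, unsigned int pf, u32 *fwrev)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);

	return t4_query_params(adap, mbox, pf, 0, 1, &param, fwrev);
}
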
4452 /**
4453  *	t4_set_params - sets FW or device parameters
4454  *	@adap: the adapter
4455  *	@mbox: mailbox to use for the FW command
4456  *	@pf: the PF
4457  *	@vf: the VF
4458  *	@nparams: the number of parameters
4459  *	@params: the parameter names
4460  *	@val: the parameter values
4461  *
4462  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4463  *	specified at once.
4464  */
4465 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4466 		  unsigned int vf, unsigned int nparams, const u32 *params,
4467 		  const u32 *val)
4468 {
4469 	struct fw_params_cmd c;
4470 	__be32 *p = &c.param[0].mnem;
4471 
4472 	if (nparams > 7)
4473 		return -EINVAL;
4474 
4475 	memset(&c, 0, sizeof(c));
4476 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4477 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4478 			    V_FW_PARAMS_CMD_VFN(vf));
4479 	c.retval_len16 = htonl(FW_LEN16(c));
4480 
4481 	while (nparams--) {
4482 		*p++ = htonl(*params++);
4483 		*p++ = htonl(*val++);
4484 	}
4485 
4486 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4487 }
4488 
4489 /**
4490  *	t4_cfg_pfvf - configure PF/VF resource limits
4491  *	@adap: the adapter
4492  *	@mbox: mailbox to use for the FW command
4493  *	@pf: the PF being configured
4494  *	@vf: the VF being configured
4495  *	@txq: the max number of egress queues
4496  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4497  *	@rxqi: the max number of interrupt-capable ingress queues
4498  *	@rxq: the max number of interruptless ingress queues
4499  *	@tc: the PCI traffic class
4500  *	@vi: the max number of virtual interfaces
4501  *	@cmask: the channel access rights mask for the PF/VF
4502  *	@pmask: the port access rights mask for the PF/VF
4503  *	@nexact: the maximum number of exact MPS filters
4504  *	@rcaps: read capabilities
4505  *	@wxcaps: write/execute capabilities
4506  *
4507  *	Configures resource limits and capabilities for a physical or virtual
4508  *	function.
4509  */
4510 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4511 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4512 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4513 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4514 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4515 {
4516 	struct fw_pfvf_cmd c;
4517 
4518 	memset(&c, 0, sizeof(c));
4519 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4520 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4521 			    V_FW_PFVF_CMD_VFN(vf));
4522 	c.retval_len16 = htonl(FW_LEN16(c));
4523 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4524 			       V_FW_PFVF_CMD_NIQ(rxq));
4525 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4526 			      V_FW_PFVF_CMD_PMASK(pmask) |
4527 			      V_FW_PFVF_CMD_NEQ(txq));
4528 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4529 				V_FW_PFVF_CMD_NEXACTF(nexact));
4530 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4531 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4532 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4533 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4534 }
4535 
4536 /**
4537  *	t4_alloc_vi_func - allocate a virtual interface
4538  *	@adap: the adapter
4539  *	@mbox: mailbox to use for the FW command
4540  *	@port: physical port associated with the VI
4541  *	@pf: the PF owning the VI
4542  *	@vf: the VF owning the VI
4543  *	@nmac: number of MAC addresses needed (1 to 5)
4544  *	@mac: the MAC addresses of the VI
4545  *	@rss_size: size of RSS table slice associated with this VI
4546  *	@portfunc: which Port Application Function MAC Address is desired
4547  *	@idstype: Intrusion Detection Type
4548  *
4549  *	Allocates a virtual interface for the given physical port.  If @mac is
4550  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4551  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4552  *	stored consecutively so the space needed is @nmac * 6 bytes.
4553  *	Returns a negative error number or the non-negative VI id.
4554  */
4555 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4556 		     unsigned int port, unsigned int pf, unsigned int vf,
4557 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4558 		     unsigned int portfunc, unsigned int idstype)
4559 {
4560 	int ret;
4561 	struct fw_vi_cmd c;
4562 
4563 	memset(&c, 0, sizeof(c));
4564 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4565 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4566 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4567 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4568 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4569 			       V_FW_VI_CMD_FUNC(portfunc));
4570 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4571 	c.nmac = nmac - 1;
4572 
4573 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4574 	if (ret)
4575 		return ret;
4576 
4577 	if (mac) {
4578 		memcpy(mac, c.mac, sizeof(c.mac));
4579 		switch (nmac) {
4580 		case 5:
4581 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
4582 		case 4:
4583 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
4584 		case 3:
4585 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
4586 		case 2:
4587 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4588 		}
4589 	}
4590 	if (rss_size)
4591 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
4592 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4593 }
4594 
4595 /**
4596  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4597  *	@adap: the adapter
4598  *	@mbox: mailbox to use for the FW command
4599  *	@port: physical port associated with the VI
4600  *	@pf: the PF owning the VI
4601  *	@vf: the VF owning the VI
4602  *	@nmac: number of MAC addresses needed (1 to 5)
4603  *	@mac: the MAC addresses of the VI
4604  *	@rss_size: size of RSS table slice associated with this VI
4605  *
4606  *	Backwards-compatible convenience routine to allocate a Virtual
4607  *	Interface with an Ethernet Port Application Function and Intrusion
4608  *	Detection System disabled.
4609  */
4610 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4611 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4612 		unsigned int *rss_size)
4613 {
4614 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4615 				FW_VI_FUNC_ETH, 0);
4616 }
4617 
4618 /**
4619  *	t4_free_vi - free a virtual interface
4620  *	@adap: the adapter
4621  *	@mbox: mailbox to use for the FW command
4622  *	@pf: the PF owning the VI
4623  *	@vf: the VF owning the VI
4624  *	@viid: virtual interface identifier
4625  *
4626  *	Free a previously allocated virtual interface.
4627  */
4628 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4629 	       unsigned int vf, unsigned int viid)
4630 {
4631 	struct fw_vi_cmd c;
4632 
4633 	memset(&c, 0, sizeof(c));
4634 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4635 			    F_FW_CMD_REQUEST |
4636 			    F_FW_CMD_EXEC |
4637 			    V_FW_VI_CMD_PFN(pf) |
4638 			    V_FW_VI_CMD_VFN(vf));
4639 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4640 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4641 
4642 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4643 }
4644 
4645 /**
4646  *	t4_set_rxmode - set Rx properties of a virtual interface
4647  *	@adap: the adapter
4648  *	@mbox: mailbox to use for the FW command
4649  *	@viid: the VI id
4650  *	@mtu: the new MTU or -1
4651  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4652  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4653  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4654  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4655  *	@sleep_ok: if true we may sleep while awaiting command completion
4656  *
4657  *	Sets Rx properties of a virtual interface.
4658  */
4659 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4660 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4661 		  bool sleep_ok)
4662 {
4663 	struct fw_vi_rxmode_cmd c;
4664 
4665 	/* convert to FW values */
4666 	if (mtu < 0)
4667 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4668 	if (promisc < 0)
4669 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4670 	if (all_multi < 0)
4671 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4672 	if (bcast < 0)
4673 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4674 	if (vlanex < 0)
4675 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4676 
4677 	memset(&c, 0, sizeof(c));
4678 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4679 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4680 	c.retval_len16 = htonl(FW_LEN16(c));
4681 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4682 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4683 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4684 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4685 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4686 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4687 }
4688 
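/*
 * Illustrative sketch: enable promiscuous mode on a VI while leaving the
 * MTU and the remaining Rx properties untouched (-1 means "no change").
 */
static inline int t4_set_promisc_example(struct adapter *adap,
    unsigned int mbox, unsigned int viid)
{
	return t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
}
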
4689 /**
4690  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4691  *	@adap: the adapter
4692  *	@mbox: mailbox to use for the FW command
4693  *	@viid: the VI id
4694  *	@free: if true any existing filters for this VI id are first removed
4695  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4696  *	@addr: the MAC address(es)
4697  *	@idx: where to store the index of each allocated filter
4698  *	@hash: pointer to hash address filter bitmap
4699  *	@sleep_ok: call is allowed to sleep
4700  *
4701  *	Allocates an exact-match filter for each of the supplied addresses and
4702  *	sets it to the corresponding address.  If @idx is not %NULL it should
4703  *	have at least @naddr entries, each of which will be set to the index of
4704  *	the filter allocated for the corresponding MAC address.  If a filter
4705  *	could not be allocated for an address its index is set to 0xffff.
4706  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4707  *	are hashed and the hash filter bitmap pointed to by @hash is updated.
4708  *
4709  *	Returns a negative error number or the number of filters allocated.
4710  */
4711 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4712 		      unsigned int viid, bool free, unsigned int naddr,
4713 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4714 {
4715 	int offset, ret = 0;
4716 	struct fw_vi_mac_cmd c;
4717 	unsigned int nfilters = 0;
4718 	unsigned int rem = naddr;
4719 
4720 	if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
4721 		return -EINVAL;
4722 
4723 	for (offset = 0; offset < naddr ; /**/) {
4724 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4725 					 ? rem
4726 					 : ARRAY_SIZE(c.u.exact));
4727 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4728 						     u.exact[fw_naddr]), 16);
4729 		struct fw_vi_mac_exact *p;
4730 		int i;
4731 
4732 		memset(&c, 0, sizeof(c));
4733 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4734 				     F_FW_CMD_REQUEST |
4735 				     F_FW_CMD_WRITE |
4736 				     V_FW_CMD_EXEC(free) |
4737 				     V_FW_VI_MAC_CMD_VIID(viid));
4738 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4739 					    V_FW_CMD_LEN16(len16));
4740 
4741 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4742 			p->valid_to_idx = htons(
4743 				F_FW_VI_MAC_CMD_VALID |
4744 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4745 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4746 		}
4747 
4748 		/*
4749 		 * It's okay if we run out of space in our MAC address arena.
4750 		 * Some of the addresses we submit may get stored so we need
4751 		 * to run through the reply to see what the results were ...
4752 		 */
4753 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4754 		if (ret && ret != -FW_ENOMEM)
4755 			break;
4756 
4757 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4758 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4759 
4760 			if (idx)
4761 				idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
4762 						 ? 0xffff
4763 						 : index);
4764 			if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
4765 				nfilters++;
4766 			else if (hash)
4767 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4768 		}
4769 
4770 		free = false;
4771 		offset += fw_naddr;
4772 		rem -= fw_naddr;
4773 	}
4774 
4775 	if (ret == 0 || ret == -FW_ENOMEM)
4776 		ret = nfilters;
4777 	return ret;
4778 }
4779 
4780 /**
4781  *	t4_change_mac - modifies the exact-match filter for a MAC address
4782  *	@adap: the adapter
4783  *	@mbox: mailbox to use for the FW command
4784  *	@viid: the VI id
4785  *	@idx: index of existing filter for old value of MAC address, or -1
4786  *	@addr: the new MAC address value
4787  *	@persist: whether a new MAC allocation should be persistent
4788  *	@add_smt: if true also add the address to the HW SMT
4789  *
4790  *	Modifies an exact-match filter and sets it to the new MAC address if
4791  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4792  *	latter case the address is added persistently if @persist is %true.
4793  *
4794  *	Note that in general it is not possible to modify the value of a given
4795  *	filter so the generic way to modify an address filter is to free the one
4796  *	being used by the old address value and allocate a new filter for the
4797  *	new address value.
4798  *
4799  *	Returns a negative error number or the index of the filter with the new
4800  *	MAC value.  Note that this index may differ from @idx.
4801  */
4802 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4803 		  int idx, const u8 *addr, bool persist, bool add_smt)
4804 {
4805 	int ret, mode;
4806 	struct fw_vi_mac_cmd c;
4807 	struct fw_vi_mac_exact *p = c.u.exact;
4808 
4809 	if (idx < 0)                             /* new allocation */
4810 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4811 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4812 
4813 	memset(&c, 0, sizeof(c));
4814 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4815 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4816 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4817 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4818 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4819 				V_FW_VI_MAC_CMD_IDX(idx));
4820 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4821 
4822 	ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4823 	if (ret == 0) {
4824 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4825 		if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4826 			ret = -ENOMEM;
4827 	}
4828 	return ret;
4829 }
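
/*
 * Usage sketch (illustrative only): a filter cannot be modified in place,
 * so callers remember the index returned here and pass it back as @idx on
 * the next change, letting the firmware replace the old entry:
 *
 *	ret = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
 *	    new_lladdr, true, true);
 *	if (ret >= 0)
 *		pi->xact_addr_filt = ret;	(may differ from the old idx)
 */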
4830 
4831 /**
4832  *	t4_set_addr_hash - program the MAC inexact-match hash filter
4833  *	@adap: the adapter
4834  *	@mbox: mailbox to use for the FW command
4835  *	@viid: the VI id
4836  *	@ucast: whether the hash filter should also match unicast addresses
4837  *	@vec: the value to be written to the hash filter
4838  *	@sleep_ok: call is allowed to sleep
4839  *
4840  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4841  */
4842 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4843 		     bool ucast, u64 vec, bool sleep_ok)
4844 {
4845 	struct fw_vi_mac_cmd c;
4846 
4847 	memset(&c, 0, sizeof(c));
4848 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4849 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4850 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4851 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4852 				    V_FW_CMD_LEN16(1));
4853 	c.u.hash.hashvec = cpu_to_be64(vec);
4854 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4855 }
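
/*
 * Sketch (editor's note): @vec is the same 64-bit bitmap built by the
 * overflow path in the allocation loop above; each address sets the bit
 * chosen by hash_mac_addr():
 *
 *	u64 vec = 0;
 *	for (i = 0; i < n; i++)
 *		vec |= 1ULL << hash_mac_addr(mcaddr[i]);
 *	(void) t4_set_addr_hash(sc, sc->mbox, viid, false, vec, true);
 */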
4856 
4857 /**
4858  *	t4_enable_vi - enable/disable a virtual interface
4859  *	@adap: the adapter
4860  *	@mbox: mailbox to use for the FW command
4861  *	@viid: the VI id
4862  *	@rx_en: 1=enable Rx, 0=disable Rx
4863  *	@tx_en: 1=enable Tx, 0=disable Tx
4864  *
4865  *	Enables/disables a virtual interface.
4866  */
4867 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4868 		 bool rx_en, bool tx_en)
4869 {
4870 	struct fw_vi_enable_cmd c;
4871 
4872 	memset(&c, 0, sizeof(c));
4873 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4874 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4875 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4876 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4877 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4878 }
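
/*
 * Usage sketch (illustrative): Rx and Tx are gated independently, so a
 * driver can quiesce the receive side first and stop Tx only once its
 * queues have drained:
 *
 *	(void) t4_enable_vi(sc, sc->mbox, pi->viid, false, true);
 *	... drain Rx, wait for Tx to empty ...
 *	(void) t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
 */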
4879 
4880 /**
4881  *	t4_identify_port - identify a VI's port by blinking its LED
4882  *	@adap: the adapter
4883  *	@mbox: mailbox to use for the FW command
4884  *	@viid: the VI id
4885  *	@nblinks: how many times to blink the LED at 2.5 Hz
4886  *
4887  *	Identifies a VI's port by blinking its LED.
4888  */
4889 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4890 		     unsigned int nblinks)
4891 {
4892 	struct fw_vi_enable_cmd c;
4893 
4894 	memset(&c, 0, sizeof(c));
4895 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4896 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4897 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4898 	c.blinkdur = htons(nblinks);
4899 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4900 }
4901 
4902 /**
4903  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4904  *	@adap: the adapter
4905  *	@mbox: mailbox to use for the FW command
4906  *	@start: %true to enable the queues, %false to disable them
4907  *	@pf: the PF owning the queues
4908  *	@vf: the VF owning the queues
4909  *	@iqid: ingress queue id
4910  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4911  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4912  *
4913  *	Starts or stops an ingress queue and its associated FLs, if any.
4914  */
4915 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4916 		     unsigned int pf, unsigned int vf, unsigned int iqid,
4917 		     unsigned int fl0id, unsigned int fl1id)
4918 {
4919 	struct fw_iq_cmd c;
4920 
4921 	memset(&c, 0, sizeof(c));
4922 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4923 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4924 			    V_FW_IQ_CMD_VFN(vf));
4925 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4926 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4927 	c.iqid = htons(iqid);
4928 	c.fl0id = htons(fl0id);
4929 	c.fl1id = htons(fl1id);
4930 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4931 }
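
/*
 * Sketch (editor's illustration): an iq with a single free list passes
 * 0xffff for the absent FL1, per the convention documented above.  To
 * stop and later restart such a queue:
 *
 *	(void) t4_iq_start_stop(sc, sc->mbox, false, sc->pf, 0,
 *	    iq->cntxt_id, fl->cntxt_id, 0xffff);
 *	(void) t4_iq_start_stop(sc, sc->mbox, true, sc->pf, 0,
 *	    iq->cntxt_id, fl->cntxt_id, 0xffff);
 */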
4932 
4933 /**
4934  *	t4_iq_free - free an ingress queue and its FLs
4935  *	@adap: the adapter
4936  *	@mbox: mailbox to use for the FW command
4937  *	@pf: the PF owning the queues
4938  *	@vf: the VF owning the queues
4939  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4940  *	@iqid: ingress queue id
4941  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4942  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4943  *
4944  *	Frees an ingress queue and its associated FLs, if any.
4945  */
4946 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4947 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4948 	       unsigned int fl0id, unsigned int fl1id)
4949 {
4950 	struct fw_iq_cmd c;
4951 
4952 	memset(&c, 0, sizeof(c));
4953 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4954 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4955 			    V_FW_IQ_CMD_VFN(vf));
4956 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4957 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4958 	c.iqid = htons(iqid);
4959 	c.fl0id = htons(fl0id);
4960 	c.fl1id = htons(fl1id);
4961 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4962 }
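
/*
 * Usage sketch (illustrative): freeing an interrupt-capable iq that never
 * had free lists attached uses 0xffff for both FL ids:
 *
 *	(void) t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *	    iq->cntxt_id, 0xffff, 0xffff);
 */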
4963 
4964 /**
4965  *	t4_eth_eq_free - free an Ethernet egress queue
4966  *	@adap: the adapter
4967  *	@mbox: mailbox to use for the FW command
4968  *	@pf: the PF owning the queue
4969  *	@vf: the VF owning the queue
4970  *	@eqid: egress queue id
4971  *
4972  *	Frees an Ethernet egress queue.
4973  */
4974 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4975 		   unsigned int vf, unsigned int eqid)
4976 {
4977 	struct fw_eq_eth_cmd c;
4978 
4979 	memset(&c, 0, sizeof(c));
4980 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4981 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4982 			    V_FW_EQ_ETH_CMD_VFN(vf));
4983 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4984 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4985 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4986 }
4987 
4988 /**
4989  *	t4_ctrl_eq_free - free a control egress queue
4990  *	@adap: the adapter
4991  *	@mbox: mailbox to use for the FW command
4992  *	@pf: the PF owning the queue
4993  *	@vf: the VF owning the queue
4994  *	@eqid: egress queue id
4995  *
4996  *	Frees a control egress queue.
4997  */
4998 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4999 		    unsigned int vf, unsigned int eqid)
5000 {
5001 	struct fw_eq_ctrl_cmd c;
5002 
5003 	memset(&c, 0, sizeof(c));
5004 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5005 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5006 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5007 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5008 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5009 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5010 }
5011 
5012 /**
5013  *	t4_ofld_eq_free - free an offload egress queue
5014  *	@adap: the adapter
5015  *	@mbox: mailbox to use for the FW command
5016  *	@pf: the PF owning the queue
5017  *	@vf: the VF owning the queue
5018  *	@eqid: egress queue id
5019  *
5020  *	Frees an offload egress queue.
5021  */
5022 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5023 		    unsigned int vf, unsigned int eqid)
5024 {
5025 	struct fw_eq_ofld_cmd c;
5026 
5027 	memset(&c, 0, sizeof(c));
5028 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5029 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5030 			    V_FW_EQ_OFLD_CMD_VFN(vf));
5031 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5032 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5033 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5034 }
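
/*
 * Editor's note: the three *_eq_free() helpers above are identical except
 * for the command opcode and field macros; callers must match the helper
 * to how the queue was allocated, e.g. for an Ethernet txq:
 *
 *	(void) t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, txq->eq.cntxt_id);
 */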
5035 
5036 /**
5037  *	t4_handle_fw_rpl - process a FW reply message
5038  *	@adap: the adapter
5039  *	@rpl: start of the FW message
5040  *
5041  *	Processes a FW message, such as link state change messages.
5042  */
5043 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5044 {
5045 	u8 opcode = *(const u8 *)rpl;
5046 	const struct fw_port_cmd *p = (const void *)rpl;
5047 	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5048 
5049 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5050 		/* link/module state change message */
5051 		int speed = 0, fc = 0, i;
5052 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5053 		struct port_info *pi = NULL;
5054 		struct link_config *lc;
5055 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5056 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5057 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5058 
5059 		if (stat & F_FW_PORT_CMD_RXPAUSE)
5060 			fc |= PAUSE_RX;
5061 		if (stat & F_FW_PORT_CMD_TXPAUSE)
5062 			fc |= PAUSE_TX;
5063 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5064 			speed = SPEED_100;
5065 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5066 			speed = SPEED_1000;
5067 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5068 			speed = SPEED_10000;
5069 
5070 		for_each_port(adap, i) {
5071 			pi = adap2pinfo(adap, i);
5072 			if (pi->tx_chan == chan)
5073 				break;
5074 		}
5075 		lc = &pi->link_cfg;
5076 
5077 		if (link_ok != lc->link_ok || speed != lc->speed ||
5078 		    fc != lc->fc) {                    /* something changed */
5079 			lc->link_ok = link_ok;
5080 			lc->speed = speed;
5081 			lc->fc = fc;
5082 			t4_os_link_changed(adap, i, link_ok);
5083 		}
5084 		if (mod != pi->mod_type) {
5085 			pi->mod_type = mod;
5086 			t4_os_portmod_changed(adap, i);
5087 		}
5088 	} else {
5089 		CH_WARN_RATELIMIT(adap,
5090 		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5091 		return -EINVAL;
5092 	}
5093 	return 0;
5094 }
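
/*
 * Sketch (editor's illustration): this is fed from the firmware event
 * queue.  A minimal dispatch, assuming a cpl_fw6_msg "cpl" that the
 * caller has already identified as carrying a FW command reply:
 *
 *	ret = t4_handle_fw_rpl(sc, &cpl->data[0]);
 */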
5095 
5096 /**
5097  *	get_pci_mode - determine a card's PCI mode
5098  *	@adapter: the adapter
5099  *	@p: where to store the PCI settings
5100  *
5101  *	Determines a card's PCI mode and associated parameters, such as speed
5102  *	and width.
5103  */
5104 static void __devinit get_pci_mode(struct adapter *adapter,
5105 				   struct pci_params *p)
5106 {
5107 	u16 val;
5108 	u32 pcie_cap;
5109 
5110 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5111 	if (pcie_cap) {
5112 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5113 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5114 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5115 	}
5116 }
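
/*
 * Editor's note: the decode above follows the PCIe Link Status layout:
 * bits 3:0 are the current link speed (1 = 2.5GT/s, 2 = 5GT/s) and bits
 * 9:4 the negotiated width.  A hypothetical readback of 0x0082 yields:
 *
 *	speed = 0x0082 & PCI_EXP_LNKSTA_CLS;		= 2 (Gen2)
 *	width = (0x0082 & PCI_EXP_LNKSTA_NLW) >> 4;	= 8 lanes
 */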
5117 
5118 /**
5119  *	init_link_config - initialize a link's SW state
5120  *	@lc: structure holding the link state
5121  *	@caps: link capabilities
5122  *
5123  *	Initializes the SW state maintained for each link, including the link's
5124  *	capabilities and default speed/flow-control/autonegotiation settings.
5125  */
5126 static void __devinit init_link_config(struct link_config *lc,
5127 				       unsigned int caps)
5128 {
5129 	lc->supported = caps;
5130 	lc->requested_speed = 0;
5131 	lc->speed = 0;
5132 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5133 	if (lc->supported & FW_PORT_CAP_ANEG) {
5134 		lc->advertising = lc->supported & ADVERT_MASK;
5135 		lc->autoneg = AUTONEG_ENABLE;
5136 		lc->requested_fc |= PAUSE_AUTONEG;
5137 	} else {
5138 		lc->advertising = 0;
5139 		lc->autoneg = AUTONEG_DISABLE;
5140 	}
5141 }
5142 
5143 static int __devinit wait_dev_ready(struct adapter *adap)
5144 {
5145 	u32 whoami;
5146 
5147 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5148 
5149 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
5150 		return 0;
5151 
5152 	msleep(500);
5153 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5154 	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
5155 		? 0 : -EIO);
5156 }
5157 
5158 static int __devinit get_flash_params(struct adapter *adapter)
5159 {
5160 	int ret;
5161 	u32 info = 0;
5162 
5163 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5164 	if (!ret)
5165 		ret = sf1_read(adapter, 3, 0, 1, &info);
5166 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5167 	if (ret < 0)
5168 		return ret;
5169 
5170 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5171 		return -EINVAL;
5172 	info >>= 16;                           /* log2 of size */
5173 	if (info >= 0x14 && info < 0x18)
5174 		adapter->params.sf_nsec = 1 << (info - 16);
5175 	else if (info == 0x18)
5176 		adapter->params.sf_nsec = 64;
5177 	else
5178 		return -EINVAL;
5179 	adapter->params.sf_size = 1 << info;
5180 	return 0;
5181 }
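
/*
 * Worked example (editor's note): a 4MB Numonix part returns JEDEC ID
 * bytes 0x20 0x20 0x16, so info = 0x16 (log2 of the size in bytes) after
 * the shift above, and:
 *
 *	sf_size = 1 << 0x16 = 4MB
 *	sf_nsec = 1 << (0x16 - 16) = 64 sectors of 64KB each
 */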
5182 
5183 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5184 						  u8 range)
5185 {
5186 	u16 val;
5187 	u32 pcie_cap;
5188 
5189 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5190 	if (pcie_cap) {
5191 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5192 		val &= 0xfff0;
5193 		val |= range;
5194 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5195 	}
5196 }
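
/*
 * Sketch (illustrative): @range is the 4-bit Completion Timeout Value for
 * DEVCTL2[3:0]; the PCIe spec defines 0xd as the 4s-13s range, which is
 * what t4_prep_adapter() below requests:
 *
 *	set_pcie_completion_timeout(adapter, 0xd);
 */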
5197 
5198 /**
5199  *	t4_prep_adapter - prepare SW and HW for operation
5200  *	@adapter: the adapter
5201  *
5202  *	Initializes adapter SW state for the various HW modules, sets
5203  *	initial values for some adapter tunables, and programs the PCIe
5204  *	completion timeout.
5206  */
5207 int __devinit t4_prep_adapter(struct adapter *adapter)
5208 {
5209 	int ret;
5210 
5211 	ret = wait_dev_ready(adapter);
5212 	if (ret < 0)
5213 		return ret;
5214 
5215 	get_pci_mode(adapter, &adapter->params.pci);
5216 
5217 	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
5218 	/* T4A1 chip is no longer supported */
5219 	if (adapter->params.rev == 1) {
5220 		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
5221 		return -EINVAL;
5222 	}
5223 	adapter->params.pci.vpd_cap_addr =
5224 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5225 
5226 	ret = get_flash_params(adapter);
5227 	if (ret < 0)
5228 		return ret;
5229 
5230 	ret = get_vpd_params(adapter, &adapter->params.vpd);
5231 	if (ret < 0)
5232 		return ret;
5233 
5234 	if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
5235 		/* FPGA */
5236 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5237 	} else {
5238 		/* ASIC */
5239 		adapter->params.cim_la_size = CIMLA_SIZE;
5240 	}
5241 
5242 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5243 
5244 	/*
5245 	 * Default port and clock for debugging in case we can't reach FW.
5246 	 */
5247 	adapter->params.nports = 1;
5248 	adapter->params.portvec = 1;
5249 	adapter->params.vpd.cclk = 50000;
5250 
5251 	/* Set the PCIe completion timeout to the 4s-13s range (0xd). */
5252 	set_pcie_completion_timeout(adapter, 0xd);
5253 	return 0;
5254 }
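
/*
 * Usage sketch (editor's illustration): this must succeed before any
 * mailbox traffic, so a typical attach path looks like:
 *
 *	if ((ret = t4_prep_adapter(sc)) != 0)
 *		return (ret);
 *	... firmware handshake, then t4_port_init() for each port ...
 */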
5255 
5256 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5257 {
5258 	u8 addr[6];
5259 	int ret, i, j;
5260 	struct fw_port_cmd c;
5261 	unsigned int rss_size;
5262 	adapter_t *adap = p->adapter;
5263 
5264 	memset(&c, 0, sizeof(c));
5265 
5266 	for (i = 0, j = -1; i <= p->port_id; i++) {
5267 		do {
5268 			j++;
5269 		} while ((adap->params.portvec & (1 << j)) == 0);
5270 	}
5271 
5272 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5273 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5274 			       V_FW_PORT_CMD_PORTID(j));
5275 	c.action_to_len16 = htonl(
5276 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5277 		FW_LEN16(c));
5278 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5279 	if (ret)
5280 		return ret;
5281 
5282 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5283 	if (ret < 0)
5284 		return ret;
5285 
5286 	p->viid = ret;
5287 	p->tx_chan = j;
5288 	p->lport = j;
5289 	p->rss_size = rss_size;
5290 	t4_os_set_hw_addr(adap, p->port_id, addr);
5291 
5292 	ret = ntohl(c.u.info.lstatus_to_modtype);
5293 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5294 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5295 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5296 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5297 
5298 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5299 
5300 	return 0;
5301 }
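
/*
 * Sketch (illustrative): per-port initialization once the firmware
 * connection is up, using the PF's own mailbox:
 *
 *	for_each_port(sc, i) {
 *		ret = t4_port_init(adap2pinfo(sc, i), sc->mbox, sc->pf, 0);
 *		if (ret != 0)
 *			break;
 *	}
 */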
5302 
5303 int t4_config_scheduler(struct adapter *adapter, int mode, int level,
5304 			int pktsize, int sched_class, int port, int unit,
5305 			int rate, int weight, int minrate, int maxrate)
5306 {
5307 	struct fw_sched_cmd cmd, rpl;
5308 
5309 	if (rate < 0 || unit < 0)
5310 		return -EINVAL;
5311 
5312 	memset(&cmd, 0, sizeof(cmd));
5313 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5314 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5315 	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(sizeof(cmd)/16));
5316 
5317 	cmd.u.params.sc = 1;
5318 	cmd.u.params.level = level;
5319 	cmd.u.params.mode = mode;
5320 	cmd.u.params.ch = port;
5321 	cmd.u.params.cl = sched_class;
5322 	cmd.u.params.rate = rate;
5323 	cmd.u.params.unit = unit;
5324 
5325 	switch (level) {
5326 	case FW_SCHED_PARAMS_LEVEL_CH_WRR:
5327 	case FW_SCHED_PARAMS_LEVEL_CL_WRR:
5328 		cmd.u.params.weight = cpu_to_be16(weight);
5329 		break;
5330 	case FW_SCHED_PARAMS_LEVEL_CH_RL:
5331 	case FW_SCHED_PARAMS_LEVEL_CL_RL:
5332 		cmd.u.params.max = cpu_to_be32(maxrate);
5333 		cmd.u.params.min = cpu_to_be32(minrate);
5334 		cmd.u.params.pktsize = cpu_to_be16(pktsize);
5335 		break;
5336 	default:
5337 		return -EINVAL;
5338 	}
5339 
5340 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl, 1);
5341 }
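
/*
 * Usage sketch (editor's illustration): only the fields selected by
 * @level are consumed, so a class-level rate limiter in absolute bitrate
 * units ignores @weight:
 *
 *	ret = t4_config_scheduler(sc, FW_SCHED_PARAMS_MODE_CLASS,
 *	    FW_SCHED_PARAMS_LEVEL_CL_RL, 1500, cl, port,
 *	    FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
 *	    0, 0, maxrate);
 */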
5342