xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision 8a166cafe0965f6bd72cd3d2f5372704f05cb5e8)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 
32 #include "common.h"
33 #include "t4_regs.h"
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
36 
#undef msleep
/*
 * Sleep for @x milliseconds.  While the system is still cold (scheduler not
 * running) we must busy-wait with DELAY(); otherwise yield via pause(9).
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
44 
45 /**
46  *	t4_wait_op_done_val - wait until an operation is completed
47  *	@adapter: the adapter performing the operation
48  *	@reg: the register to check for completion
49  *	@mask: a single-bit field within @reg that indicates completion
50  *	@polarity: the value of the field when the operation is completed
51  *	@attempts: number of check iterations
52  *	@delay: delay in usecs between iterations
53  *	@valp: where to store the value of the register at completion time
54  *
55  *	Wait until an operation is completed by checking a bit in a register
56  *	up to @attempts times.  If @valp is not NULL the value of the register
57  *	at the time it indicated completion is stored there.  Returns 0 if the
58  *	operation completes and	-EAGAIN	otherwise.
59  */
60 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 		        int polarity, int attempts, int delay, u32 *valp)
62 {
63 	while (1) {
64 		u32 val = t4_read_reg(adapter, reg);
65 
66 		if (!!(val & mask) == polarity) {
67 			if (valp)
68 				*valp = val;
69 			return 0;
70 		}
71 		if (--attempts == 0)
72 			return -EAGAIN;
73 		if (delay)
74 			udelay(delay);
75 	}
76 }
77 
78 /**
79  *	t4_set_reg_field - set a register field to a value
80  *	@adapter: the adapter to program
81  *	@addr: the register address
82  *	@mask: specifies the portion of the register to modify
83  *	@val: the new value for the register field
84  *
85  *	Sets a register field specified by the supplied mask to the
86  *	given value.
87  */
88 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
89 		      u32 val)
90 {
91 	u32 v = t4_read_reg(adapter, addr) & ~mask;
92 
93 	t4_write_reg(adapter, addr, v | val);
94 	(void) t4_read_reg(adapter, addr);      /* flush */
95 }
96 
97 /**
98  *	t4_read_indirect - read indirectly addressed registers
99  *	@adap: the adapter
100  *	@addr_reg: register holding the indirect address
101  *	@data_reg: register holding the value of the indirect register
102  *	@vals: where the read register values are stored
103  *	@nregs: how many indirect registers to read
104  *	@start_idx: index of first indirect register to read
105  *
106  *	Reads registers that are accessed indirectly through an address/data
107  *	register pair.
108  */
109 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
110 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
111 		      unsigned int start_idx)
112 {
113 	while (nregs--) {
114 		t4_write_reg(adap, addr_reg, start_idx);
115 		*vals++ = t4_read_reg(adap, data_reg);
116 		start_idx++;
117 	}
118 }
119 
120 /**
121  *	t4_write_indirect - write indirectly addressed registers
122  *	@adap: the adapter
123  *	@addr_reg: register holding the indirect addresses
124  *	@data_reg: register holding the value for the indirect registers
125  *	@vals: values to write
126  *	@nregs: how many indirect registers to write
127  *	@start_idx: address of first indirect register to write
128  *
129  *	Writes a sequential block of registers that are accessed indirectly
130  *	through an address/data register pair.
131  */
132 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
133 		       unsigned int data_reg, const u32 *vals,
134 		       unsigned int nregs, unsigned int start_idx)
135 {
136 	while (nregs--) {
137 		t4_write_reg(adap, addr_reg, start_idx++);
138 		t4_write_reg(adap, data_reg, *vals++);
139 	}
140 }
141 
142 /*
143  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
144  * mechanism.  This guarantees that we get the real value even if we're
145  * operating within a Virtual Machine and the Hypervisor is trapping our
146  * Configuration Space accesses.
147  */
148 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
149 {
150 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
151 		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
152 		     V_REGISTER(reg));
153 	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
154 }
155 
156 /*
157  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
158  */
159 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
160 			 u32 mbox_addr)
161 {
162 	for ( ; nflit; nflit--, mbox_addr += 8)
163 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
164 }
165 
166 /*
167  * Handle a FW assertion reported in a mailbox.
168  */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* Pull the FW_DEBUG_CMD payload out of the mailbox ... */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	/* ... and log the assertion location plus the two reported values. */
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
178 
179 #define X_CIM_PF_NOACCESS 0xeeeeeeee
180 /**
181  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
182  *	@adap: the adapter
183  *	@mbox: index of the mailbox to use
184  *	@cmd: the command to write
185  *	@size: command length in bytes
186  *	@rpl: where to optionally store the reply
187  *	@sleep_ok: if true we may sleep while awaiting command completion
188  *
189  *	Sends the given command to FW through the selected mailbox and waits
190  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
191  *	store the FW's reply to the command.  The command and its optional
192  *	reply are of the same length.  Some FW commands like RESET and
193  *	INITIALIZE can take a considerable amount of time to execute.
194  *	@sleep_ok determines whether we may sleep while awaiting the response.
195  *	If sleeping is allowed we use progressive backoff otherwise we spin.
196  *
197  *	The return value is 0 on success or a negative errno on failure.  A
198  *	failure can happen either because we are not able to execute the
199  *	command or FW executes it but signals an error.  In the latter case
200  *	the return value is the error code indicated by FW (negated).
201  */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	/* Per-PF register addresses for the selected mailbox. */
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* Poll up to 4 times for the mailbox to be owned by the driver (PL). */
	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	/* Copy the command into the mailbox data registers, 8B at a time. */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/* Hand the mailbox over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for a reply until the accumulated delay hits the timeout. */
	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		/* All-e's marker: register not accessible; keep polling. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/*
			 * Ownership came back without a valid message:
			 * release the mailbox and keep waiting.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/* First 64 bits of the reply carry opcode/retval. */
			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* FW assertion: log it and report I/O error. */
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			/* Negated FW status; 0 means success. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
274 
275 /**
276  *	t4_mc_read - read from MC through backdoor accesses
277  *	@adap: the adapter
278  *	@addr: address of first byte requested
279  *	@data: 64 bytes of data containing the requested address
280  *	@ecc: where to store the corresponding 64-bit ECC word
281  *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
285  */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* Another backdoor (BIST) access is still in progress. */
	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the containing 64B-aligned address. */
	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	/* Wait for F_START_BIST to self-clear (10 polls, 1us apart). */
	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

	/* Copy out status registers 15..0, walked in descending order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
310 
311 /**
312  *	t4_edc_read - read from EDC through backdoor accesses
313  *	@adap: the adapter
314  *	@idx: which EDC to access
315  *	@addr: address of first byte requested
316  *	@data: 64 bytes of data containing the requested address
317  *	@ecc: where to store the corresponding 64-bit ECC word
318  *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
322  */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* The two EDC register blocks are EDC_STRIDE bytes apart. */
	idx *= EDC_STRIDE;
	/* Another backdoor (BIST) access is still in progress. */
	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the containing 64B-aligned address. */
	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	/* Wait for F_START_BIST to self-clear (10 polls, 1us apart). */
	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

	/* Copy out status registers 15..0, walked in descending order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
348 
349 /**
350  *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
351  *	@adap: the adapter
352  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
353  *	@addr: address within indicated memory type
354  *	@len: amount of memory to read
355  *	@buf: host memory buffer
356  *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
363  */
364 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
365 		__be32 *buf)
366 {
367 	u32 pos, start, end, offset;
368 	int ret;
369 
370 	/*
371 	 * Argument sanity checks ...
372 	 */
373 	if ((addr & 0x3) || (len & 0x3))
374 		return -EINVAL;
375 
376 	/*
377 	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
378 	 * need to round down the start and round up the end.  We'll start
379 	 * copying out of the first line at (addr - start) a word at a time.
380 	 */
381 	start = addr & ~(64-1);
382 	end = (addr + len + 64-1) & ~(64-1);
383 	offset = (addr - start)/sizeof(__be32);
384 
385 	for (pos = start; pos < end; pos += 64, offset = 0) {
386 		__be32 data[16];
387 
388 		/*
389 		 * Read the chip's memory block and bail if there's an error.
390 		 */
391 		if (mtype == MEM_MC)
392 			ret = t4_mc_read(adap, pos, data, NULL);
393 		else
394 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
395 		if (ret)
396 			return ret;
397 
398 		/*
399 		 * Copy the data into the caller's memory buffer.
400 		 */
401 		while (offset < 16 && len > 0) {
402 			*buf++ = data[offset++];
403 			len -= sizeof(__be32);
404 		}
405 	}
406 
407 	return 0;
408 }
409 
410 /*
411  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
412  * VPD-R header.
413  */
struct t4_vpd_hdr {
	u8  id_tag;		/* ID-string tag (0x82 on current cards) */
	u8  id_len[2];		/* ID-string length */
	u8  id_data[ID_LEN];	/* product identifier string */
	u8  vpdr_tag;		/* VPD-R section tag */
	u8  vpdr_len[2];	/* VPD-R section length, little-endian */
};
421 
422 /*
423  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
424  */
425 #define EEPROM_MAX_RD_POLL 40
426 #define EEPROM_MAX_WR_POLL 6
427 #define EEPROM_STAT_ADDR   0x7bfc
428 #define VPD_BASE           0x400
429 #define VPD_BASE_OLD       0
430 #define VPD_LEN            512
431 #define VPD_INFO_FLD_HDR_SIZE	3
432 
433 /**
434  *	t4_seeprom_read - read a serial EEPROM location
435  *	@adapter: adapter to read
436  *	@addr: EEPROM virtual address
437  *	@data: where to store the read data
438  *
439  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
440  *	VPD capability.  Note that this function must be called with a virtual
441  *	address.
442  */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Address must be a 32-bit-aligned virtual EEPROM offset. */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Writing the address with the F flag clear starts a VPD read; the
	 * device sets PCI_VPD_ADDR_F once the data register holds the word.
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	/* Poll budget exhausted without the completion flag being set. */
	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);	/* VPD data is little-endian */
	return 0;
}
466 
467 /**
468  *	t4_seeprom_write - write a serial EEPROM location
469  *	@adapter: adapter to write
470  *	@addr: virtual EEPROM address
471  *	@data: value to write
472  *
473  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
474  *	VPD capability.  Note that this function must be called with a virtual
475  *	address.
476  */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Address must be a 32-bit-aligned virtual EEPROM offset. */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Load the data register, then write the address with the F flag
	 * set to start the VPD write; the device clears PCI_VPD_ADDR_F
	 * when the write has completed.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	/* Poll budget exhausted with the flag still set: write failed. */
	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
501 
502 /**
503  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
504  *	@phys_addr: the physical EEPROM address
505  *	@fn: the PCI function number
506  *	@sz: size of function-specific area
507  *
508  *	Translate a physical EEPROM address to virtual.  The first 1K is
509  *	accessed through virtual addresses starting at 31K, the rest is
510  *	accessed through virtual addresses starting at 0.
511  *
512  *	The mapping is as follows:
513  *	[0..1K) -> [31K..32K)
514  *	[1K..1K+A) -> [ES-A..ES)
515  *	[1K+A..ES) -> [0..ES-A-1K)
516  *
517  *	where A = @fn * @sz, and ES = EEPROM size.
518  */
519 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
520 {
521 	fn *= sz;
522 	if (phys_addr < 1024)
523 		return phys_addr + (31 << 10);
524 	if (phys_addr < 1024 + fn)
525 		return EEPROMSIZE - fn + phys_addr - 1024;
526 	if (phys_addr < EEPROMSIZE)
527 		return phys_addr - 1024 - fn;
528 	return -EINVAL;
529 }
530 
531 /**
532  *	t4_seeprom_wp - enable/disable EEPROM write protection
533  *	@adapter: the adapter
534  *	@enable: whether to enable or disable write protection
535  *
536  *	Enables or disables write protection on the serial EEPROM.
537  */
538 int t4_seeprom_wp(struct adapter *adapter, int enable)
539 {
540 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
541 }
542 
543 /**
544  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
545  *	@v: Pointer to buffered vpd data structure
546  *	@kw: The keyword to search for
547  *
548  *	Returns the value of the information field keyword or
549  *	-ENOENT otherwise.
550  */
551 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
552 {
553          int i;
554 	 unsigned int offset , len;
555 	 const u8 *buf = &v->id_tag;
556 	 const u8 *vpdr_len = &v->vpdr_tag;
557 	 offset = sizeof(struct t4_vpd_hdr);
558 	 len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
559 
560 	 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
561 		 return -ENOENT;
562 	 }
563 
564          for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
565 		 if(memcmp(buf + i , kw , 2) == 0){
566 			 i += VPD_INFO_FLD_HDR_SIZE;
567                          return i;
568 		  }
569 
570                  i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
571          }
572 
573          return -ENOENT;
574 }
575 
576 
577 /**
578  *	get_vpd_params - read VPD parameters from VPD EEPROM
579  *	@adapter: adapter to read
580  *	@p: where to store the parameters
581  *
582  *	Reads card parameters stored in VPD EEPROM.
583  */
584 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
585 {
586 	int i, ret, addr;
587 	int ec, sn, pn, na;
588 	u8 vpd[VPD_LEN], csum;
589 	const struct t4_vpd_hdr *v;
590 
591 	/*
592 	 * Card information normally starts at VPD_BASE but early cards had
593 	 * it at 0.
594 	 */
595 	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
596 	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
597 
598 	for (i = 0; i < sizeof(vpd); i += 4) {
599 		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
600 		if (ret)
601 			return ret;
602 	}
603  	v = (const struct t4_vpd_hdr *)vpd;
604 
605 #define FIND_VPD_KW(var,name) do { \
606 	var = get_vpd_keyword_val(v , name); \
607 	if (var < 0) { \
608 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
609 		return -EINVAL; \
610 	} \
611 } while (0)
612 
613 	FIND_VPD_KW(i, "RV");
614 	for (csum = 0; i >= 0; i--)
615 		csum += vpd[i];
616 
617 	if (csum) {
618 		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
619 		return -EINVAL;
620 	}
621 	FIND_VPD_KW(ec, "EC");
622 	FIND_VPD_KW(sn, "SN");
623 	FIND_VPD_KW(pn, "PN");
624 	FIND_VPD_KW(na, "NA");
625 #undef FIND_VPD_KW
626 
627 	memcpy(p->id, v->id_data, ID_LEN);
628 	strstrip(p->id);
629 	memcpy(p->ec, vpd + ec, EC_LEN);
630 	strstrip(p->ec);
631 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
632 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
633 	strstrip(p->sn);
634 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
635 	strstrip((char *)p->pn);
636 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
637 	strstrip((char *)p->na);
638 
639 	return 0;
640 }
641 
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (standard serial-flash command set) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash (fast read) */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
655 
656 /**
657  *	sf1_read - read data from the serial flash
658  *	@adapter: the adapter
659  *	@byte_cnt: number of bytes to read
660  *	@cont: whether another operation will be chained
661  *	@lock: whether to lock SF for PL access only
662  *	@valp: where to store the read data
663  *
664  *	Reads up to 4 bytes of data from the serial flash.  The location of
665  *	the read needs to be specified prior to calling this by issuing the
666  *	appropriate commands to the serial flash.
667  */
668 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
669 		    int lock, u32 *valp)
670 {
671 	int ret;
672 
673 	if (!byte_cnt || byte_cnt > 4)
674 		return -EINVAL;
675 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
676 		return -EBUSY;
677 	t4_write_reg(adapter, A_SF_OP,
678 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
679 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
680 	if (!ret)
681 		*valp = t4_read_reg(adapter, A_SF_DATA);
682 	return ret;
683 }
684 
685 /**
686  *	sf1_write - write data to the serial flash
687  *	@adapter: the adapter
688  *	@byte_cnt: number of bytes to write
689  *	@cont: whether another operation will be chained
690  *	@lock: whether to lock SF for PL access only
691  *	@val: value to write
692  *
693  *	Writes up to 4 bytes of data to the serial flash.  The location of
694  *	the write needs to be specified prior to calling this by issuing the
695  *	appropriate commands to the serial flash.
696  */
697 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
698 		     int lock, u32 val)
699 {
700 	if (!byte_cnt || byte_cnt > 4)
701 		return -EINVAL;
702 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
703 		return -EBUSY;
704 	t4_write_reg(adapter, A_SF_DATA, val);
705 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
706 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
707 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
708 }
709 
710 /**
711  *	flash_wait_op - wait for a flash operation to complete
712  *	@adapter: the adapter
713  *	@attempts: max number of polls of the status register
714  *	@delay: delay between polls in ms
715  *
716  *	Wait for a flash operation to complete by polling the status register.
717  */
718 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
719 {
720 	int ret;
721 	u32 status;
722 
723 	while (1) {
724 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
725 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
726 			return ret;
727 		if (!(status & 1))
728 			return 0;
729 		if (--attempts == 0)
730 			return -EAGAIN;
731 		if (delay)
732 			msleep(delay);
733 	}
734 }
735 
736 /**
737  *	t4_read_flash - read words from serial flash
738  *	@adapter: the adapter
739  *	@addr: the start address for the read
740  *	@nwords: how many 32-bit words to read
741  *	@data: where to store the read data
742  *	@byte_oriented: whether to store data as bytes or as words
743  *
744  *	Read the specified number of 32-bit words from the serial flash.
745  *	If @byte_oriented is set the read data is stored as a byte array
746  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
747  *	natural endianess.
748  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie within the flash and start 32-bit aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the command word: opcode in the low byte, byte-swapped addr. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/*
	 * Issue the command, then one chained 1-byte read; *data is
	 * overwritten by the real data in the loop below.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep chaining (cont) until the final word. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);	/* present as raw bytes */
	}
	return 0;
}
774 
775 /**
776  *	t4_write_flash - write up to a page of data to the serial flash
777  *	@adapter: the adapter
778  *	@addr: the start address to write
779  *	@n: length of data to write in bytes
780  *	@data: the data to write
781  *	@byte_oriented: whether to store data as bytes or as words
782  *
783  *	Writes up to a page of data (256 bytes) to the serial flash starting
784  *	at the given address.  All the data must be written to the same page.
785  *	If @byte_oriented is set the write data is stored as byte stream
786  *	(i.e. matches what on disk), otherwise in big-endian.
787  */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must stay inside the flash and within one 256B page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* PROG_PAGE command word: opcode in low byte, byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload up to 4 bytes at a time, MSB first. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		/* Chain (cont) every transfer except the last. */
		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	/* Page programming takes a while; poll the flash status register. */
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above, so data - n is its start. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
839 
840 /**
841  *	t4_get_fw_version - read the firmware version
842  *	@adapter: the adapter
843  *	@vers: where to place the version
844  *
845  *	Reads the FW version from flash.
846  */
847 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
848 {
849 	return t4_read_flash(adapter,
850 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
851 			     vers, 0);
852 }
853 
854 /**
855  *	t4_get_tp_version - read the TP microcode version
856  *	@adapter: the adapter
857  *	@vers: where to place the version
858  *
859  *	Reads the TP microcode version from flash.
860  */
861 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
862 {
863 	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
864 							      tp_microcode_ver),
865 			     1, vers, 0);
866 }
867 
868 /**
869  *	t4_check_fw_version - check if the FW is compatible with this driver
870  *	@adapter: the adapter
871  *
872  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
873  *	if there's exact match, a negative error if the version could not be
874  *	read or there's a major version mismatch, and a positive value if the
875  *	expected major version is found but there's a minor version mismatch.
876  */
877 int t4_check_fw_version(struct adapter *adapter)
878 {
879 	int ret, major, minor, micro;
880 
881 	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
882 	if (!ret)
883 		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
884 	if (ret)
885 		return ret;
886 
887 	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
888 	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
889 	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
890 
891 	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
892 		CH_ERR(adapter, "card FW has major version %u, driver wants "
893 		       "%u\n", major, FW_VERSION_MAJOR);
894 		return -EINVAL;
895 	}
896 
897 	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
898 		return 0;                                   /* perfect match */
899 
900 	/* Minor/micro version mismatch.  Report it but often it's OK. */
901 	return 1;
902 }
903 
904 /**
905  *	t4_flash_erase_sectors - erase a range of flash sectors
906  *	@adapter: the adapter
907  *	@start: the first sector to erase
908  *	@end: the last sector to erase
909  *
910  *	Erases the sectors in the given inclusive range.
911  */
912 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
913 {
914 	int ret = 0;
915 
916 	while (start <= end) {
917 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
918 		    (ret = sf1_write(adapter, 4, 0, 1,
919 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
920 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
921 			CH_ERR(adapter, "erase of flash sector %d failed, "
922 			       "error %d\n", start, ret);
923 			break;
924 		}
925 		start++;
926 	}
927 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
928 	return ret;
929 }
930 
931 /**
932  *	t4_flash_cfg_addr - return the address of the flash configuration file
933  *	@adapter: the adapter
934  *
935  *	Return the address within the flash where the Firmware Configuration
936  *	File is stored.
937  */
938 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
939 {
940 	if (adapter->params.sf_size == 0x100000)
941 		return FLASH_FPGA_CFG_START;
942 	else
943 		return FLASH_CFG_START;
944 }
945 
946 /**
947  *	t4_load_cfg - download config file
948  *	@adap: the adapter
949  *	@cfg_data: the cfg text file to write
950  *	@size: text file size
951  *
952  *	Write the supplied config text file to the card's serial flash.
953  */
954 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
955 {
956 	int ret, i, n;
957 	unsigned int addr;
958 	unsigned int flash_cfg_start_sec;
959 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
960 
961 	addr = t4_flash_cfg_addr(adap);
962 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
963 
964 	if (size > FLASH_CFG_MAX_SIZE) {
965 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
966 		       FLASH_CFG_MAX_SIZE);
967 		return -EFBIG;
968 	}
969 
970 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
971 			 sf_sec_size);
972 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
973 				     flash_cfg_start_sec + i - 1);
974 	/*
975 	 * If size == 0 then we're simply erasing the FLASH sectors associated
976 	 * with the on-adapter Firmware Configuration File.
977 	 */
978 	if (ret || size == 0)
979 		goto out;
980 
981 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
982 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
983 		if ( (size - i) <  SF_PAGE_SIZE)
984 			n = size - i;
985 		else
986 			n = SF_PAGE_SIZE;
987 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
988 		if (ret)
989 			goto out;
990 
991 		addr += SF_PAGE_SIZE;
992 		cfg_data += SF_PAGE_SIZE;
993 	}
994 
995 out:
996 	if (ret)
997 		CH_ERR(adap, "config file %s failed %d\n",
998 		       (size == 0 ? "clear" : "download"), ret);
999 	return ret;
1000 }
1001 
1002 
1003 /**
1004  *	t4_load_fw - download firmware
1005  *	@adap: the adapter
1006  *	@fw_data: the firmware image to write
1007  *	@size: image size
1008  *
1009  *	Write the supplied firmware image to the card's serial flash.
1010  */
1011 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1012 {
1013 	u32 csum;
1014 	int ret, addr;
1015 	unsigned int i;
1016 	u8 first_page[SF_PAGE_SIZE];
1017 	const u32 *p = (const u32 *)fw_data;
1018 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1019 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1020 
1021 	if (!size) {
1022 		CH_ERR(adap, "FW image has no data\n");
1023 		return -EINVAL;
1024 	}
1025 	if (size & 511) {
1026 		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1027 		return -EINVAL;
1028 	}
1029 	if (ntohs(hdr->len512) * 512 != size) {
1030 		CH_ERR(adap, "FW image size differs from size in FW header\n");
1031 		return -EINVAL;
1032 	}
1033 	if (size > FLASH_FW_MAX_SIZE) {
1034 		CH_ERR(adap, "FW image too large, max is %u bytes\n",
1035 		       FLASH_FW_MAX_SIZE);
1036 		return -EFBIG;
1037 	}
1038 
1039 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1040 		csum += ntohl(p[i]);
1041 
1042 	if (csum != 0xffffffff) {
1043 		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1044 		       csum);
1045 		return -EINVAL;
1046 	}
1047 
1048 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1049 	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1050 	    FLASH_FW_START_SEC + i - 1);
1051 	if (ret)
1052 		goto out;
1053 
1054 	/*
1055 	 * We write the correct version at the end so the driver can see a bad
1056 	 * version if the FW write fails.  Start by writing a copy of the
1057 	 * first page with a bad version.
1058 	 */
1059 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1060 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1061 	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
1062 	if (ret)
1063 		goto out;
1064 
1065 	addr = FLASH_FW_START;
1066 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1067 		addr += SF_PAGE_SIZE;
1068 		fw_data += SF_PAGE_SIZE;
1069 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1070 		if (ret)
1071 			goto out;
1072 	}
1073 
1074 	ret = t4_write_flash(adap,
1075 			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1076 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1077 out:
1078 	if (ret)
1079 		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1080 	return ret;
1081 }
1082 
/* BIOS boot headers (layouts defined by the PCI Firmware Specification) */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max image: 1024 * 512B = 512KB */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};
1153 
1154 /*
1155  *	modify_device_id - Modifies the device ID of the Boot BIOS image
1156  *	@adatper: the device ID to write.
1157  *	@boot_data: the boot image to modify.
1158  *
1159  *	Write the supplied device ID to the boot BIOS image.
1160  */
1161 static void modify_device_id(int device_id, u8 *boot_data)
1162 {
1163 	legacy_pci_exp_rom_header_t *header;
1164 	pcir_data_t *pcir_header;
1165 	u32 cur_header = 0;
1166 
1167 	/*
1168 	 * Loop through all chained images and change the device ID's
1169 	 */
1170 	while (1) {
1171 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1172 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
1173 		    le16_to_cpu(*(u16*)header->pcir_offset)];
1174 
1175 		/*
1176 		 * Only modify the Device ID if code type is Legacy or HP.
1177 		 * 0x00: Okay to modify
1178 		 * 0x01: FCODE. Do not be modify
1179 		 * 0x03: Okay to modify
1180 		 * 0x04-0xFF: Do not modify
1181 		 */
1182 		if (pcir_header->code_type == 0x00) {
1183 			u8 csum = 0;
1184 			int i;
1185 
1186 			/*
1187 			 * Modify Device ID to match current adatper
1188 			 */
1189 			*(u16*) pcir_header->device_id = device_id;
1190 
1191 			/*
1192 			 * Set checksum temporarily to 0.
1193 			 * We will recalculate it later.
1194 			 */
1195 			header->cksum = 0x0;
1196 
1197 			/*
1198 			 * Calculate and update checksum
1199 			 */
1200 			for (i = 0; i < (header->size512 * 512); i++)
1201 				csum += (u8)boot_data[cur_header + i];
1202 
1203 			/*
1204 			 * Invert summed value to create the checksum
1205 			 * Writing new checksum value directly to the boot data
1206 			 */
1207 			boot_data[cur_header + 7] = -csum;
1208 
1209 		} else if (pcir_header->code_type == 0x03) {
1210 
1211 			/*
1212 			 * Modify Device ID to match current adatper
1213 			 */
1214 			*(u16*) pcir_header->device_id = device_id;
1215 
1216 		}
1217 
1218 
1219 		/*
1220 		 * Check indicator element to identify if this is the last
1221 		 * image in the ROM.
1222 		 */
1223 		if (pcir_header->indicator & 0x80)
1224 			break;
1225 
1226 		/*
1227 		 * Move header pointer up to the next image in the ROM.
1228 		 */
1229 		cur_header += header->size512 * 512;
1230 	}
1231 }
1232 
1233 /*
1234  *	t4_load_boot - download boot flash
1235  *	@adapter: the adapter
1236  *	@boot_data: the boot image to write
1237  *	@boot_addr: offset in flash to write boot_data
1238  *	@size: image size
1239  *
1240  *	Write the supplied boot image to the card's serial flash.
1241  *	The boot image has the following sections: a 28-byte header and the
1242  *	boot image.
1243  */
1244 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1245 		 unsigned int boot_addr, unsigned int size)
1246 {
1247 	pci_exp_rom_header_t *header;
1248 	int pcir_offset ;
1249 	pcir_data_t *pcir_header;
1250 	int ret, addr;
1251 	uint16_t device_id;
1252 	unsigned int i;
1253 	unsigned int boot_sector = boot_addr * 1024;
1254 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1255 
1256 	/*
1257 	 * Make sure the boot image does not encroach on the firmware region
1258 	 */
1259 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1260 		CH_ERR(adap, "boot image encroaching on firmware region\n");
1261 		return -EFBIG;
1262 	}
1263 
1264 	/*
1265 	 * Number of sectors spanned
1266 	 */
1267 	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1268 			sf_sec_size);
1269 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1270 				     (boot_sector >> 16) + i - 1);
1271 
1272 	/*
1273 	 * If size == 0 then we're simply erasing the FLASH sectors associated
1274 	 * with the on-adapter option ROM file
1275 	 */
1276 	if (ret || (size == 0))
1277 		goto out;
1278 
1279 	/* Get boot header */
1280 	header = (pci_exp_rom_header_t *)boot_data;
1281 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1282 	/* PCIR Data Structure */
1283 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1284 
1285 	/*
1286 	 * Perform some primitive sanity testing to avoid accidentally
1287 	 * writing garbage over the boot sectors.  We ought to check for
1288 	 * more but it's not worth it for now ...
1289 	 */
1290 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1291 		CH_ERR(adap, "boot image too small/large\n");
1292 		return -EFBIG;
1293 	}
1294 
1295 	/*
1296 	 * Check BOOT ROM header signature
1297 	 */
1298 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1299 		CH_ERR(adap, "Boot image missing signature\n");
1300 		return -EINVAL;
1301 	}
1302 
1303 	/*
1304 	 * Check PCI header signature
1305 	 */
1306 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1307 		CH_ERR(adap, "PCI header missing signature\n");
1308 		return -EINVAL;
1309 	}
1310 
1311 	/*
1312 	 * Check Vendor ID matches Chelsio ID
1313 	 */
1314 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1315 		CH_ERR(adap, "Vendor ID missing signature\n");
1316 		return -EINVAL;
1317 	}
1318 
1319 	/*
1320 	 * Retrieve adapter's device ID
1321 	 */
1322 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1323 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
1324 	device_id = (device_id & 0xff) | 0x4000;
1325 
1326 	/*
1327 	 * Check PCIE Device ID
1328 	 */
1329 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1330 		/*
1331 		 * Change the device ID in the Boot BIOS image to match
1332 		 * the Device ID of the current adapter.
1333 		 */
1334 		modify_device_id(device_id, boot_data);
1335 	}
1336 
1337 	/*
1338 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1339 	 * we finish copying the rest of the boot image. This will ensure
1340 	 * that the BIOS boot header will only be written if the boot image
1341 	 * was written in full.
1342 	 */
1343 	addr = boot_sector;
1344 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1345 		addr += SF_PAGE_SIZE;
1346 		boot_data += SF_PAGE_SIZE;
1347 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1348 		if (ret)
1349 			goto out;
1350 	}
1351 
1352 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1353 
1354 out:
1355 	if (ret)
1356 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1357 	return ret;
1358 }
1359 
1360 /**
1361  *	t4_read_cimq_cfg - read CIM queue configuration
1362  *	@adap: the adapter
1363  *	@base: holds the queue base addresses in bytes
1364  *	@size: holds the queue sizes in bytes
1365  *	@thres: holds the queue full thresholds in bytes
1366  *
1367  *	Returns the current configuration of the CIM queues, starting with
1368  *	the IBQs, then the OBQs.
1369  */
1370 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1371 {
1372 	unsigned int i, v;
1373 
1374 	for (i = 0; i < CIM_NUM_IBQ; i++) {
1375 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1376 			     V_QUENUMSELECT(i));
1377 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1378 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1379 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1380 		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1381 	}
1382 	for (i = 0; i < CIM_NUM_OBQ; i++) {
1383 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1384 			     V_QUENUMSELECT(i));
1385 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1386 		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1387 		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1388 	}
1389 }
1390 
1391 /**
1392  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1393  *	@adap: the adapter
1394  *	@qid: the queue index
1395  *	@data: where to store the queue contents
1396  *	@n: capacity of @data in 32-bit words
1397  *
1398  *	Reads the contents of the selected CIM queue starting at address 0 up
1399  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1400  *	error and the number of 32-bit words actually read on success.
1401  */
1402 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1403 {
1404 	int i, err;
1405 	unsigned int addr;
1406 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1407 
1408 	if (qid > 5 || (n & 3))
1409 		return -EINVAL;
1410 
1411 	addr = qid * nwords;
1412 	if (n > nwords)
1413 		n = nwords;
1414 
1415 	for (i = 0; i < n; i++, addr++) {
1416 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1417 			     F_IBQDBGEN);
1418 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1419 				      2, 1);
1420 		if (err)
1421 			return err;
1422 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1423 	}
1424 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1425 	return i;
1426 }
1427 
1428 /**
1429  *	t4_read_cim_obq - read the contents of a CIM outbound queue
1430  *	@adap: the adapter
1431  *	@qid: the queue index
1432  *	@data: where to store the queue contents
1433  *	@n: capacity of @data in 32-bit words
1434  *
1435  *	Reads the contents of the selected CIM queue starting at address 0 up
1436  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1437  *	error and the number of 32-bit words actually read on success.
1438  */
1439 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1440 {
1441 	int i, err;
1442 	unsigned int addr, v, nwords;
1443 
1444 	if (qid > 5 || (n & 3))
1445 		return -EINVAL;
1446 
1447 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1448 		     V_QUENUMSELECT(qid));
1449 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1450 
1451 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
1452 	nwords = G_CIMQSIZE(v) * 64;  /* same */
1453 	if (n > nwords)
1454 		n = nwords;
1455 
1456 	for (i = 0; i < n; i++, addr++) {
1457 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1458 			     F_OBQDBGEN);
1459 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1460 				      2, 1);
1461 		if (err)
1462 			return err;
1463 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1464 	}
1465 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1466 	return i;
1467 }
1468 
/*
 * Base offsets of the regions within the CIM internal address space
 * reachable through t4_cim_read/t4_cim_write (see t4_cim_ctl_read).
 */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
1476 
1477 /**
1478  *	t4_cim_read - read a block from CIM internal address space
1479  *	@adap: the adapter
1480  *	@addr: the start address within the CIM address space
1481  *	@n: number of words to read
1482  *	@valp: where to store the result
1483  *
1484  *	Reads a block of 4-byte words from the CIM intenal address space.
1485  */
1486 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1487 		unsigned int *valp)
1488 {
1489 	int ret = 0;
1490 
1491 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1492 		return -EBUSY;
1493 
1494 	for ( ; !ret && n--; addr += 4) {
1495 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1496 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1497 				      0, 5, 2);
1498 		if (!ret)
1499 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1500 	}
1501 	return ret;
1502 }
1503 
1504 /**
1505  *	t4_cim_write - write a block into CIM internal address space
1506  *	@adap: the adapter
1507  *	@addr: the start address within the CIM address space
1508  *	@n: number of words to write
1509  *	@valp: set of values to write
1510  *
1511  *	Writes a block of 4-byte words into the CIM intenal address space.
1512  */
1513 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1514 		 const unsigned int *valp)
1515 {
1516 	int ret = 0;
1517 
1518 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1519 		return -EBUSY;
1520 
1521 	for ( ; !ret && n--; addr += 4) {
1522 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1523 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1524 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1525 				      0, 5, 2);
1526 	}
1527 	return ret;
1528 }
1529 
/* Convenience wrapper: write a single 4-byte word into CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
1534 
1535 /**
1536  *	t4_cim_ctl_read - read a block from CIM control region
1537  *	@adap: the adapter
1538  *	@addr: the start address within the CIM control region
1539  *	@n: number of words to read
1540  *	@valp: where to store the result
1541  *
1542  *	Reads a block of 4-byte words from the CIM control region.
1543  */
1544 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1545 		    unsigned int *valp)
1546 {
1547 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1548 }
1549 
1550 /**
1551  *	t4_cim_read_la - read CIM LA capture buffer
1552  *	@adap: the adapter
1553  *	@la_buf: where to store the LA data
1554  *	@wrptr: the HW write pointer within the capture buffer
1555  *
1556  *	Reads the contents of the CIM LA buffer with the most recent entry at
1557  *	the end	of the returned data and with the entry at @wrptr first.
1558  *	We try to leave the LA in the running state we find it in.
1559  */
1560 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1561 {
1562 	int i, ret;
1563 	unsigned int cfg, val, idx;
1564 
1565 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1566 	if (ret)
1567 		return ret;
1568 
1569 	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1570 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1571 		if (ret)
1572 			return ret;
1573 	}
1574 
1575 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1576 	if (ret)
1577 		goto restart;
1578 
1579 	idx = G_UPDBGLAWRPTR(val);
1580 	if (wrptr)
1581 		*wrptr = idx;
1582 
1583 	for (i = 0; i < adap->params.cim_la_size; i++) {
1584 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1585 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1586 		if (ret)
1587 			break;
1588 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1589 		if (ret)
1590 			break;
1591 		if (val & F_UPDBGLARDEN) {
1592 			ret = -ETIMEDOUT;
1593 			break;
1594 		}
1595 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1596 		if (ret)
1597 			break;
1598 		idx = (idx + 1) & M_UPDBGLARDPTR;
1599 	}
1600 restart:
1601 	if (cfg & F_UPDBGLAEN) {
1602 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1603 				      cfg & ~F_UPDBGLARDEN);
1604 		if (!ret)
1605 			ret = r;
1606 	}
1607 	return ret;
1608 }
1609 
/*
 *	t4_cim_read_pif_la - read the CIM PIF logic analyzer capture buffers
 *	@adap: the adapter
 *	@pif_req: where to store the PO (request) LA data
 *	@pif_rsp: where to store the PI (response) LA data
 *	@pif_req_wrptr: if non-NULL, returns the HW PO write pointer
 *	@pif_rsp_wrptr: if non-NULL, returns the HW PI write pointer
 *
 *	If the LA is currently enabled it is turned off for the duration of
 *	the read and the previous configuration is restored at the end.
 *	NOTE(review): "PO"/"PI" presumably denote the outbound/inbound
 *	directions -- confirm against the register documentation.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)	/* freeze the LA while we read it */
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* select the entries, then read both LA data words */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* skip 2 entries between rows (layout per HW; see CIM docs) */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);	/* restore config */
}
1643 
/*
 *	t4_cim_read_ma_la - read the CIM MA logic analyzer capture buffers
 *	@adap: the adapter
 *	@ma_req: where to store the PO (request) MA LA data
 *	@ma_rsp: where to store the PI (response) MA LA data
 *
 *	If the LA is currently enabled it is turned off for the duration of
 *	the read and the previous configuration is restored at the end.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)	/* freeze the LA while we read it */
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* 5 entries are read from every group of 8 (layout per HW) */
	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);	/* restore config */
}
1664 
1665 /**
1666  *	t4_tp_read_la - read TP LA capture buffer
1667  *	@adap: the adapter
1668  *	@la_buf: where to store the LA data
1669  *	@wrptr: the HW write pointer within the capture buffer
1670  *
1671  *	Reads the contents of the TP LA buffer with the most recent entry at
1672  *	the end	of the returned data and with the entry at @wrptr first.
1673  *	We leave the LA in the running state we find it in.
1674  */
1675 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1676 {
1677 	bool last_incomplete;
1678 	unsigned int i, cfg, val, idx;
1679 
1680 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1681 	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1682 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1683 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1684 
1685 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1686 	idx = G_DBGLAWPTR(val);
1687 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1688 	if (last_incomplete)
1689 		idx = (idx + 1) & M_DBGLARPTR;
1690 	if (wrptr)
1691 		*wrptr = idx;
1692 
1693 	val &= 0xffff;
1694 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1695 	val |= adap->params.tp.la_mask;
1696 
1697 	for (i = 0; i < TPLA_SIZE; i++) {
1698 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1699 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1700 		idx = (idx + 1) & M_DBGLARPTR;
1701 	}
1702 
1703 	/* Wipe out last entry if it isn't valid */
1704 	if (last_incomplete)
1705 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1706 
1707 	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1708 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1709 			     cfg | adap->params.tp.la_mask);
1710 }
1711 
1712 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1713 {
1714 	unsigned int i, j;
1715 
1716 	for (i = 0; i < 8; i++) {
1717 		u32 *p = la_buf + i;
1718 
1719 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1720 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1721 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1722 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1723 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1724 	}
1725 }
1726 
/* Link capabilities we may advertise: supported speeds plus autoneg */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1729 
1730 /**
1731  *	t4_link_start - apply link configuration to MAC/PHY
1732  *	@phy: the PHY to setup
1733  *	@mac: the MAC to setup
1734  *	@lc: the requested link configuration
1735  *
1736  *	Set up a port's MAC and PHY according to a desired link configuration.
1737  *	- If the PHY can auto-negotiate first decide what to advertise, then
1738  *	  enable/disable auto-negotiation as desired, and reset.
1739  *	- If the PHY does not auto-negotiate just reset it.
1740  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1741  *	  otherwise do it later based on the outcome of auto-negotiation.
1742  */
1743 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1744 		  struct link_config *lc)
1745 {
1746 	struct fw_port_cmd c;
1747 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1748 
1749 	lc->link_ok = 0;
1750 	if (lc->requested_fc & PAUSE_RX)
1751 		fc |= FW_PORT_CAP_FC_RX;
1752 	if (lc->requested_fc & PAUSE_TX)
1753 		fc |= FW_PORT_CAP_FC_TX;
1754 
1755 	memset(&c, 0, sizeof(c));
1756 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1757 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1758 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1759 				  FW_LEN16(c));
1760 
1761 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1762 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1763 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1764 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1765 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1766 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1767 	} else
1768 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1769 
1770 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1771 }
1772 
1773 /**
1774  *	t4_restart_aneg - restart autonegotiation
1775  *	@adap: the adapter
1776  *	@mbox: mbox to use for the FW command
1777  *	@port: the port id
1778  *
1779  *	Restarts autonegotiation for the selected port.
1780  */
1781 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1782 {
1783 	struct fw_port_cmd c;
1784 
1785 	memset(&c, 0, sizeof(c));
1786 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1787 			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1788 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1789 				  FW_LEN16(c));
1790 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1791 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1792 }
1793 
/*
 * One entry of a table-driven interrupt decoder (see
 * t4_handle_intr_status); tables are terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
1800 
1801 /**
1802  *	t4_handle_intr_status - table driven interrupt handler
1803  *	@adapter: the adapter that generated the interrupt
1804  *	@reg: the interrupt status register to process
1805  *	@acts: table of interrupt actions
1806  *
1807  *	A table driven interrupt handler that applies a set of masks to an
1808  *	interrupt status word and performs the corresponding actions if the
1809  *	interrupts described by the mask have occured.  The actions include
1810  *	optionally emitting a warning or alert message.  The table is terminated
1811  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1812  *	conditions.
1813  */
1814 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1815 				 const struct intr_info *acts)
1816 {
1817 	int fatal = 0;
1818 	unsigned int mask = 0;
1819 	unsigned int status = t4_read_reg(adapter, reg);
1820 
1821 	for ( ; acts->mask; ++acts) {
1822 		if (!(status & acts->mask))
1823 			continue;
1824 		if (acts->fatal) {
1825 			fatal++;
1826 			CH_ALERT(adapter, "%s (0x%x)\n",
1827 				 acts->msg, status & acts->mask);
1828 		} else if (acts->msg)
1829 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1830 					  acts->msg, status & acts->mask);
1831 		mask |= acts->mask;
1832 	}
1833 	status &= mask;
1834 	if (status)                           /* clear processed interrupts */
1835 		t4_write_reg(adapter, reg, status);
1836 	return fatal;
1837 }
1838 
1839 /*
1840  * Interrupt handler for the PCIE module.
1841  */
static void pcie_intr_handler(struct adapter *adapter)
{
	/*
	 * intr_info entry layout appears to be { cause-bit mask, log
	 * message, -1, fatal flag }, interpreted by t4_handle_intr_status
	 * which logs and clears the processed bits and returns a fatal
	 * count — see the struct intr_info definition to confirm.
	 */

	/* PCIE core UTL system-bus-agent status: Rx-side array parity. */
	static struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* PCIE core UTL PCI Express port status: mostly Tx-side errors. */
	static struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* Main PCIE interrupt cause register decode. */
	static struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		/* Note: unexpected split completion is the one non-fatal bit. */
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/*
	 * Service all three PCIE cause registers; the sum of the returned
	 * fatal counts decides whether the adapter goes fatal.
	 */
	fat = t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
1911 
1912 /*
1913  * TP interrupt handler.
1914  */
1915 static void tp_intr_handler(struct adapter *adapter)
1916 {
1917 	static struct intr_info tp_intr_info[] = {
1918 		{ 0x3fffffff, "TP parity error", -1, 1 },
1919 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1920 		{ 0 }
1921 	};
1922 
1923 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1924 		t4_fatal_err(adapter);
1925 }
1926 
1927 /*
1928  * SGE interrupt handler.
1929  */
static void sge_intr_handler(struct adapter *adapter)
{
	/* 64-bit combination of SGE_INT_CAUSE1/2, later OR-ed with the
	 * fatal count from CAUSE3 so a single test decides fatality. */
	u64 v;
	/* Snapshot of SGE_ERROR_STATS for the queue-error decode below. */
	u32 err;

	/* SGE_INT_CAUSE3 decode; most doorbell/CIDX errors are non-fatal. */
	static struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* CAUSE1/CAUSE2 hold 64 parity-error bits; any set bit is fatal. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		/* Write back the halves we read to clear them. */
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	/* Non-zero return means at least one fatal CAUSE3 bit was set. */
	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);

	/* Report and clear the captured per-queue error, if any. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
1984 
1985 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1986 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1987 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1988 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1989 
1990 /*
1991  * CIM interrupt handler.
1992  */
static void cim_intr_handler(struct adapter *adapter)
{
	/* CIM host interrupt causes: queue/mailbox parity, all fatal. */
	static struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	/* CIM uP-accessible space violations (illegal accesses), all fatal. */
	static struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Service both CIM cause registers; escalate if anything fatal. */
	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
2046 
2047 /*
2048  * ULP RX interrupt handler.
2049  */
2050 static void ulprx_intr_handler(struct adapter *adapter)
2051 {
2052 	static struct intr_info ulprx_intr_info[] = {
2053 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2054 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2055 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2056 		{ 0 }
2057 	};
2058 
2059 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2060 		t4_fatal_err(adapter);
2061 }
2062 
2063 /*
2064  * ULP TX interrupt handler.
2065  */
2066 static void ulptx_intr_handler(struct adapter *adapter)
2067 {
2068 	static struct intr_info ulptx_intr_info[] = {
2069 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2070 		  0 },
2071 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2072 		  0 },
2073 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2074 		  0 },
2075 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2076 		  0 },
2077 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2078 		{ 0 }
2079 	};
2080 
2081 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2082 		t4_fatal_err(adapter);
2083 }
2084 
2085 /*
2086  * PM TX interrupt handler.
2087  */
static void pmtx_intr_handler(struct adapter *adapter)
{
	/* PM TX cause bits: oversized/zero pcmds, framing and parity — all
	 * fatal. */
	static struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
2107 
2108 /*
2109  * PM RX interrupt handler.
2110  */
2111 static void pmrx_intr_handler(struct adapter *adapter)
2112 {
2113 	static struct intr_info pmrx_intr_info[] = {
2114 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2115 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2116 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2117 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2118 		  1 },
2119 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2120 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2121 		{ 0 }
2122 	};
2123 
2124 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2125 		t4_fatal_err(adapter);
2126 }
2127 
2128 /*
2129  * CPL switch interrupt handler.
2130  */
2131 static void cplsw_intr_handler(struct adapter *adapter)
2132 {
2133 	static struct intr_info cplsw_intr_info[] = {
2134 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2135 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2136 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2137 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2138 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2139 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2140 		{ 0 }
2141 	};
2142 
2143 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2144 		t4_fatal_err(adapter);
2145 }
2146 
2147 /*
2148  * LE interrupt handler.
2149  */
static void le_intr_handler(struct adapter *adap)
{
	/* Lookup Engine cause bits: LIP misses are non-fatal; parity and
	 * command errors are fatal. */
	static struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}
2164 
2165 /*
2166  * MPS interrupt handler.
2167  */
static void mps_intr_handler(struct adapter *adapter)
{
	/* One decode table per MPS sub-block cause register; all fatal. */
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Service every MPS sub-block cause register and sum fatal counts. */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause and post the write before deciding. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
2234 
2235 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2236 
2237 /*
2238  * EDC/MC interrupt handler.
2239  */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Controller names indexed by @idx; assumes idx is one of
	 * MEM_EDC0, MEM_EDC1, MEM_MC (0..2). */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status register pair for the selected memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else {
		addr = A_MC_INT_CAUSE;
		cnt_addr = A_MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		/* Reset the correctable-error counter after sampling it. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
			 name[idx]);

	/* Clear what we handled; parity/uncorrectable errors are fatal,
	 * correctable ECC errors are merely logged. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
2273 
2274 /*
2275  * MA interrupt handler.
2276  */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE)
		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		/* The wrap status stores the address in 16-byte units;
		 * shift it back up to a byte address for the log. */
		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/* Clear the causes we read; any MA interrupt is treated as fatal
	 * unconditionally. */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}
2293 
2294 /*
2295  * SMB interrupt handler.
2296  */
2297 static void smb_intr_handler(struct adapter *adap)
2298 {
2299 	static struct intr_info smb_intr_info[] = {
2300 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2301 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2302 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2303 		{ 0 }
2304 	};
2305 
2306 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2307 		t4_fatal_err(adap);
2308 }
2309 
2310 /*
2311  * NC-SI interrupt handler.
2312  */
static void ncsi_intr_handler(struct adapter *adap)
{
	/* NC-SI cause bits: interface/FIFO parity errors, all fatal. */
	static struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
2326 
2327 /*
2328  * XGMAC interrupt handler.
2329  */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));

	/* Only the Tx/Rx FIFO parity causes are serviced here; any other
	 * asserted bits are ignored by this handler. */
	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & F_TXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & F_RXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
	/* Clear the handled bits; parity errors here are always fatal. */
	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}
2345 
2346 /*
2347  * PL interrupt handler.
2348  */
2349 static void pl_intr_handler(struct adapter *adap)
2350 {
2351 	static struct intr_info pl_intr_info[] = {
2352 		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2353 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2354 		{ 0 }
2355 	};
2356 
2357 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2358 		t4_fatal_err(adap);
2359 }
2360 
2361 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2362 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2363 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2364 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2365 
2366 /**
2367  *	t4_slow_intr_handler - control path interrupt handler
2368  *	@adapter: the adapter
2369  *
2370  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2371  *	The designation 'slow' is because it involves register reads, while
2372  *	data interrupts typically don't involve any MMIOs.
2373  */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	/*
	 * Nothing to do unless a cause bit we own is set.
	 * NOTE(review): F_NCSI, F_SMB and the F_XGMAC* bits are not part
	 * of GLBL_INTR_MASK, so those modules are serviced only when
	 * their cause arrives together with a masked one — confirm this
	 * matches the intended interrupt routing.
	 */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each asserted cause to its module handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}
2430 
2431 /**
2432  *	t4_intr_enable - enable interrupts
2433  *	@adapter: the adapter whose interrupts should be enabled
2434  *
2435  *	Enable PF-specific interrupts for the calling function and the top-level
2436  *	interrupt concentrator for global interrupts.  Interrupts are already
2437  *	enabled at each module,	here we just enable the roots of the interrupt
2438  *	hierarchies.
2439  *
2440  *	Note: this function should be called only when the driver manages
2441  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2442  *	function at a time should be doing this.
2443  */
void t4_intr_enable(struct adapter *adapter)
{
	/* Determine which PF we are from the whoami register. */
	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));

	/* Enable the specific SGE error conditions we want reported. */
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
		     F_EGRESS_SIZE_ERR);
	/* Enable PF-local (PFSW/PFCIM) interrupts for this function. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global interrupts to this PF by setting its map bit. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
2459 
2460 /**
2461  *	t4_intr_disable - disable interrupts
2462  *	@adapter: the adapter whose interrupts should be disabled
2463  *
2464  *	Disable interrupts.  We only disable the top-level interrupt
2465  *	concentrators.  The caller must be a PCI function managing global
2466  *	interrupts.
2467  */
2468 void t4_intr_disable(struct adapter *adapter)
2469 {
2470 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2471 
2472 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2473 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2474 }
2475 
2476 /**
2477  *	t4_intr_clear - clear all interrupts
2478  *	@adapter: the adapter whose interrupts should be cleared
2479  *
2480  *	Clears all interrupts.  The caller must be a PCI function managing
2481  *	global interrupts.
2482  */
void t4_intr_clear(struct adapter *adapter)
{
	/* Every per-module cause/status register to be wiped below. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MC_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* Write all-ones to each cause register to clear every bit. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* Finally clear the top-level concentrator and post the write. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}
2513 
2514 /**
2515  *	hash_mac_addr - return the hash value of a MAC address
2516  *	@addr: the 48-bit Ethernet MAC address
2517  *
2518  *	Hashes a MAC address according to the hash function used by HW inexact
2519  *	(hash) address matching.
2520  */
static int hash_mac_addr(const u8 *addr)
{
	u32 hi, lo;

	/* Pack the two 24-bit halves of the 48-bit MAC address. */
	hi = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	lo = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	/* XOR-fold the 24-bit value down to the 6-bit hash index. */
	hi ^= lo;
	hi ^= hi >> 12;
	hi ^= hi >> 6;
	return hi & 0x3f;
}
2530 
2531 /**
2532  *	t4_config_rss_range - configure a portion of the RSS mapping table
2533  *	@adapter: the adapter
2534  *	@mbox: mbox to use for the FW command
2535  *	@viid: virtual interface whose RSS subtable is to be written
2536  *	@start: start entry in the table to write
2537  *	@n: how many table entries to write
2538  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2539  *	@nrspq: number of values in @rspq
2540  *
2541  *	Programs the selected part of the VI's RSS mapping table with the
2542  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2543  *	until the full table range is populated.
2544  *
2545  *	The caller must ensure the values in @rspq are in the range allowed for
2546  *	@viid.
2547  */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* Cursor into @rspq; wraps back to the start when it reaches
	 * @rsp_end so the supplied values repeat until @n is exhausted. */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/* Build the common command header once; per-chunk fields are
	 * filled in inside the loop below. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));


	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		/*
		 * Advance "start" and "n" past the "nq" entries this
		 * command will carry, ready for the next iteration.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}

	return 0;
}
2628 
2629 /**
2630  *	t4_config_glbl_rss - configure the global RSS mode
2631  *	@adapter: the adapter
2632  *	@mbox: mbox to use for the FW command
2633  *	@mode: global RSS mode
2634  *	@flags: mode-specific flags
2635  *
2636  *	Sets the global RSS mode.
2637  */
2638 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2639 		       unsigned int flags)
2640 {
2641 	struct fw_rss_glb_config_cmd c;
2642 
2643 	memset(&c, 0, sizeof(c));
2644 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2645 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2646 	c.retval_len16 = htonl(FW_LEN16(c));
2647 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2648 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2649 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2650 		c.u.basicvirtual.mode_pkd =
2651 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2652 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2653 	} else
2654 		return -EINVAL;
2655 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2656 }
2657 
2658 /**
2659  *	t4_config_vi_rss - configure per VI RSS settings
2660  *	@adapter: the adapter
2661  *	@mbox: mbox to use for the FW command
2662  *	@viid: the VI id
2663  *	@flags: RSS flags
2664  *	@defq: id of the default RSS queue for the VI.
2665  *
2666  *	Configures VI-specific RSS properties.
2667  */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	/* Build a basicvirtual VI-RSS write: @flags OR-ed with the default
	 * queue field, sent through the firmware mailbox. */
	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
2682 
2683 /* Read an RSS table row */
/* Trigger a lookup-table read for @row, then poll for F_LKPTBLROWVLD and
 * return the row contents in *val (0 on success, error from the wait
 * helper otherwise). */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
2690 
2691 /**
2692  *	t4_read_rss - read the contents of the RSS mapping table
2693  *	@adapter: the adapter
2694  *	@map: holds the contents of the RSS mapping table
2695  *
2696  *	Reads the contents of the RSS hash->queue mapping table.
2697  */
2698 int t4_read_rss(struct adapter *adapter, u16 *map)
2699 {
2700 	u32 val;
2701 	int i, ret;
2702 
2703 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2704 		ret = rd_rss_row(adapter, i, &val);
2705 		if (ret)
2706 			return ret;
2707 		*map++ = G_LKPTBLQUEUE0(val);
2708 		*map++ = G_LKPTBLQUEUE1(val);
2709 	}
2710 	return 0;
2711 }
2712 
2713 /**
2714  *	t4_read_rss_key - read the global RSS key
2715  *	@adap: the adapter
2716  *	@key: 10-entry array holding the 320-bit RSS key
2717  *
2718  *	Reads the global 320-bit RSS key.
2719  */
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
	/* Read the 10 consecutive 32-bit words of the 320-bit key via the
	 * TP PIO indirect-access window. */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
			 A_TP_RSS_SECRET_KEY0);
}
2725 
2726 /**
2727  *	t4_write_rss_key - program one of the RSS keys
2728  *	@adap: the adapter
2729  *	@key: 10-entry array holding the 320-bit RSS key
2730  *	@idx: which RSS key to write
2731  *
2732  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2733  *	0..15 the corresponding entry in the RSS key table is written,
2734  *	otherwise the global RSS key is written.
2735  */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
	/* Write the 10-word key through the TP PIO indirect window. */
	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
			  A_TP_RSS_SECRET_KEY0);
	/* For idx 0..15, additionally latch the key into that key-table
	 * entry; out-of-range idx leaves only the global key written. */
	if (idx >= 0 && idx < 16)
		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
			     V_KEYWRADDR(idx) | F_KEYWREN);
}
2744 
2745 /**
2746  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2747  *	@adapter: the adapter
2748  *	@index: the entry in the PF RSS table to read
2749  *	@valp: where to store the returned value
2750  *
2751  *	Reads the PF RSS Configuration Table at the specified index and returns
2752  *	the value found there.
2753  */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
{
	/* Read one PF config word; @index offsets from the PF0 register. */
	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
}
2759 
/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.  As in t4_read_rss_pf_config(), @index offsets from
 *	PF0's register in the TP_PIO indirect register space.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
{
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
}
2774 
/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt;

	/*
	 * Request that the index'th VF Table values be read into VFL/VFH:
	 * clear any stale read/write/key enables in the VRT register, then
	 * set the VF address together with the read-enable bit.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
	vrt |= V_VFWRADDR(index) | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values latched by the read request above ...
	 */
	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 vfl, 1, A_TP_RSS_VFL_CONFIG);
	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 vfh, 1, A_TP_RSS_VFH_CONFIG);
}
2806 
/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh)
{
	u32 vrt;

	/*
	 * Load up VFL/VFH with the values to be written; this must happen
	 * before the commit via the VRT register below ...
	 */
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &vfh, 1, A_TP_RSS_VFH_CONFIG);

	/*
	 * Write the VFL/VFH into the VF Table at the index'th location:
	 * clear stale enables, then set the VF address with write-enable.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
	vrt |= V_VFWRADDR(index) | F_VFWREN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}
2839 
/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Map register (via the TP_PIO indirect interface) and
 *	returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter)
{
	u32 pfmap;

	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &pfmap, 1, A_TP_RSS_PF_MAP);
	return pfmap;
}
2854 
/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *
 *	Writes the specified value to the PF RSS Map register (via the TP_PIO
 *	indirect interface).
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
{
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &pfmap, 1, A_TP_RSS_PF_MAP);
}
2867 
/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Mask register (via the TP_PIO indirect interface) and
 *	returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
	u32 pfmask;

	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &pfmask, 1, A_TP_RSS_PF_MSK);
	return pfmask;
}
2882 
/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *
 *	Writes the specified value to the PF RSS Mask register (via the TP_PIO
 *	indirect interface).
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
{
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &pfmask, 1, A_TP_RSS_PF_MSK);
}
2895 
2896 /**
2897  *	t4_set_filter_mode - configure the optional components of filter tuples
2898  *	@adap: the adapter
2899  *	@mode_map: a bitmap selcting which optional filter components to enable
2900  *
2901  *	Sets the filter mode by selecting the optional components to enable
2902  *	in filter tuples.  Returns 0 on success and a negative error if the
2903  *	requested mode needs more bits than are available for optional
2904  *	components.
2905  */
2906 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2907 {
2908 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2909 
2910 	int i, nbits = 0;
2911 
2912 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2913 		if (mode_map & (1 << i))
2914 			nbits += width[i];
2915 	if (nbits > FILTER_OPT_LEN)
2916 		return -EINVAL;
2917 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2918 			  A_TP_VLAN_PRI_MAP);
2919 	return 0;
2920 }
2921 
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* One slot per MIB register in the contiguous OUT_RST..RXT_SEG_LO
	 * range; 64-bit counters occupy a HI/LO register pair. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The IPv6 counters start at V6OUT_RST but use the same
		 * relative layout, so the STAT macros apply unchanged. */
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
2960 
/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's error counters.  Each call below reads one
 *	contiguous run of MIB registers into the matching array (or scalar
 *	pair) in @st.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
			 12, A_TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
			 8, A_TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
			 4, A_TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
			 4, A_TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
			 4, A_TP_MIB_TCP_V6IN_ERR_0);
	/* Reads 2 words starting at OFD_ARP_DROP into ofldNoNeigh and the
	 * field that follows it in struct tp_err_stats. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
			 2, A_TP_MIB_OFD_ARP_DROP);
}
2983 
/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's proxy counters (4 consecutive MIB words
 *	starting at TNL_LPBK_0).
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
			 4, A_TP_MIB_TNL_LPBK_0);
}
2996 
/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's CPL counters (8 consecutive MIB words
 *	starting at CPL_IN_REQ_0, filled into st->req and what follows it).
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
			 8, A_TP_MIB_CPL_IN_REQ_0);
}
3009 
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's RDMA counters (2 consecutive MIB words
 *	starting at RQE_DFR_MOD, filled into rqe_dfr_mod and the field that
 *	follows it in struct tp_rdma_stats).
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
			 2, A_TP_MIB_RQE_DFR_MOD);
}
3022 
/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st)
{
	u32 val[2];

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
			 1, A_TP_MIB_FCOE_DDP_0 + idx);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
			 1, A_TP_MIB_FCOE_DROP_0 + idx);
	/* The byte counter is a HI/LO register pair, two words per port;
	 * val[0] is the high word (read starts at BYTE_0_HI). */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
	st->octetsDDP = ((u64)val[0] << 32) | val[1];
}
3044 
/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
{
	u32 val[4];

	/* 4 consecutive words: packets, drops, and a 64-bit octet counter
	 * split across a HI/LO pair (val[2] high, val[3] low). */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
			 A_TP_MIB_USM_PKTS);
	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}
3062 
3063 /**
3064  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3065  *	@adap: the adapter
3066  *	@mtus: where to store the MTU values
3067  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3068  *
3069  *	Reads the HW path MTU table.
3070  */
3071 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3072 {
3073 	u32 v;
3074 	int i;
3075 
3076 	for (i = 0; i < NMTUS; ++i) {
3077 		t4_write_reg(adap, A_TP_MTU_TABLE,
3078 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3079 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3080 		mtus[i] = G_MTUVALUE(v);
3081 		if (mtu_log)
3082 			mtu_log[i] = G_MTUWIDTH(v);
3083 	}
3084 }
3085 
3086 /**
3087  *	t4_read_cong_tbl - reads the congestion control table
3088  *	@adap: the adapter
3089  *	@incr: where to store the alpha values
3090  *
3091  *	Reads the additive increments programmed into the HW congestion
3092  *	control table.
3093  */
3094 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3095 {
3096 	unsigned int mtu, w;
3097 
3098 	for (mtu = 0; mtu < NMTUS; ++mtu)
3099 		for (w = 0; w < NCCTRL_WIN; ++w) {
3100 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3101 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3102 			incr[mtu][w] = (u16)t4_read_reg(adap,
3103 						A_TP_CCTRL_TABLE) & 0x1fff;
3104 		}
3105 }
3106 
3107 /**
3108  *	t4_read_pace_tbl - read the pace table
3109  *	@adap: the adapter
3110  *	@pace_vals: holds the returned values
3111  *
3112  *	Returns the values of TP's pace table in microseconds.
3113  */
3114 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3115 {
3116 	unsigned int i, v;
3117 
3118 	for (i = 0; i < NTX_SCHED; i++) {
3119 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3120 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3121 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3122 	}
3123 }
3124 
/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value by doing a
 *	read-modify-write through the TP_PIO address/data register pair.
 *	NOTE(review): the RMW is not atomic; callers presumably hold whatever
 *	lock serializes TP_PIO access — confirm at call sites.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	/* Merge the new field into the bits outside @mask. */
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}
3141 
3142 /**
3143  *	init_cong_ctrl - initialize congestion control parameters
3144  *	@a: the alpha values for congestion control
3145  *	@b: the beta values for congestion control
3146  *
3147  *	Initialize the congestion control parameters.
3148  */
3149 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3150 {
3151 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3152 	a[9] = 2;
3153 	a[10] = 3;
3154 	a[11] = 4;
3155 	a[12] = 5;
3156 	a[13] = 6;
3157 	a[14] = 7;
3158 	a[15] = 8;
3159 	a[16] = 9;
3160 	a[17] = 10;
3161 	a[18] = 14;
3162 	a[19] = 17;
3163 	a[20] = 21;
3164 	a[21] = 25;
3165 	a[22] = 30;
3166 	a[23] = 35;
3167 	a[24] = 45;
3168 	a[25] = 60;
3169 	a[26] = 80;
3170 	a[27] = 100;
3171 	a[28] = 200;
3172 	a[29] = 300;
3173 	a[30] = 400;
3174 	a[31] = 500;
3175 
3176 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3177 	b[9] = b[10] = 1;
3178 	b[11] = b[12] = 2;
3179 	b[13] = b[14] = b[15] = b[16] = 3;
3180 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3181 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3182 	b[28] = b[29] = 6;
3183 	b[30] = b[31] = 7;
3184 }
3185 
3186 /* The minimum additive increment value for the congestion control table */
3187 #define CC_MIN_INCR 2U
3188 
/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet size assumed for each congestion window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Decrement the width unless the bit two below the MSB is
		 * set, i.e. round the log to the nearest power of two. */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment, never below CC_MIN_INCR.
			 * NOTE(review): mtu - 40 presumably discounts
			 * TCP/IP header bytes — confirm against data book. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Entry layout: MTU index in bits 21+, window in
			 * bits 16-20, beta in bits 13-15, increment in the
			 * low 13 bits. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3232 
3233 /**
3234  *	t4_set_pace_tbl - set the pace table
3235  *	@adap: the adapter
3236  *	@pace_vals: the pace values in microseconds
3237  *	@start: index of the first entry in the HW pace table to set
3238  *	@n: how many entries to set
3239  *
3240  *	Sets (a subset of the) HW pace table.
3241  */
3242 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3243 		     unsigned int start, unsigned int n)
3244 {
3245 	unsigned int vals[NTX_SCHED], i;
3246 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3247 
3248 	if (n > NTX_SCHED)
3249 	    return -ERANGE;
3250 
3251 	/* convert values from us to dack ticks, rounding to closest value */
3252 	for (i = 0; i < n; i++, pace_vals++) {
3253 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3254 		if (vals[i] > 0x7ff)
3255 			return -ERANGE;
3256 		if (*pace_vals && vals[i] == 0)
3257 			return -ERANGE;
3258 	}
3259 	for (i = 0; i < n; i++, start++)
3260 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3261 	return 0;
3262 }
3263 
/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps (0 leaves the scheduler disabled)
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;     /* -> bytes */
		/*
		 * Exhaustively search all (clocks-per-tick, bytes-per-tick)
		 * pairs for the one whose effective rate is closest to the
		 * requested rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* scheduler ticks per second */
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;	/* achieved bytes per second */
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/*
	 * Two schedulers share each 32-bit rate-limit register; odd
	 * schedulers use the upper half-word.
	 */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3307 
/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.  Two
 *	schedulers share each timer-separator register; odd schedulers use
 *	the Q1 field, even ones the Q0 field.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	/* Replace only this scheduler's half of the register. */
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	/* Read back, presumably to flush the posted write — confirm. */
	t4_read_reg(adap, A_TP_TM_PIO_DATA);
	return 0;
}
3336 
/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps (may be %NULL to skip)
 *	@ipg: the interpacket delay in tenths of nanoseconds (may be %NULL)
 *
 *	Return the current configuration of a HW Tx scheduler.  Two
 *	schedulers share each 32-bit register; odd schedulers occupy the
 *	upper half-word.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)
			v >>= 16;
		/* bytes-per-tick in bits 8-15, clocks-per-tick in bits 0-7 */
		bpt = (v >> 8) & 0xff;
		cpt = v & 0xff;
		if (!cpt)
			*kbps = 0;        /* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		/* core-clock ticks -> tenths of nanoseconds */
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}
3376 
3377 /*
3378  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3379  * clocks.  The formula is
3380  *
3381  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3382  *
3383  * which is equivalent to
3384  *
3385  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3386  */
3387 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3388 {
3389 	u64 v = bytes256 * adap->params.vpd.cclk;
3390 
3391 	return v * 62 + v / 2;
3392 }
3393 
/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic (4 entries, one per channel)
 *	@ofld_rate: rates for offloaded traffic (4 entries, one per channel)
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	/* Tunnel (NIC) rates, one field per channel. */
	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));

	/* Offload rates, one field per channel. */
	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
}
3419 
/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @enable is
 *	%0 @tp is not examined and may be %NULL. The user is responsible to
 *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
 *	docs/readme.txt for a complete description of how to setup traceing on
 *	T4.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;
	u32 multitrc = F_TRCMULTIFILTER;

	/* Disabling is just clearing the filter's match control register. */
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		multitrc = 0;
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate the remaining parameters against their field widths. */
	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Locate this filter's match/don't-care register banks. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* HW stores the complement of the mask (don't-care bits). */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Enable last, after the match data is fully programmed. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));

	return 0;
}
3499 
/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* Unpack the control fields written by t4_set_trace_filter(). */
	*enabled = !!(ctla & F_TFEN);
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);
	tp->invert = !!(ctla & F_TFINVERTMATCH);
	tp->port = G_TFPORT(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* HW stores the mask complemented (don't-care bits), so invert it
	 * back, and report only the data bits the mask covers. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
3536 
3537 /**
3538  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3539  *	@adap: the adapter
3540  *	@cnt: where to store the count statistics
3541  *	@cycles: where to store the cycle statistics
3542  *
3543  *	Returns performance statistics from PMTX.
3544  */
3545 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3546 {
3547 	int i;
3548 
3549 	for (i = 0; i < PM_NSTATS; i++) {
3550 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3551 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3552 		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3553 	}
3554 }
3555 
3556 /**
3557  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3558  *	@adap: the adapter
3559  *	@cnt: where to store the count statistics
3560  *	@cycles: where to store the cycle statistics
3561  *
3562  *	Returns performance statistics from PMRX.
3563  */
3564 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3565 {
3566 	int i;
3567 
3568 	for (i = 0; i < PM_NSTATS; i++) {
3569 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3570 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3571 		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3572 	}
3573 }
3574 
3575 /**
3576  *	get_mps_bg_map - return the buffer groups associated with a port
3577  *	@adap: the adapter
3578  *	@idx: the port index
3579  *
3580  *	Returns a bitmap indicating which MPS buffer groups are associated
3581  *	with the given port.  Bit i is set if buffer group i is used by the
3582  *	port.
3583  */
3584 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3585 {
3586 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3587 
3588 	if (n == 0)
3589 		return idx == 0 ? 0xf : 0;
3590 	if (n == 1)
3591 		return idx < 2 ? (3 << (2 * idx)) : 0;
3592 	return 1 << idx;
3593 }
3594 
3595 /**
3596  *      t4_get_port_stats_offset - collect port stats relative to a previous
3597  *                                 snapshot
3598  *      @adap: The adapter
3599  *      @idx: The port
3600  *      @stats: Current stats to fill
3601  *      @offset: Previous stats snapshot
3602  */
3603 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3604 		struct port_stats *stats,
3605 		struct port_stats *offset)
3606 {
3607 	u64 *s, *o;
3608 	int i;
3609 
3610 	t4_get_port_stats(adap, idx, stats);
3611 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3612 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
3613 			i++, s++, o++)
3614 		*s -= *o;
3615 }
3616 
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.  Each counter
 *	is a 64-bit register pair in the per-port MPS statistics block; the
 *	buffer-group drop/truncate counters at the end live in the common
 *	MPS block and are only meaningful for the groups mapped to this port.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

/* Read a 64-bit per-port counter / a 64-bit common counter. */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Buffer-group counters: report 0 for groups not owned by @idx. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
3697 
3698 /**
3699  *	t4_clr_port_stats - clear port statistics
3700  *	@adap: the adapter
3701  *	@idx: the port index
3702  *
3703  *	Clear HW statistics for the given port.
3704  */
3705 void t4_clr_port_stats(struct adapter *adap, int idx)
3706 {
3707 	unsigned int i;
3708 	u32 bgmap = get_mps_bg_map(adap, idx);
3709 
3710 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3711 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3712 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3713 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3714 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3715 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3716 	for (i = 0; i < 4; i++)
3717 		if (bgmap & (1 << i)) {
3718 			t4_write_reg(adap,
3719 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3720 			t4_write_reg(adap,
3721 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3722 		}
3723 }
3724 
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

	/* Read a 64-bit per-loopback-port statistic; the token-pasted name
	 * selects the _L register of the stat's _L/_H pair. */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
	/* Read a 64-bit statistic from the common (non per-port) MPS block. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	/* Frame/byte counters. */
	p->octets           = GET_STAT(BYTES);
	p->frames           = GET_STAT(FRAMES);
	p->bcast_frames     = GET_STAT(BCAST);
	p->mcast_frames     = GET_STAT(MCAST);
	p->ucast_frames     = GET_STAT(UCAST);
	p->error_frames     = GET_STAT(ERROR);

	/* Frame size histogram. */
	p->frames_64        = GET_STAT(64B);
	p->frames_65_127    = GET_STAT(65B_127B);
	p->frames_128_255   = GET_STAT(128B_255B);
	p->frames_256_511   = GET_STAT(256B_511B);
	p->frames_512_1023  = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max  = GET_STAT(1519B_MAX);
	/* The drop counter is a plain 32-bit register, not an _L/_H pair. */
	p->drop             = t4_read_reg(adap, PORT_REG(idx,
					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));

	/* Per-buffer-group drop/truncate counters; only buffer groups
	 * present in this port's bgmap are read, others report 0. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
3770 
3771 /**
3772  *	t4_wol_magic_enable - enable/disable magic packet WoL
3773  *	@adap: the adapter
3774  *	@port: the physical port index
3775  *	@addr: MAC address expected in magic packets, %NULL to disable
3776  *
3777  *	Enables/disables magic packet wake-on-LAN for the selected port.
3778  */
3779 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3780 			 const u8 *addr)
3781 {
3782 	if (addr) {
3783 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3784 			     (addr[2] << 24) | (addr[3] << 16) |
3785 			     (addr[4] << 8) | addr[5]);
3786 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3787 			     (addr[0] << 8) | addr[1]);
3788 	}
3789 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3790 			 V_MAGICEN(addr != NULL));
3791 }
3792 
3793 /**
3794  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3795  *	@adap: the adapter
3796  *	@port: the physical port index
3797  *	@map: bitmap of which HW pattern filters to set
3798  *	@mask0: byte mask for bytes 0-63 of a packet
3799  *	@mask1: byte mask for bytes 64-127 of a packet
3800  *	@crc: Ethernet CRC for selected bytes
3801  *	@enable: enable/disable switch
3802  *
3803  *	Sets the pattern filters indicated in @map to mask out the bytes
3804  *	specified in @mask0/@mask1 in received packets and compare the CRC of
3805  *	the resulting packet against @crc.  If @enable is %true pattern-based
3806  *	WoL is enabled, otherwise disabled.
3807  */
3808 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3809 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3810 {
3811 	int i;
3812 
3813 	if (!enable) {
3814 		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3815 				 F_PATEN, 0);
3816 		return 0;
3817 	}
3818 	if (map > 0xff)
3819 		return -EINVAL;
3820 
3821 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3822 
3823 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3824 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3825 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3826 
3827 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3828 		if (!(map & 1))
3829 			continue;
3830 
3831 		/* write byte masks */
3832 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3833 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3834 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3835 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3836 			return -ETIMEDOUT;
3837 
3838 		/* write CRC */
3839 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3840 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3841 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3842 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3843 			return -ETIMEDOUT;
3844 	}
3845 #undef EPIO_REG
3846 
3847 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3848 	return 0;
3849 }
3850 
3851 /**
3852  *	t4_mk_filtdelwr - create a delete filter WR
3853  *	@ftid: the filter ID
3854  *	@wr: the filter work request to populate
3855  *	@qid: ingress queue to receive the delete notification
3856  *
3857  *	Creates a filter work request to delete the supplied filter.  If @qid is
3858  *	negative the delete notification is suppressed.
3859  */
3860 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3861 {
3862 	memset(wr, 0, sizeof(*wr));
3863 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3864 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3865 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3866 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3867 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3868 	if (qid >= 0)
3869 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3870 }
3871 
/*
 * Initialize the common header of a firmware command structure: set the
 * opcode (FW_<cmd>_CMD), the REQUEST flag plus the READ/WRITE direction,
 * and the command length in 16-byte units derived from the structure size.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
3877 
3878 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
3879 {
3880 	struct fw_ldst_cmd c;
3881 
3882 	memset(&c, 0, sizeof(c));
3883 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3884 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
3885 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3886 	c.u.addrval.addr = htonl(addr);
3887 	c.u.addrval.val = htonl(val);
3888 
3889 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3890 }
3891 
3892 /**
3893  *	t4_i2c_rd - read a byte from an i2c addressable device
3894  *	@adap: the adapter
3895  *	@mbox: mailbox to use for the FW command
3896  *	@port_id: the port id
3897  *	@dev_addr: the i2c device address
3898  *	@offset: the byte offset to read from
3899  *	@valp: where to store the value
3900  */
3901 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
3902 	       u8 dev_addr, u8 offset, u8 *valp)
3903 {
3904 	int ret;
3905 	struct fw_ldst_cmd c;
3906 
3907 	memset(&c, 0, sizeof(c));
3908 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3909 		F_FW_CMD_READ |
3910 		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
3911 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3912 	c.u.i2c.pid_pkd = V_FW_LDST_CMD_PID(port_id);
3913 	c.u.i2c.base = dev_addr;
3914 	c.u.i2c.boffset = offset;
3915 
3916 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3917 	if (ret == 0)
3918 		*valp = c.u.i2c.data;
3919 	return ret;
3920 }
3921 
3922 /**
3923  *	t4_mdio_rd - read a PHY register through MDIO
3924  *	@adap: the adapter
3925  *	@mbox: mailbox to use for the FW command
3926  *	@phy_addr: the PHY address
3927  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3928  *	@reg: the register to read
3929  *	@valp: where to store the value
3930  *
3931  *	Issues a FW command through the given mailbox to read a PHY register.
3932  */
3933 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3934 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3935 {
3936 	int ret;
3937 	struct fw_ldst_cmd c;
3938 
3939 	memset(&c, 0, sizeof(c));
3940 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3941 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3942 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3943 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3944 				   V_FW_LDST_CMD_MMD(mmd));
3945 	c.u.mdio.raddr = htons(reg);
3946 
3947 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3948 	if (ret == 0)
3949 		*valp = ntohs(c.u.mdio.rval);
3950 	return ret;
3951 }
3952 
3953 /**
3954  *	t4_mdio_wr - write a PHY register through MDIO
3955  *	@adap: the adapter
3956  *	@mbox: mailbox to use for the FW command
3957  *	@phy_addr: the PHY address
3958  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3959  *	@reg: the register to write
3960  *	@valp: value to write
3961  *
3962  *	Issues a FW command through the given mailbox to write a PHY register.
3963  */
3964 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3965 	       unsigned int mmd, unsigned int reg, unsigned int val)
3966 {
3967 	struct fw_ldst_cmd c;
3968 
3969 	memset(&c, 0, sizeof(c));
3970 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3971 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3972 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3973 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3974 				   V_FW_LDST_CMD_MMD(mmd));
3975 	c.u.mdio.raddr = htons(reg);
3976 	c.u.mdio.rval = htons(val);
3977 
3978 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3979 }
3980 
3981 /**
3982  *	t4_sge_ctxt_flush - flush the SGE context cache
3983  *	@adap: the adapter
3984  *	@mbox: mailbox to use for the FW command
3985  *
3986  *	Issues a FW command through the given mailbox to flush the
3987  *	SGE context cache.
3988  */
3989 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
3990 {
3991 	int ret;
3992 	struct fw_ldst_cmd c;
3993 
3994 	memset(&c, 0, sizeof(c));
3995 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3996 			F_FW_CMD_READ |
3997 			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
3998 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3999 	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4000 
4001 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4002 	return ret;
4003 }
4004 
4005 /**
4006  *	t4_sge_ctxt_rd - read an SGE context through FW
4007  *	@adap: the adapter
4008  *	@mbox: mailbox to use for the FW command
4009  *	@cid: the context id
4010  *	@ctype: the context type
4011  *	@data: where to store the context data
4012  *
4013  *	Issues a FW command through the given mailbox to read an SGE context.
4014  */
4015 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4016 		   enum ctxt_type ctype, u32 *data)
4017 {
4018 	int ret;
4019 	struct fw_ldst_cmd c;
4020 
4021 	if (ctype == CTXT_EGRESS)
4022 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4023 	else if (ctype == CTXT_INGRESS)
4024 		ret = FW_LDST_ADDRSPC_SGE_INGC;
4025 	else if (ctype == CTXT_FLM)
4026 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4027 	else
4028 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4029 
4030 	memset(&c, 0, sizeof(c));
4031 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4032 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4033 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4034 	c.u.idctxt.physid = htonl(cid);
4035 
4036 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4037 	if (ret == 0) {
4038 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4039 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4040 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4041 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4042 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4043 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4044 	}
4045 	return ret;
4046 }
4047 
4048 /**
4049  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4050  *	@adap: the adapter
4051  *	@cid: the context id
4052  *	@ctype: the context type
4053  *	@data: where to store the context data
4054  *
4055  *	Reads an SGE context directly, bypassing FW.  This is only for
4056  *	debugging when FW is unavailable.
4057  */
4058 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4059 		      u32 *data)
4060 {
4061 	int i, ret;
4062 
4063 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4064 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4065 	if (!ret)
4066 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4067 			*data++ = t4_read_reg(adap, i);
4068 	return ret;
4069 }
4070 
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * Encode the caller's mastership desire: MASTER_CANT disables
	 * mastership, MASTER_MUST both forces it and nominates our own
	 * mailbox; otherwise M_FW_HELLO_CMD_MBMASTER leaves the choice
	 * to the firmware.
	 */
	c.err_to_clearinit = htonl(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
			M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	/* Decode the reply: master mailbox and device state. */
	v = ntohl(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll the PCIE_FW register every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
4196 
4197 /**
4198  *	t4_fw_bye - end communication with FW
4199  *	@adap: the adapter
4200  *	@mbox: mailbox to use for the FW command
4201  *
4202  *	Issues a command to terminate communication with FW.
4203  */
4204 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4205 {
4206 	struct fw_bye_cmd c;
4207 
4208 	memset(&c, 0, sizeof(c));
4209 	INIT_CMD(c, BYE, WRITE);
4210 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4211 }
4212 
4213 /**
4214  *	t4_fw_reset - issue a reset to FW
4215  *	@adap: the adapter
4216  *	@mbox: mailbox to use for the FW command
4217  *	@reset: specifies the type of reset to perform
4218  *
4219  *	Issues a reset command of the specified type to FW.
4220  */
4221 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4222 {
4223 	struct fw_reset_cmd c;
4224 
4225 	memset(&c, 0, sizeof(c));
4226 	INIT_CMD(c, RESET, WRITE);
4227 	c.val = htonl(reset);
4228 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4229 }
4230 
/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.  A value above M_PCIE_FW_MASTER means
	 * "no mailbox", so the command is skipped entirely.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = htonl(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
4289 
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if valid)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	 1. If we're dealing with newer firmware we'll simply want to take
 *	    the chip's microprocessor out of RESET.  This will cause the
 *	    firmware to start up from its start vector.  And then we'll loop
 *	    until the firmware indicates it's started again (PCIE_FW.HALT
 *	    reset to 0) or we timeout.
 *
 *	 2. If we're dealing with older firmware then we'll need to RESET
 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
 *	    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* Full chip reset as a last resort. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear its HALT flag, up to FW_CMD_MAX_TIMEOUT ms. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
4352 
4353 /**
4354  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4355  *	@adap: the adapter
4356  *	@mbox: mailbox to use for the FW RESET command (if desired)
4357  *	@fw_data: the firmware image to write
4358  *	@size: image size
4359  *	@force: force upgrade even if firmware doesn't cooperate
4360  *
4361  *	Perform all of the steps necessary for upgrading an adapter's
4362  *	firmware image.  Normally this requires the cooperation of the
4363  *	existing firmware in order to halt all existing activities
4364  *	but if an invalid mailbox token is passed in we skip that step
4365  *	(though we'll still put the adapter microprocessor into RESET in
4366  *	that case).
4367  *
4368  *	On successful return the new firmware will have been loaded and
4369  *	the adapter will have been fully RESET losing all previous setup
4370  *	state.  On unsuccessful return the adapter may be completely hosed ...
4371  *	positive errno indicates that the adapter is ~probably~ intact, a
4372  *	negative errno indicates that things are looking bad ...
4373  */
4374 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4375 		  const u8 *fw_data, unsigned int size, int force)
4376 {
4377 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4378 	int reset, ret;
4379 
4380 	ret = t4_fw_halt(adap, mbox, force);
4381 	if (ret < 0 && !force)
4382 		return ret;
4383 
4384 	ret = t4_load_fw(adap, fw_data, size);
4385 	if (ret < 0)
4386 		return ret;
4387 
4388 	/*
4389 	 * Older versions of the firmware don't understand the new
4390 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4391 	 * restart.  So for newly loaded older firmware we'll have to do the
4392 	 * RESET for it so it starts up on a clean slate.  We can tell if
4393 	 * the newly loaded firmware will handle this right by checking
4394 	 * its header flags to see if it advertises the capability.
4395 	 */
4396 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4397 	return t4_fw_restart(adap, mbox, reset);
4398 }
4399 
4400 /**
4401  *	t4_fw_initialize - ask FW to initialize the device
4402  *	@adap: the adapter
4403  *	@mbox: mailbox to use for the FW command
4404  *
4405  *	Issues a command to FW to partially initialize the device.  This
4406  *	performs initialization that generally doesn't depend on user input.
4407  */
4408 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4409 {
4410 	struct fw_initialize_cmd c;
4411 
4412 	memset(&c, 0, sizeof(c));
4413 	INIT_CMD(c, INITIALIZE, WRITE);
4414 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4415 }
4416 
4417 /**
4418  *	t4_query_params - query FW or device parameters
4419  *	@adap: the adapter
4420  *	@mbox: mailbox to use for the FW command
4421  *	@pf: the PF
4422  *	@vf: the VF
4423  *	@nparams: the number of parameters
4424  *	@params: the parameter names
4425  *	@val: the parameter values
4426  *
4427  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4428  *	queried at once.
4429  */
4430 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4431 		    unsigned int vf, unsigned int nparams, const u32 *params,
4432 		    u32 *val)
4433 {
4434 	int i, ret;
4435 	struct fw_params_cmd c;
4436 	__be32 *p = &c.param[0].mnem;
4437 
4438 	if (nparams > 7)
4439 		return -EINVAL;
4440 
4441 	memset(&c, 0, sizeof(c));
4442 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4443 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4444 			    V_FW_PARAMS_CMD_VFN(vf));
4445 	c.retval_len16 = htonl(FW_LEN16(c));
4446 
4447 	for (i = 0; i < nparams; i++, p += 2)
4448 		*p = htonl(*params++);
4449 
4450 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4451 	if (ret == 0)
4452 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4453 			*val++ = ntohl(*p);
4454 	return ret;
4455 }
4456 
4457 /**
4458  *	t4_set_params - sets FW or device parameters
4459  *	@adap: the adapter
4460  *	@mbox: mailbox to use for the FW command
4461  *	@pf: the PF
4462  *	@vf: the VF
4463  *	@nparams: the number of parameters
4464  *	@params: the parameter names
4465  *	@val: the parameter values
4466  *
4467  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4468  *	specified at once.
4469  */
4470 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4471 		  unsigned int vf, unsigned int nparams, const u32 *params,
4472 		  const u32 *val)
4473 {
4474 	struct fw_params_cmd c;
4475 	__be32 *p = &c.param[0].mnem;
4476 
4477 	if (nparams > 7)
4478 		return -EINVAL;
4479 
4480 	memset(&c, 0, sizeof(c));
4481 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4482 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4483 			    V_FW_PARAMS_CMD_VFN(vf));
4484 	c.retval_len16 = htonl(FW_LEN16(c));
4485 
4486 	while (nparams--) {
4487 		*p++ = htonl(*params++);
4488 		*p++ = htonl(*val++);
4489 	}
4490 
4491 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4492 }
4493 
4494 /**
4495  *	t4_cfg_pfvf - configure PF/VF resource limits
4496  *	@adap: the adapter
4497  *	@mbox: mailbox to use for the FW command
4498  *	@pf: the PF being configured
4499  *	@vf: the VF being configured
4500  *	@txq: the max number of egress queues
4501  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4502  *	@rxqi: the max number of interrupt-capable ingress queues
4503  *	@rxq: the max number of interruptless ingress queues
4504  *	@tc: the PCI traffic class
4505  *	@vi: the max number of virtual interfaces
4506  *	@cmask: the channel access rights mask for the PF/VF
4507  *	@pmask: the port access rights mask for the PF/VF
4508  *	@nexact: the maximum number of exact MPS filters
4509  *	@rcaps: read capabilities
4510  *	@wxcaps: write/execute capabilities
4511  *
4512  *	Configures resource limits and capabilities for a physical or virtual
4513  *	function.
4514  */
4515 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4516 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4517 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4518 		unsigned int vi, unsigned int cmask, unsigned int pmask,
4519 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4520 {
4521 	struct fw_pfvf_cmd c;
4522 
4523 	memset(&c, 0, sizeof(c));
4524 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4525 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4526 			    V_FW_PFVF_CMD_VFN(vf));
4527 	c.retval_len16 = htonl(FW_LEN16(c));
4528 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4529 			       V_FW_PFVF_CMD_NIQ(rxq));
4530 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4531 			      V_FW_PFVF_CMD_PMASK(pmask) |
4532 			      V_FW_PFVF_CMD_NEQ(txq));
4533 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4534 				V_FW_PFVF_CMD_NEXACTF(nexact));
4535 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4536 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4537 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4538 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4539 }
4540 
4541 /**
4542  *	t4_alloc_vi_func - allocate a virtual interface
4543  *	@adap: the adapter
4544  *	@mbox: mailbox to use for the FW command
4545  *	@port: physical port associated with the VI
4546  *	@pf: the PF owning the VI
4547  *	@vf: the VF owning the VI
4548  *	@nmac: number of MAC addresses needed (1 to 5)
4549  *	@mac: the MAC addresses of the VI
4550  *	@rss_size: size of RSS table slice associated with this VI
4551  *	@portfunc: which Port Application Function MAC Address is desired
4552  *	@idstype: Intrusion Detection Type
4553  *
4554  *	Allocates a virtual interface for the given physical port.  If @mac is
4555  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4556  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4557  *	stored consecutively so the space needed is @nmac * 6 bytes.
4558  *	Returns a negative error number or the non-negative VI id.
4559  */
4560 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4561 		     unsigned int port, unsigned int pf, unsigned int vf,
4562 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4563 		     unsigned int portfunc, unsigned int idstype)
4564 {
4565 	int ret;
4566 	struct fw_vi_cmd c;
4567 
4568 	memset(&c, 0, sizeof(c));
4569 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4570 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4571 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4572 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4573 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4574 			       V_FW_VI_CMD_FUNC(portfunc));
4575 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4576 	c.nmac = nmac - 1;
4577 
4578 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4579 	if (ret)
4580 		return ret;
4581 
4582 	if (mac) {
4583 		memcpy(mac, c.mac, sizeof(c.mac));
4584 		switch (nmac) {
4585 		case 5:
4586 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4587 		case 4:
4588 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4589 		case 3:
4590 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4591 		case 2:
4592 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4593 		}
4594 	}
4595 	if (rss_size)
4596 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
4597 	return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
4598 }
4599 
/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Backwards-compatible and convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}
4622 
4623 /**
4624  *	t4_free_vi - free a virtual interface
4625  *	@adap: the adapter
4626  *	@mbox: mailbox to use for the FW command
4627  *	@pf: the PF owning the VI
4628  *	@vf: the VF owning the VI
4629  *	@viid: virtual interface identifiler
4630  *
4631  *	Free a previously allocated virtual interface.
4632  */
4633 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4634 	       unsigned int vf, unsigned int viid)
4635 {
4636 	struct fw_vi_cmd c;
4637 
4638 	memset(&c, 0, sizeof(c));
4639 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4640 			    F_FW_CMD_REQUEST |
4641 			    F_FW_CMD_EXEC |
4642 			    V_FW_VI_CMD_PFN(pf) |
4643 			    V_FW_VI_CMD_VFN(vf));
4644 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4645 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4646 
4647 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4648 }
4649 
4650 /**
4651  *	t4_set_rxmode - set Rx properties of a virtual interface
4652  *	@adap: the adapter
4653  *	@mbox: mailbox to use for the FW command
4654  *	@viid: the VI id
4655  *	@mtu: the new MTU or -1
4656  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4657  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4658  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4659  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4660  *	@sleep_ok: if true we may sleep while awaiting command completion
4661  *
4662  *	Sets Rx properties of a virtual interface.
4663  */
4664 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4665 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4666 		  bool sleep_ok)
4667 {
4668 	struct fw_vi_rxmode_cmd c;
4669 
4670 	/* convert to FW values */
4671 	if (mtu < 0)
4672 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4673 	if (promisc < 0)
4674 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4675 	if (all_multi < 0)
4676 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4677 	if (bcast < 0)
4678 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4679 	if (vlanex < 0)
4680 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4681 
4682 	memset(&c, 0, sizeof(c));
4683 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4684 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4685 	c.retval_len16 = htonl(FW_LEN16(c));
4686 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4687 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4688 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4689 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4690 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4691 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4692 }
4693 
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;	/* exact filters successfully allocated */
	unsigned int rem = naddr;	/* addresses not yet submitted */

	if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
		return -EINVAL;

	/* Submit the addresses in chunks sized to fit one command. */
	for (offset = 0; offset < naddr ; /**/) {
		/* how many of the remaining addresses fit in this command */
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_CMD_EXEC(free) |
				     V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
					    V_FW_CMD_LEN16(len16));

		/* Let the FW pick a free filter index for each address. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = htons(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Harvest the per-address results from the reply. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
						 ? 0xffff
						 : index);
			if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first chunk frees the pre-existing filters. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Running out of exact-filter space is not an error to the caller. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
4784 
4785 /**
4786  *	t4_change_mac - modifies the exact-match filter for a MAC address
4787  *	@adap: the adapter
4788  *	@mbox: mailbox to use for the FW command
4789  *	@viid: the VI id
4790  *	@idx: index of existing filter for old value of MAC address, or -1
4791  *	@addr: the new MAC address value
4792  *	@persist: whether a new MAC allocation should be persistent
4793  *	@add_smt: if true also add the address to the HW SMT
4794  *
4795  *	Modifies an exact-match filter and sets it to the new MAC address if
4796  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4797  *	latter case the address is added persistently if @persist is %true.
4798  *
4799  *	Note that in general it is not possible to modify the value of a given
4800  *	filter so the generic way to modify an address filter is to free the one
4801  *	being used by the old address value and allocate a new filter for the
4802  *	new address value.
4803  *
4804  *	Returns a negative error number or the index of the filter with the new
4805  *	MAC value.  Note that this index may differ from @idx.
4806  */
4807 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4808 		  int idx, const u8 *addr, bool persist, bool add_smt)
4809 {
4810 	int ret, mode;
4811 	struct fw_vi_mac_cmd c;
4812 	struct fw_vi_mac_exact *p = c.u.exact;
4813 
4814 	if (idx < 0)                             /* new allocation */
4815 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4816 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4817 
4818 	memset(&c, 0, sizeof(c));
4819 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4820 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4821 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4822 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4823 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4824 				V_FW_VI_MAC_CMD_IDX(idx));
4825 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4826 
4827 	ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4828 	if (ret == 0) {
4829 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4830 		if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4831 			ret = -ENOMEM;
4832 	}
4833 	return ret;
4834 }
4835 
4836 /**
4837  *	t4_set_addr_hash - program the MAC inexact-match hash filter
4838  *	@adap: the adapter
4839  *	@mbox: mailbox to use for the FW command
4840  *	@viid: the VI id
4841  *	@ucast: whether the hash filter should also match unicast addresses
4842  *	@vec: the value to be written to the hash filter
4843  *	@sleep_ok: call is allowed to sleep
4844  *
4845  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4846  */
4847 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4848 		     bool ucast, u64 vec, bool sleep_ok)
4849 {
4850 	struct fw_vi_mac_cmd c;
4851 
4852 	memset(&c, 0, sizeof(c));
4853 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4854 			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4855 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4856 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4857 				    V_FW_CMD_LEN16(1));
4858 	c.u.hash.hashvec = cpu_to_be64(vec);
4859 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4860 }
4861 
4862 /**
4863  *	t4_enable_vi - enable/disable a virtual interface
4864  *	@adap: the adapter
4865  *	@mbox: mailbox to use for the FW command
4866  *	@viid: the VI id
4867  *	@rx_en: 1=enable Rx, 0=disable Rx
4868  *	@tx_en: 1=enable Tx, 0=disable Tx
4869  *
4870  *	Enables/disables a virtual interface.
4871  */
4872 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4873 		 bool rx_en, bool tx_en)
4874 {
4875 	struct fw_vi_enable_cmd c;
4876 
4877 	memset(&c, 0, sizeof(c));
4878 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4879 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4880 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4881 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4882 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4883 }
4884 
4885 /**
4886  *	t4_identify_port - identify a VI's port by blinking its LED
4887  *	@adap: the adapter
4888  *	@mbox: mailbox to use for the FW command
4889  *	@viid: the VI id
4890  *	@nblinks: how many times to blink LED at 2.5 Hz
4891  *
4892  *	Identifies a VI's port by blinking its LED.
4893  */
4894 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4895 		     unsigned int nblinks)
4896 {
4897 	struct fw_vi_enable_cmd c;
4898 
4899 	memset(&c, 0, sizeof(c));
4900 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4901 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4902 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4903 	c.blinkdur = htons(nblinks);
4904 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4905 }
4906 
4907 /**
4908  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4909  *	@adap: the adapter
4910  *	@mbox: mailbox to use for the FW command
4911  *	@start: %true to enable the queues, %false to disable them
4912  *	@pf: the PF owning the queues
4913  *	@vf: the VF owning the queues
4914  *	@iqid: ingress queue id
4915  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4916  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4917  *
4918  *	Starts or stops an ingress queue and its associated FLs, if any.
4919  */
4920 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4921 		     unsigned int pf, unsigned int vf, unsigned int iqid,
4922 		     unsigned int fl0id, unsigned int fl1id)
4923 {
4924 	struct fw_iq_cmd c;
4925 
4926 	memset(&c, 0, sizeof(c));
4927 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4928 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4929 			    V_FW_IQ_CMD_VFN(vf));
4930 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4931 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4932 	c.iqid = htons(iqid);
4933 	c.fl0id = htons(fl0id);
4934 	c.fl1id = htons(fl1id);
4935 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4936 }
4937 
4938 /**
4939  *	t4_iq_free - free an ingress queue and its FLs
4940  *	@adap: the adapter
4941  *	@mbox: mailbox to use for the FW command
4942  *	@pf: the PF owning the queues
4943  *	@vf: the VF owning the queues
4944  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4945  *	@iqid: ingress queue id
4946  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4947  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4948  *
4949  *	Frees an ingress queue and its associated FLs, if any.
4950  */
4951 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4952 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4953 	       unsigned int fl0id, unsigned int fl1id)
4954 {
4955 	struct fw_iq_cmd c;
4956 
4957 	memset(&c, 0, sizeof(c));
4958 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4959 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4960 			    V_FW_IQ_CMD_VFN(vf));
4961 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4962 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4963 	c.iqid = htons(iqid);
4964 	c.fl0id = htons(fl0id);
4965 	c.fl1id = htons(fl1id);
4966 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4967 }
4968 
4969 /**
4970  *	t4_eth_eq_free - free an Ethernet egress queue
4971  *	@adap: the adapter
4972  *	@mbox: mailbox to use for the FW command
4973  *	@pf: the PF owning the queue
4974  *	@vf: the VF owning the queue
4975  *	@eqid: egress queue id
4976  *
4977  *	Frees an Ethernet egress queue.
4978  */
4979 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4980 		   unsigned int vf, unsigned int eqid)
4981 {
4982 	struct fw_eq_eth_cmd c;
4983 
4984 	memset(&c, 0, sizeof(c));
4985 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4986 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4987 			    V_FW_EQ_ETH_CMD_VFN(vf));
4988 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4989 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4990 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4991 }
4992 
4993 /**
4994  *	t4_ctrl_eq_free - free a control egress queue
4995  *	@adap: the adapter
4996  *	@mbox: mailbox to use for the FW command
4997  *	@pf: the PF owning the queue
4998  *	@vf: the VF owning the queue
4999  *	@eqid: egress queue id
5000  *
5001  *	Frees a control egress queue.
5002  */
5003 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5004 		    unsigned int vf, unsigned int eqid)
5005 {
5006 	struct fw_eq_ctrl_cmd c;
5007 
5008 	memset(&c, 0, sizeof(c));
5009 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5010 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5011 			    V_FW_EQ_CTRL_CMD_VFN(vf));
5012 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5013 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5014 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5015 }
5016 
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
			    V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
5040 
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Returns 0 on success or -EINVAL for an unrecognized message.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;	/* first byte of a FW message is its opcode */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		/* Decode the pause and speed bits from the status word. */
		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/*
		 * Find the port that owns the reported Tx channel.
		 * NOTE(review): if no port matches, pi ends up pointing at
		 * the last port and its state is updated anyway — this
		 * presumably relies on FW only reporting channels owned by
		 * this function; confirm.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, i, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
	} else {
		CH_WARN_RATELIMIT(adap,
		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
		return -EINVAL;
	}
	return 0;
}
5100 
5101 /**
5102  *	get_pci_mode - determine a card's PCI mode
5103  *	@adapter: the adapter
5104  *	@p: where to store the PCI settings
5105  *
5106  *	Determines a card's PCI mode and associated parameters, such as speed
5107  *	and width.
5108  */
5109 static void __devinit get_pci_mode(struct adapter *adapter,
5110 				   struct pci_params *p)
5111 {
5112 	u16 val;
5113 	u32 pcie_cap;
5114 
5115 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5116 	if (pcie_cap) {
5117 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5118 		p->speed = val & PCI_EXP_LNKSTA_CLS;
5119 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5120 	}
5121 }
5122 
5123 /**
5124  *	init_link_config - initialize a link's SW state
5125  *	@lc: structure holding the link state
5126  *	@caps: link capabilities
5127  *
5128  *	Initializes the SW state maintained for each link, including the link's
5129  *	capabilities and default speed/flow-control/autonegotiation settings.
5130  */
5131 static void __devinit init_link_config(struct link_config *lc,
5132 				       unsigned int caps)
5133 {
5134 	lc->supported = caps;
5135 	lc->requested_speed = 0;
5136 	lc->speed = 0;
5137 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5138 	if (lc->supported & FW_PORT_CAP_ANEG) {
5139 		lc->advertising = lc->supported & ADVERT_MASK;
5140 		lc->autoneg = AUTONEG_ENABLE;
5141 		lc->requested_fc |= PAUSE_AUTONEG;
5142 	} else {
5143 		lc->advertising = 0;
5144 		lc->autoneg = AUTONEG_DISABLE;
5145 	}
5146 }
5147 
5148 static int __devinit wait_dev_ready(struct adapter *adap)
5149 {
5150 	u32 whoami;
5151 
5152 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5153 
5154 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
5155 		return 0;
5156 
5157 	msleep(500);
5158 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5159 	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
5160 		? 0 : -EIO);
5161 }
5162 
5163 static int __devinit get_flash_params(struct adapter *adapter)
5164 {
5165 	int ret;
5166 	u32 info = 0;
5167 
5168 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5169 	if (!ret)
5170 		ret = sf1_read(adapter, 3, 0, 1, &info);
5171 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5172 	if (ret < 0)
5173 		return ret;
5174 
5175 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5176 		return -EINVAL;
5177 	info >>= 16;                           /* log2 of size */
5178 	if (info >= 0x14 && info < 0x18)
5179 		adapter->params.sf_nsec = 1 << (info - 16);
5180 	else if (info == 0x18)
5181 		adapter->params.sf_nsec = 64;
5182 	else
5183 		return -EINVAL;
5184 	adapter->params.sf_size = 1 << info;
5185 	return 0;
5186 }
5187 
5188 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5189 						  u8 range)
5190 {
5191 	u16 val;
5192 	u32 pcie_cap;
5193 
5194 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5195 	if (pcie_cap) {
5196 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5197 		val &= 0xfff0;
5198 		val |= range ;
5199 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5200 	}
5201 }
5202 
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules: wait for the
 *	device to become ready, read the PCI/flash/VPD parameters, and set
 *	initial values for some adapter tunables.  Returns 0 on success or a
 *	negative error number.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	ret = wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
	/* T4A1 chip is no longer supported */
	if (adapter->params.rev == 1) {
		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
		return -EINVAL;
	}
	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_flash_params(adapter);
	if (ret < 0)
		return ret;

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	/* A non-zero PCIE_REVISION distinguishes an FPGA from the ASIC. */
	if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
5260 
/**
 *	t4_port_init - initialize a port's SW state
 *	@p: the port whose state to initialize
 *	@mbox: mailbox to use for FW commands
 *	@pf: the PF that will own the port's VI
 *	@vf: the VF that will own the port's VI
 *
 *	Reads the port's capabilities from the firmware, allocates a VI for
 *	the port, and fills in the port's MAC address, link configuration,
 *	and module type.  Returns 0 on success or a negative error number.
 */
int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	unsigned int rss_size;
	adapter_t *adap = p->adapter;

	memset(&c, 0, sizeof(c));

	/* Map port_id to j, the index of its bit in the FW port vector. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
			       V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	/* t4_alloc_vi returns the new VI's id on success. */
	p->viid = ret;
	p->tx_chan = j;
	p->lport = j;
	p->rss_size = rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	ret = ntohl(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

	return 0;
}
5307 
5308 int t4_config_scheduler(struct adapter *adapter, int mode, int level,
5309 			int pktsize, int sched_class, int port, int unit,
5310 			int rate, int weight, int minrate, int maxrate)
5311 {
5312 	struct fw_sched_cmd cmd, rpl;
5313 
5314 	if (rate < 0 || unit < 0)
5315 		return -EINVAL;
5316 
5317 	memset(&cmd, 0, sizeof(cmd));
5318 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5319 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5320 	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(sizeof(cmd)/16));
5321 
5322 	cmd.u.params.sc = 1;
5323 	cmd.u.params.level = level;
5324 	cmd.u.params.mode = mode;
5325 	cmd.u.params.ch = port;
5326 	cmd.u.params.cl = sched_class;
5327 	cmd.u.params.rate = rate;
5328 	cmd.u.params.unit = unit;
5329 
5330  	switch (level) {
5331 		case FW_SCHED_PARAMS_LEVEL_CH_WRR:
5332 		case FW_SCHED_PARAMS_LEVEL_CL_WRR:
5333 			cmd.u.params.weight = cpu_to_be16(weight);
5334 			break;
5335 		case FW_SCHED_PARAMS_LEVEL_CH_RL:
5336 		case FW_SCHED_PARAMS_LEVEL_CL_RL:
5337 			cmd.u.params.max = cpu_to_be32(maxrate);
5338 			cmd.u.params.min = cpu_to_be32(minrate);
5339 			cmd.u.params.pktsize = cpu_to_be16(pktsize);
5340 			break;
5341 		default:
5342 			return -EINVAL;
5343 	}
5344 
5345 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl, 1);
5346 }
5347