xref: /illumos-gate/usr/src/uts/common/io/cxgbe/common/t4_hw.c (revision 856f710c9dc323b39da5935194d7928ffb99b67f)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14  *
15  * Copyright (C) 2003-2017 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 #include "common.h"
24 #include "t4_regs.h"
25 #include "t4_regs_values.h"
26 #include "t4fw_interface.h"
27 
28 /**
29  *	t4_wait_op_done_val - wait until an operation is completed
30  *	@adapter: the adapter performing the operation
31  *	@reg: the register to check for completion
32  *	@mask: a single-bit field within @reg that indicates completion
33  *	@polarity: the value of the field when the operation is completed
34  *	@attempts: number of check iterations
35  *	@delay: delay in usecs between iterations
36  *	@valp: where to store the value of the register at completion time
37  *
38  *	Wait until an operation is completed by checking a bit in a register
39  *	up to @attempts times.  If @valp is not NULL the value of the register
40  *	at the time it indicated completion is stored there.  Returns 0 if the
41  *	operation completes and	-EAGAIN	otherwise.
42  */
43 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
44 			       int polarity, int attempts, int delay, u32 *valp)
45 {
46 	while (1) {
47 		u32 val = t4_read_reg(adapter, reg);
48 
49 		if (!!(val & mask) == polarity) {
50 			if (valp)
51 				*valp = val;
52 			return 0;
53 		}
54 		if (--attempts == 0)
55 			return -EAGAIN;
56 		if (delay)
57 			udelay(delay);
58 	}
59 }
60 
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
67 
68 /**
69  *	t4_set_reg_field - set a register field to a value
70  *	@adapter: the adapter to program
71  *	@addr: the register address
72  *	@mask: specifies the portion of the register to modify
73  *	@val: the new value for the register field
74  *
75  *	Sets a register field specified by the supplied mask to the
76  *	given value.
77  */
78 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
79 		      u32 val)
80 {
81 	u32 v = t4_read_reg(adapter, addr) & ~mask;
82 
83 	t4_write_reg(adapter, addr, v | val);
84 	(void) t4_read_reg(adapter, addr);      /* flush */
85 }
86 
87 /**
88  *	t4_read_indirect - read indirectly addressed registers
89  *	@adap: the adapter
90  *	@addr_reg: register holding the indirect address
91  *	@data_reg: register holding the value of the indirect register
92  *	@vals: where the read register values are stored
93  *	@nregs: how many indirect registers to read
94  *	@start_idx: index of first indirect register to read
95  *
96  *	Reads registers that are accessed indirectly through an address/data
97  *	register pair.
98  */
99 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
100 			     unsigned int data_reg, u32 *vals,
101 			     unsigned int nregs, unsigned int start_idx)
102 {
103 	while (nregs--) {
104 		t4_write_reg(adap, addr_reg, start_idx);
105 		*vals++ = t4_read_reg(adap, data_reg);
106 		start_idx++;
107 	}
108 }
109 
110 /**
111  *	t4_write_indirect - write indirectly addressed registers
112  *	@adap: the adapter
113  *	@addr_reg: register holding the indirect addresses
114  *	@data_reg: register holding the value for the indirect registers
115  *	@vals: values to write
116  *	@nregs: how many indirect registers to write
117  *	@start_idx: address of first indirect register to write
118  *
119  *	Writes a sequential block of registers that are accessed indirectly
120  *	through an address/data register pair.
121  */
122 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
123 		       unsigned int data_reg, const u32 *vals,
124 		       unsigned int nregs, unsigned int start_idx)
125 {
126 	while (nregs--) {
127 		t4_write_reg(adap, addr_reg, start_idx++);
128 		t4_write_reg(adap, data_reg, *vals++);
129 	}
130 }
131 
132 /*
133  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
134  * mechanism.  This guarantees that we get the real value even if we're
135  * operating within a Virtual Machine and the Hypervisor is trapping our
136  * Configuration Space accesses.
137  *
138  * N.B. This routine should only be used as a last resort: the firmware uses
139  *      the backdoor registers on a regular basis and we can end up
 *      conflicting with its uses!
141  */
142 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
143 {
144 	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
145 
146 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
147 		req |= F_ENABLE;
148 	else
149 		req |= F_T6_ENABLE;
150 
151 	if (is_t4(adap->params.chip))
152 		req |= F_LOCALCFG;
153 
154 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
155 	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
156 
157 	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
158 	 * Configuration Space read.  (None of the other fields matter when
159 	 * F_ENABLE is 0 so a simple register write is easier than a
160 	 * read-modify-write via t4_set_reg_field().)
161 	 */
162 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
163 }
164 
165 /*
166  * t4_report_fw_error - report firmware error
167  * @adap: the adapter
168  *
169  * The adapter firmware can indicate error conditions to the host.
170  * If the firmware has indicated an error, print out the reason for
171  * the firmware error.
172  */
173 static void t4_report_fw_error(struct adapter *adap)
174 {
175 	static const char *const reason[] = {
176 		"Crash",			/* PCIE_FW_EVAL_CRASH */
177 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
178 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
179 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
180 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
181 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
182 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
183 		"Reserved",			/* reserved */
184 	};
185 	u32 pcie_fw;
186 
187 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
188 	if (pcie_fw & F_PCIE_FW_ERR)
189 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
190 			reason[G_PCIE_FW_EVAL(pcie_fw)]);
191 }
192 
193 /*
194  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
195  */
196 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
197 			 u32 mbox_addr)
198 {
199 	for ( ; nflit; nflit--, mbox_addr += 8)
200 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
201 }
202 
203 /*
204  * Handle a FW assertion reported in a mailbox.
205  */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * The assert fields arrive big-endian from the firmware, so convert
	 * line/x/y for display.  The filename field may not be
	 * NUL-terminated, hence the bounded "%.16s" conversion.
	 */
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
215 
216 #define X_CIM_PF_NOACCESS 0xeeeeeeee
217 
218 /*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
220  * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
221  */
222 /* makes single-statement usage a bit cleaner ... */
223 #ifdef T4_OS_NEEDS_MBOX_LOCKING
224 #define T4_OS_MBOX_LOCKING(x) x
225 #else
226 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
227 #endif
228 
229 /*
230  * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
231  * busy loops which don't sleep.
232  */
233 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
234 #define T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
235 #else
236 #define T4_OS_TOUCH_NMI_WATCHDOG()
237 #endif
238 
239 #ifdef T4_OS_LOG_MBOX_CMDS
240 /**
241  *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
242  *	@adapter: the adapter
243  *	@cmd: the Firmware Mailbox Command or Reply
244  *	@size: command length in bytes
245  *	@access: the time (ms) needed to access the Firmware Mailbox
246  *	@execute: the time (ms) the command spent being executed
247  */
248 static void t4_record_mbox(struct adapter *adapter,
249 			   const __be64 *cmd, unsigned int size,
250 			   int access, int execute)
251 {
252 	struct mbox_cmd_log *log = adapter->mbox_log;
253 	struct mbox_cmd *entry;
254 	int i;
255 
256 	entry = mbox_cmd_log_entry(log, log->cursor++);
257 	if (log->cursor == log->size)
258 		log->cursor = 0;
259 
260 	for (i = 0; i < size/8; i++)
261 		entry->cmd[i] = be64_to_cpu(cmd[i]);
262 	while (i < MBOX_LEN/8)
263 		entry->cmd[i++] = 0;
264 	entry->timestamp = t4_os_timestamp();
265 	entry->seqno = log->seqno++;
266 	entry->access = access;
267 	entry->execute = execute;
268 }
269 
270 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
271 	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)
272 
273 #else /* !T4_OS_LOG_MBOX_CMDS */
274 
275 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
276 	/* nothing */
277 
278 #endif /* !T4_OS_LOG_MBOX_CMDS */
279 
280 /**
281  *	t4_record_mbox_marker - record a marker in the mailbox log
282  *	@adapter: the adapter
283  *	@marker: byte array marker
284  *	@size: marker size in bytes
285  *
286  *	We inject a "fake mailbox command" into the Firmware Mailbox Log
287  *	using a known command token and then the bytes of the specified
288  *	marker.  This lets debugging code inject markers into the log to
289  *	help identify which commands are in response to higher level code.
290  */
void t4_record_mbox_marker(struct adapter *adapter,
			   const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	/* The first flit carries the marker token, so the usable payload is
	 * one flit smaller than the mailbox.
	 */
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* Silently truncate over-long markers to the available payload. */
	if (size > max_marker)
		size = max_marker;

	/* An all-ones first flit is the "this is a marker" token; the marker
	 * bytes follow, zero-padded to the end of the payload area.
	 */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	/* Log one token flit plus the marker rounded up to whole flits. */
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}
310 
311 /*
312  * Delay time in microseconds to wait for mailbox access/fw reply
313  * to mailbox command
314  */
315 #define MIN_MBOX_CMD_DELAY 900
316 #define MBOX_CMD_DELAY 1000
317 
318 /**
319  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
320  *	@adap: the adapter
321  *	@mbox: index of the mailbox to use
322  *	@cmd: the command to write
323  *	@size: command length in bytes
324  *	@rpl: where to optionally store the reply
325  *	@sleep_ok: if true we may sleep while awaiting command completion
326  *	@timeout: time to wait for command to finish before timing out
327  *		(negative implies @sleep_ok=false)
328  *
329  *	Sends the given command to FW through the selected mailbox and waits
330  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
331  *	store the FW's reply to the command.  The command and its optional
332  *	reply are of the same length.  Some FW commands like RESET and
333  *	INITIALIZE can take a considerable amount of time to execute.
334  *	@sleep_ok determines whether we may sleep while awaiting the response.
335  *	If sleeping is allowed we use progressive backoff otherwise we spin.
336  *	Note that passing in a negative @timeout is an alternate mechanism
337  *	for specifying @sleep_ok=false.  This is useful when a higher level
338  *	interface allows for specification of @timeout but not @sleep_ok ...
339  *
340  *	The return value is 0 on success or a negative errno on failure.  A
341  *	failure can happen either because we are not able to execute the
342  *	command or FW executes it but signals an error.  In the latter case
343  *	the return value is the error code indicated by FW (negated).
344  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	u16 access = 0;		/* iterations spent waiting for mailbox access */
#endif
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 * (Each iteration below delays ~1 ms, so the 4*timeout bound
		 * allows roughly four times the nominal timeout.)
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.  Read the control register
	 * a few times, stopping as soon as some owner (FW or PL) has been
	 * established.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand ownership to the firmware and mark the message valid. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  Each iteration delays ~1 ms, so @i approximates
	 * elapsed milliseconds (see the "completed in %d ms" message below).
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/*
			 * We own the mailbox again but there's no valid
			 * message yet; release ownership and keep waiting.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
								 &adap->mbox_lock));

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			/*
			 * A FW_DEBUG_CMD reply is a firmware assertion
			 * rather than a reply to our command; log it and
			 * report EIO instead of copying it out.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* Return the firmware's retval, negated as an errno. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
548 
549 #ifdef CONFIG_CUDBG
550 /*
551  * The maximum number of times to iterate for FW reply before
552  * issuing a mailbox timeout
553  */
554 #define FW_REPLY_WAIT_LOOP 6000000
555 
556 /**
557  *	t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
558  *	mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
559  *	and is only invoked during a kernel crash. Since this function is
 *	called through an atomic notifier chain, we cannot sleep awaiting a
561  *	response from FW, hence repeatedly loop until we get a reply.
562  *
563  *	@adap: the adapter
564  *	@mbox: index of the mailbox to use
565  *	@cmd: the command to write
566  *	@size: command length in bytes
567  *	@rpl: where to optionally store the reply
568  */
569 
static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
			    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		/*
		 * NOTE(review): F_PCIE_FW_ERR is known to be set here, so
		 * this conditional always yields -ENXIO; the -EBUSY arm is
		 * dead code (kept byte-identical to match the non-panic
		 * path's shape).
		 */
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.  Read the control register
	 * a few times, stopping as soon as some owner (FW or PL) has been
	 * established.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand ownership to the firmware and mark the message valid. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  Pure busy-wait with no delay: we cannot sleep
	 * in panic context, so we bound the spin by FW_REPLY_WAIT_LOOP
	 * iterations instead of a wall-clock timeout.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/*
			 * We own the mailbox again but there's no valid
			 * message yet; release ownership and keep waiting.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is a firmware assertion
			 * rather than a reply to our command; log it and
			 * report EIO instead of copying it out.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* Return the firmware's retval, negated as an errno. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
697 #endif
698 
/*
 * Send a mailbox command with the default FW_CMD_MAX_TIMEOUT, routing to the
 * minimal spin-only panic path when the adapter is flagged as crashing
 * (CONFIG_CUDBG builds only).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	/* In a kernel crash we must not sleep or queue for mailbox access. */
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
						     rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
					       sleep_ok, FW_CMD_MAX_TIMEOUT);

}
712 
/*
 * Dump the ECC error address and BIST status registers for EDC memory
 * controller @idx (T5 and later only).  Purely diagnostic; always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	/* T4 is not handled by this routine. */
	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	/* Only the two EDC memory controllers are valid targets. */
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Dump nine consecutive 64-bit BIST status words. */
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
749 
750 /**
751  *	t4_memory_rw_addr - read/write adapter memory via PCIE memory window
752  *	@adap: the adapter
753  *	@win: PCI-E Memory Window to use
754  *	@addr: address within adapter memory
755  *	@len: amount of memory to transfer
756  *	@hbuf: host memory buffer
757  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
758  *
759  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
760  *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.
762  *
763  *	NOTES:
764  *	 1. The memory is transferred as a raw byte sequence from/to the
765  *	    firmware's memory.  If this memory contains data structures which
766  *	    contain multi-byte integers, it's the caller's responsibility to
767  *	    perform appropriate byte order conversions.
768  *
769  *	 2. It is the Caller's responsibility to ensure that no other code
770  *	    uses the specified PCI-E Memory Window while this routine is
771  *	    using it.  This is typically done via the use of OS-specific
772  *	    locks, etc.
773  */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks: both the adapter address and the host
	 * buffer must be 32-bit aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						  win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (mem_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
			win);
		return -ENXIO;
	}

	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
	/* On T4 the window base is an absolute bus address; make it
	 * BAR0-relative so register accessors can use it directly.
	 */
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.  @pos is aperture-aligned; @offset is the remainder.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *	   31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			/* NOTE(review): this copies bytes [resid, 4) into the
			 * host buffer rather than bytes [0, resid).  It
			 * matches the upstream Linux cxgb4 driver verbatim,
			 * but looks suspicious -- confirm intent before
			 * relying on residual-read contents.
			 */
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			/* Zero the bytes beyond the residual so the full
			 * 32-bit write doesn't leak stale host data.
			 */
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
928 
929 /**
930  *	t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
931  *	@adap: the adapter
932  *	@win: PCI-E Memory Window to use
933  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
934  *	@maddr: address within indicated memory type
935  *	@len: amount of memory to transfer
936  *	@hbuf: host memory buffer
937  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
938  *
939  *	Reads/writes adapter memory using t4_memory_rw_addr().  This routine
 *	provides a (memory type, address within memory type) interface.
941  */
942 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
943 		       u32 len, void *hbuf, int dir)
944 {
945 	u32 mtype_offset;
946 	u32 edc_size, mc_size;
947 
948 	/* Offset into the region of memory which is being accessed
949 	 * MEM_EDC0 = 0
950 	 * MEM_EDC1 = 1
951 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
952 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
953 	 */
954 	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
955 	if (mtype != MEM_MC1)
956 		mtype_offset = (mtype * (edc_size * 1024 * 1024));
957 	else {
958 		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
959 						      A_MA_EXT_MEMORY0_BAR));
960 		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
961 	}
962 
963 	return t4_memory_rw_addr(adap, win,
964 				 mtype_offset + maddr, len,
965 				 hbuf, dir);
966 }
967 
968 /*
969  * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if drv_fw_attach != 0)
971  * since we prefer to let the firmware own all of these registers, but if that
972  * fails we go for it directly ourselves.
973  */
974 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
975 {
976 	u32 val;
977 
978 	/*
979 	 * If fw_attach != 0, construct and send the Firmware LDST Command to
980 	 * retrieve the specified PCI-E Configuration Space register.
981 	 */
982 	if (drv_fw_attach != 0) {
983 		struct fw_ldst_cmd ldst_cmd;
984 		int ret;
985 
986 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
987 		ldst_cmd.op_to_addrspace =
988 			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
989 				    F_FW_CMD_REQUEST |
990 				    F_FW_CMD_READ |
991 				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
992 		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
993 		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
994 		ldst_cmd.u.pcie.ctrl_to_fn =
995 			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
996 		ldst_cmd.u.pcie.r = reg;
997 
998 		/*
999 		 * If the LDST Command succeeds, return the result, otherwise
1000 		 * fall through to reading it directly ourselves ...
1001 		 */
1002 		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1003 				 &ldst_cmd);
1004 		if (ret == 0)
1005 			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1006 
1007 		CH_WARN(adap, "Firmware failed to return "
1008 			"Configuration Space register %d, err = %d\n",
1009 			reg, -ret);
1010 	}
1011 
1012 	/*
1013 	 * Read the desired Configuration Space register via the PCI-E
1014 	 * Backdoor mechanism.
1015 	 */
1016 	t4_hw_pci_read_cfg4(adap, reg, &val);
1017 	return val;
1018 }
1019 
1020 /*
1021  * Get the window based on base passed to it.
1022  * Window aperture is currently unhandled, but there is no use case for it
1023  * right now
1024  */
1025 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1026 {
1027 	if (is_t4(adap->params.chip)) {
1028 		u32 bar0;
1029 
1030 		/*
1031 		 * Truncation intentional: we only read the bottom 32-bits of
1032 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
1033 		 * mechanism to read BAR0 instead of using
1034 		 * pci_resource_start() because we could be operating from
1035 		 * within a Virtual Machine which is trapping our accesses to
1036 		 * our Configuration Space and we need to set up the PCI-E
1037 		 * Memory Window decoders with the actual addresses which will
1038 		 * be coming across the PCI-E link.
1039 		 */
1040 		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1041 		bar0 &= pci_mask;
1042 		adap->t4_bar0 = bar0;
1043 
1044 		return bar0 + memwin_base;
1045 	} else {
1046 		/* For T5, only relative offset inside the PCIe BAR is passed */
1047 		return memwin_base;
1048 	}
1049 }
1050 
1051 /* Get the default utility window (win0) used by everyone */
1052 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1053 {
1054 	return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1055 }
1056 
1057 /*
1058  * Set up memory window for accessing adapter memory ranges.  (Read
1059  * back MA register to ensure that changes propagate before we attempt
1060  * to use the new values.)
1061  */
1062 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1063 {
1064 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1065 		     memwin_base | V_BIR(0) |
1066 		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1067 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1068 }
1069 
1070 /**
1071  *	t4_get_regs_len - return the size of the chips register set
1072  *	@adapter: the adapter
1073  *
1074  *	Returns the size of the chip's BAR0 register space.
1075  */
1076 unsigned int t4_get_regs_len(struct adapter *adapter)
1077 {
1078 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1079 
1080 	switch (chip_version) {
1081 	case CHELSIO_T4:
1082 		return T4_REGMAP_SIZE;
1083 
1084 	case CHELSIO_T5:
1085 	case CHELSIO_T6:
1086 		return T5_REGMAP_SIZE;
1087 	}
1088 
1089 	CH_ERR(adapter,
1090 		"Unsupported chip version %d\n", chip_version);
1091 	return 0;
1092 }
1093 
1094 /**
1095  *	t4_get_regs - read chip registers into provided buffer
1096  *	@adap: the adapter
1097  *	@buf: register buffer
1098  *	@buf_size: size (in bytes) of register buffer
1099  *
1100  *	If the provided register buffer isn't large enough for the chip's
1101  *	full register range, the register dump will be truncated to the
1102  *	register buffer's size.
1103  */
1104 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1105 {
1106 	static const unsigned int t4_reg_ranges[] = {
1107 		0x1008, 0x1108,
1108 		0x1180, 0x1184,
1109 		0x1190, 0x1194,
1110 		0x11a0, 0x11a4,
1111 		0x11b0, 0x11b4,
1112 		0x11fc, 0x123c,
1113 		0x1300, 0x173c,
1114 		0x1800, 0x18fc,
1115 		0x3000, 0x30d8,
1116 		0x30e0, 0x30e4,
1117 		0x30ec, 0x5910,
1118 		0x5920, 0x5924,
1119 		0x5960, 0x5960,
1120 		0x5968, 0x5968,
1121 		0x5970, 0x5970,
1122 		0x5978, 0x5978,
1123 		0x5980, 0x5980,
1124 		0x5988, 0x5988,
1125 		0x5990, 0x5990,
1126 		0x5998, 0x5998,
1127 		0x59a0, 0x59d4,
1128 		0x5a00, 0x5ae0,
1129 		0x5ae8, 0x5ae8,
1130 		0x5af0, 0x5af0,
1131 		0x5af8, 0x5af8,
1132 		0x6000, 0x6098,
1133 		0x6100, 0x6150,
1134 		0x6200, 0x6208,
1135 		0x6240, 0x6248,
1136 		0x6280, 0x62b0,
1137 		0x62c0, 0x6338,
1138 		0x6370, 0x638c,
1139 		0x6400, 0x643c,
1140 		0x6500, 0x6524,
1141 		0x6a00, 0x6a04,
1142 		0x6a14, 0x6a38,
1143 		0x6a60, 0x6a70,
1144 		0x6a78, 0x6a78,
1145 		0x6b00, 0x6b0c,
1146 		0x6b1c, 0x6b84,
1147 		0x6bf0, 0x6bf8,
1148 		0x6c00, 0x6c0c,
1149 		0x6c1c, 0x6c84,
1150 		0x6cf0, 0x6cf8,
1151 		0x6d00, 0x6d0c,
1152 		0x6d1c, 0x6d84,
1153 		0x6df0, 0x6df8,
1154 		0x6e00, 0x6e0c,
1155 		0x6e1c, 0x6e84,
1156 		0x6ef0, 0x6ef8,
1157 		0x6f00, 0x6f0c,
1158 		0x6f1c, 0x6f84,
1159 		0x6ff0, 0x6ff8,
1160 		0x7000, 0x700c,
1161 		0x701c, 0x7084,
1162 		0x70f0, 0x70f8,
1163 		0x7100, 0x710c,
1164 		0x711c, 0x7184,
1165 		0x71f0, 0x71f8,
1166 		0x7200, 0x720c,
1167 		0x721c, 0x7284,
1168 		0x72f0, 0x72f8,
1169 		0x7300, 0x730c,
1170 		0x731c, 0x7384,
1171 		0x73f0, 0x73f8,
1172 		0x7400, 0x7450,
1173 		0x7500, 0x7530,
1174 		0x7600, 0x760c,
1175 		0x7614, 0x761c,
1176 		0x7680, 0x76cc,
1177 		0x7700, 0x7798,
1178 		0x77c0, 0x77fc,
1179 		0x7900, 0x79fc,
1180 		0x7b00, 0x7b58,
1181 		0x7b60, 0x7b84,
1182 		0x7b8c, 0x7c38,
1183 		0x7d00, 0x7d38,
1184 		0x7d40, 0x7d80,
1185 		0x7d8c, 0x7ddc,
1186 		0x7de4, 0x7e04,
1187 		0x7e10, 0x7e1c,
1188 		0x7e24, 0x7e38,
1189 		0x7e40, 0x7e44,
1190 		0x7e4c, 0x7e78,
1191 		0x7e80, 0x7ea4,
1192 		0x7eac, 0x7edc,
1193 		0x7ee8, 0x7efc,
1194 		0x8dc0, 0x8e04,
1195 		0x8e10, 0x8e1c,
1196 		0x8e30, 0x8e78,
1197 		0x8ea0, 0x8eb8,
1198 		0x8ec0, 0x8f6c,
1199 		0x8fc0, 0x9008,
1200 		0x9010, 0x9058,
1201 		0x9060, 0x9060,
1202 		0x9068, 0x9074,
1203 		0x90fc, 0x90fc,
1204 		0x9400, 0x9408,
1205 		0x9410, 0x9458,
1206 		0x9600, 0x9600,
1207 		0x9608, 0x9638,
1208 		0x9640, 0x96bc,
1209 		0x9800, 0x9808,
1210 		0x9820, 0x983c,
1211 		0x9850, 0x9864,
1212 		0x9c00, 0x9c6c,
1213 		0x9c80, 0x9cec,
1214 		0x9d00, 0x9d6c,
1215 		0x9d80, 0x9dec,
1216 		0x9e00, 0x9e6c,
1217 		0x9e80, 0x9eec,
1218 		0x9f00, 0x9f6c,
1219 		0x9f80, 0x9fec,
1220 		0xd004, 0xd004,
1221 		0xd010, 0xd03c,
1222 		0xdfc0, 0xdfe0,
1223 		0xe000, 0xea7c,
1224 		0xf000, 0x11110,
1225 		0x11118, 0x11190,
1226 		0x19040, 0x1906c,
1227 		0x19078, 0x19080,
1228 		0x1908c, 0x190e4,
1229 		0x190f0, 0x190f8,
1230 		0x19100, 0x19110,
1231 		0x19120, 0x19124,
1232 		0x19150, 0x19194,
1233 		0x1919c, 0x191b0,
1234 		0x191d0, 0x191e8,
1235 		0x19238, 0x1924c,
1236 		0x193f8, 0x1943c,
1237 		0x1944c, 0x19474,
1238 		0x19490, 0x194e0,
1239 		0x194f0, 0x194f8,
1240 		0x19800, 0x19c08,
1241 		0x19c10, 0x19c90,
1242 		0x19ca0, 0x19ce4,
1243 		0x19cf0, 0x19d40,
1244 		0x19d50, 0x19d94,
1245 		0x19da0, 0x19de8,
1246 		0x19df0, 0x19e40,
1247 		0x19e50, 0x19e90,
1248 		0x19ea0, 0x19f4c,
1249 		0x1a000, 0x1a004,
1250 		0x1a010, 0x1a06c,
1251 		0x1a0b0, 0x1a0e4,
1252 		0x1a0ec, 0x1a0f4,
1253 		0x1a100, 0x1a108,
1254 		0x1a114, 0x1a120,
1255 		0x1a128, 0x1a130,
1256 		0x1a138, 0x1a138,
1257 		0x1a190, 0x1a1c4,
1258 		0x1a1fc, 0x1a1fc,
1259 		0x1e040, 0x1e04c,
1260 		0x1e284, 0x1e28c,
1261 		0x1e2c0, 0x1e2c0,
1262 		0x1e2e0, 0x1e2e0,
1263 		0x1e300, 0x1e384,
1264 		0x1e3c0, 0x1e3c8,
1265 		0x1e440, 0x1e44c,
1266 		0x1e684, 0x1e68c,
1267 		0x1e6c0, 0x1e6c0,
1268 		0x1e6e0, 0x1e6e0,
1269 		0x1e700, 0x1e784,
1270 		0x1e7c0, 0x1e7c8,
1271 		0x1e840, 0x1e84c,
1272 		0x1ea84, 0x1ea8c,
1273 		0x1eac0, 0x1eac0,
1274 		0x1eae0, 0x1eae0,
1275 		0x1eb00, 0x1eb84,
1276 		0x1ebc0, 0x1ebc8,
1277 		0x1ec40, 0x1ec4c,
1278 		0x1ee84, 0x1ee8c,
1279 		0x1eec0, 0x1eec0,
1280 		0x1eee0, 0x1eee0,
1281 		0x1ef00, 0x1ef84,
1282 		0x1efc0, 0x1efc8,
1283 		0x1f040, 0x1f04c,
1284 		0x1f284, 0x1f28c,
1285 		0x1f2c0, 0x1f2c0,
1286 		0x1f2e0, 0x1f2e0,
1287 		0x1f300, 0x1f384,
1288 		0x1f3c0, 0x1f3c8,
1289 		0x1f440, 0x1f44c,
1290 		0x1f684, 0x1f68c,
1291 		0x1f6c0, 0x1f6c0,
1292 		0x1f6e0, 0x1f6e0,
1293 		0x1f700, 0x1f784,
1294 		0x1f7c0, 0x1f7c8,
1295 		0x1f840, 0x1f84c,
1296 		0x1fa84, 0x1fa8c,
1297 		0x1fac0, 0x1fac0,
1298 		0x1fae0, 0x1fae0,
1299 		0x1fb00, 0x1fb84,
1300 		0x1fbc0, 0x1fbc8,
1301 		0x1fc40, 0x1fc4c,
1302 		0x1fe84, 0x1fe8c,
1303 		0x1fec0, 0x1fec0,
1304 		0x1fee0, 0x1fee0,
1305 		0x1ff00, 0x1ff84,
1306 		0x1ffc0, 0x1ffc8,
1307 		0x20000, 0x2002c,
1308 		0x20100, 0x2013c,
1309 		0x20190, 0x201a0,
1310 		0x201a8, 0x201b8,
1311 		0x201c4, 0x201c8,
1312 		0x20200, 0x20318,
1313 		0x20400, 0x204b4,
1314 		0x204c0, 0x20528,
1315 		0x20540, 0x20614,
1316 		0x21000, 0x21040,
1317 		0x2104c, 0x21060,
1318 		0x210c0, 0x210ec,
1319 		0x21200, 0x21268,
1320 		0x21270, 0x21284,
1321 		0x212fc, 0x21388,
1322 		0x21400, 0x21404,
1323 		0x21500, 0x21500,
1324 		0x21510, 0x21518,
1325 		0x2152c, 0x21530,
1326 		0x2153c, 0x2153c,
1327 		0x21550, 0x21554,
1328 		0x21600, 0x21600,
1329 		0x21608, 0x2161c,
1330 		0x21624, 0x21628,
1331 		0x21630, 0x21634,
1332 		0x2163c, 0x2163c,
1333 		0x21700, 0x2171c,
1334 		0x21780, 0x2178c,
1335 		0x21800, 0x21818,
1336 		0x21820, 0x21828,
1337 		0x21830, 0x21848,
1338 		0x21850, 0x21854,
1339 		0x21860, 0x21868,
1340 		0x21870, 0x21870,
1341 		0x21878, 0x21898,
1342 		0x218a0, 0x218a8,
1343 		0x218b0, 0x218c8,
1344 		0x218d0, 0x218d4,
1345 		0x218e0, 0x218e8,
1346 		0x218f0, 0x218f0,
1347 		0x218f8, 0x21a18,
1348 		0x21a20, 0x21a28,
1349 		0x21a30, 0x21a48,
1350 		0x21a50, 0x21a54,
1351 		0x21a60, 0x21a68,
1352 		0x21a70, 0x21a70,
1353 		0x21a78, 0x21a98,
1354 		0x21aa0, 0x21aa8,
1355 		0x21ab0, 0x21ac8,
1356 		0x21ad0, 0x21ad4,
1357 		0x21ae0, 0x21ae8,
1358 		0x21af0, 0x21af0,
1359 		0x21af8, 0x21c18,
1360 		0x21c20, 0x21c20,
1361 		0x21c28, 0x21c30,
1362 		0x21c38, 0x21c38,
1363 		0x21c80, 0x21c98,
1364 		0x21ca0, 0x21ca8,
1365 		0x21cb0, 0x21cc8,
1366 		0x21cd0, 0x21cd4,
1367 		0x21ce0, 0x21ce8,
1368 		0x21cf0, 0x21cf0,
1369 		0x21cf8, 0x21d7c,
1370 		0x21e00, 0x21e04,
1371 		0x22000, 0x2202c,
1372 		0x22100, 0x2213c,
1373 		0x22190, 0x221a0,
1374 		0x221a8, 0x221b8,
1375 		0x221c4, 0x221c8,
1376 		0x22200, 0x22318,
1377 		0x22400, 0x224b4,
1378 		0x224c0, 0x22528,
1379 		0x22540, 0x22614,
1380 		0x23000, 0x23040,
1381 		0x2304c, 0x23060,
1382 		0x230c0, 0x230ec,
1383 		0x23200, 0x23268,
1384 		0x23270, 0x23284,
1385 		0x232fc, 0x23388,
1386 		0x23400, 0x23404,
1387 		0x23500, 0x23500,
1388 		0x23510, 0x23518,
1389 		0x2352c, 0x23530,
1390 		0x2353c, 0x2353c,
1391 		0x23550, 0x23554,
1392 		0x23600, 0x23600,
1393 		0x23608, 0x2361c,
1394 		0x23624, 0x23628,
1395 		0x23630, 0x23634,
1396 		0x2363c, 0x2363c,
1397 		0x23700, 0x2371c,
1398 		0x23780, 0x2378c,
1399 		0x23800, 0x23818,
1400 		0x23820, 0x23828,
1401 		0x23830, 0x23848,
1402 		0x23850, 0x23854,
1403 		0x23860, 0x23868,
1404 		0x23870, 0x23870,
1405 		0x23878, 0x23898,
1406 		0x238a0, 0x238a8,
1407 		0x238b0, 0x238c8,
1408 		0x238d0, 0x238d4,
1409 		0x238e0, 0x238e8,
1410 		0x238f0, 0x238f0,
1411 		0x238f8, 0x23a18,
1412 		0x23a20, 0x23a28,
1413 		0x23a30, 0x23a48,
1414 		0x23a50, 0x23a54,
1415 		0x23a60, 0x23a68,
1416 		0x23a70, 0x23a70,
1417 		0x23a78, 0x23a98,
1418 		0x23aa0, 0x23aa8,
1419 		0x23ab0, 0x23ac8,
1420 		0x23ad0, 0x23ad4,
1421 		0x23ae0, 0x23ae8,
1422 		0x23af0, 0x23af0,
1423 		0x23af8, 0x23c18,
1424 		0x23c20, 0x23c20,
1425 		0x23c28, 0x23c30,
1426 		0x23c38, 0x23c38,
1427 		0x23c80, 0x23c98,
1428 		0x23ca0, 0x23ca8,
1429 		0x23cb0, 0x23cc8,
1430 		0x23cd0, 0x23cd4,
1431 		0x23ce0, 0x23ce8,
1432 		0x23cf0, 0x23cf0,
1433 		0x23cf8, 0x23d7c,
1434 		0x23e00, 0x23e04,
1435 		0x24000, 0x2402c,
1436 		0x24100, 0x2413c,
1437 		0x24190, 0x241a0,
1438 		0x241a8, 0x241b8,
1439 		0x241c4, 0x241c8,
1440 		0x24200, 0x24318,
1441 		0x24400, 0x244b4,
1442 		0x244c0, 0x24528,
1443 		0x24540, 0x24614,
1444 		0x25000, 0x25040,
1445 		0x2504c, 0x25060,
1446 		0x250c0, 0x250ec,
1447 		0x25200, 0x25268,
1448 		0x25270, 0x25284,
1449 		0x252fc, 0x25388,
1450 		0x25400, 0x25404,
1451 		0x25500, 0x25500,
1452 		0x25510, 0x25518,
1453 		0x2552c, 0x25530,
1454 		0x2553c, 0x2553c,
1455 		0x25550, 0x25554,
1456 		0x25600, 0x25600,
1457 		0x25608, 0x2561c,
1458 		0x25624, 0x25628,
1459 		0x25630, 0x25634,
1460 		0x2563c, 0x2563c,
1461 		0x25700, 0x2571c,
1462 		0x25780, 0x2578c,
1463 		0x25800, 0x25818,
1464 		0x25820, 0x25828,
1465 		0x25830, 0x25848,
1466 		0x25850, 0x25854,
1467 		0x25860, 0x25868,
1468 		0x25870, 0x25870,
1469 		0x25878, 0x25898,
1470 		0x258a0, 0x258a8,
1471 		0x258b0, 0x258c8,
1472 		0x258d0, 0x258d4,
1473 		0x258e0, 0x258e8,
1474 		0x258f0, 0x258f0,
1475 		0x258f8, 0x25a18,
1476 		0x25a20, 0x25a28,
1477 		0x25a30, 0x25a48,
1478 		0x25a50, 0x25a54,
1479 		0x25a60, 0x25a68,
1480 		0x25a70, 0x25a70,
1481 		0x25a78, 0x25a98,
1482 		0x25aa0, 0x25aa8,
1483 		0x25ab0, 0x25ac8,
1484 		0x25ad0, 0x25ad4,
1485 		0x25ae0, 0x25ae8,
1486 		0x25af0, 0x25af0,
1487 		0x25af8, 0x25c18,
1488 		0x25c20, 0x25c20,
1489 		0x25c28, 0x25c30,
1490 		0x25c38, 0x25c38,
1491 		0x25c80, 0x25c98,
1492 		0x25ca0, 0x25ca8,
1493 		0x25cb0, 0x25cc8,
1494 		0x25cd0, 0x25cd4,
1495 		0x25ce0, 0x25ce8,
1496 		0x25cf0, 0x25cf0,
1497 		0x25cf8, 0x25d7c,
1498 		0x25e00, 0x25e04,
1499 		0x26000, 0x2602c,
1500 		0x26100, 0x2613c,
1501 		0x26190, 0x261a0,
1502 		0x261a8, 0x261b8,
1503 		0x261c4, 0x261c8,
1504 		0x26200, 0x26318,
1505 		0x26400, 0x264b4,
1506 		0x264c0, 0x26528,
1507 		0x26540, 0x26614,
1508 		0x27000, 0x27040,
1509 		0x2704c, 0x27060,
1510 		0x270c0, 0x270ec,
1511 		0x27200, 0x27268,
1512 		0x27270, 0x27284,
1513 		0x272fc, 0x27388,
1514 		0x27400, 0x27404,
1515 		0x27500, 0x27500,
1516 		0x27510, 0x27518,
1517 		0x2752c, 0x27530,
1518 		0x2753c, 0x2753c,
1519 		0x27550, 0x27554,
1520 		0x27600, 0x27600,
1521 		0x27608, 0x2761c,
1522 		0x27624, 0x27628,
1523 		0x27630, 0x27634,
1524 		0x2763c, 0x2763c,
1525 		0x27700, 0x2771c,
1526 		0x27780, 0x2778c,
1527 		0x27800, 0x27818,
1528 		0x27820, 0x27828,
1529 		0x27830, 0x27848,
1530 		0x27850, 0x27854,
1531 		0x27860, 0x27868,
1532 		0x27870, 0x27870,
1533 		0x27878, 0x27898,
1534 		0x278a0, 0x278a8,
1535 		0x278b0, 0x278c8,
1536 		0x278d0, 0x278d4,
1537 		0x278e0, 0x278e8,
1538 		0x278f0, 0x278f0,
1539 		0x278f8, 0x27a18,
1540 		0x27a20, 0x27a28,
1541 		0x27a30, 0x27a48,
1542 		0x27a50, 0x27a54,
1543 		0x27a60, 0x27a68,
1544 		0x27a70, 0x27a70,
1545 		0x27a78, 0x27a98,
1546 		0x27aa0, 0x27aa8,
1547 		0x27ab0, 0x27ac8,
1548 		0x27ad0, 0x27ad4,
1549 		0x27ae0, 0x27ae8,
1550 		0x27af0, 0x27af0,
1551 		0x27af8, 0x27c18,
1552 		0x27c20, 0x27c20,
1553 		0x27c28, 0x27c30,
1554 		0x27c38, 0x27c38,
1555 		0x27c80, 0x27c98,
1556 		0x27ca0, 0x27ca8,
1557 		0x27cb0, 0x27cc8,
1558 		0x27cd0, 0x27cd4,
1559 		0x27ce0, 0x27ce8,
1560 		0x27cf0, 0x27cf0,
1561 		0x27cf8, 0x27d7c,
1562 		0x27e00, 0x27e04,
1563 	};
1564 
1565 	static const unsigned int t5_reg_ranges[] = {
1566 		0x1008, 0x10c0,
1567 		0x10cc, 0x10f8,
1568 		0x1100, 0x1100,
1569 		0x110c, 0x1148,
1570 		0x1180, 0x1184,
1571 		0x1190, 0x1194,
1572 		0x11a0, 0x11a4,
1573 		0x11b0, 0x11b4,
1574 		0x11fc, 0x123c,
1575 		0x1280, 0x173c,
1576 		0x1800, 0x18fc,
1577 		0x3000, 0x3028,
1578 		0x3060, 0x30b0,
1579 		0x30b8, 0x30d8,
1580 		0x30e0, 0x30fc,
1581 		0x3140, 0x357c,
1582 		0x35a8, 0x35cc,
1583 		0x35ec, 0x35ec,
1584 		0x3600, 0x5624,
1585 		0x56cc, 0x56ec,
1586 		0x56f4, 0x5720,
1587 		0x5728, 0x575c,
1588 		0x580c, 0x5814,
1589 		0x5890, 0x589c,
1590 		0x58a4, 0x58ac,
1591 		0x58b8, 0x58bc,
1592 		0x5940, 0x59c8,
1593 		0x59d0, 0x59dc,
1594 		0x59fc, 0x5a18,
1595 		0x5a60, 0x5a70,
1596 		0x5a80, 0x5a9c,
1597 		0x5b94, 0x5bfc,
1598 		0x6000, 0x6020,
1599 		0x6028, 0x6040,
1600 		0x6058, 0x609c,
1601 		0x60a8, 0x614c,
1602 		0x7700, 0x7798,
1603 		0x77c0, 0x78fc,
1604 		0x7b00, 0x7b58,
1605 		0x7b60, 0x7b84,
1606 		0x7b8c, 0x7c54,
1607 		0x7d00, 0x7d38,
1608 		0x7d40, 0x7d80,
1609 		0x7d8c, 0x7ddc,
1610 		0x7de4, 0x7e04,
1611 		0x7e10, 0x7e1c,
1612 		0x7e24, 0x7e38,
1613 		0x7e40, 0x7e44,
1614 		0x7e4c, 0x7e78,
1615 		0x7e80, 0x7edc,
1616 		0x7ee8, 0x7efc,
1617 		0x8dc0, 0x8de0,
1618 		0x8df8, 0x8e04,
1619 		0x8e10, 0x8e84,
1620 		0x8ea0, 0x8f84,
1621 		0x8fc0, 0x9058,
1622 		0x9060, 0x9060,
1623 		0x9068, 0x90f8,
1624 		0x9400, 0x9408,
1625 		0x9410, 0x9470,
1626 		0x9600, 0x9600,
1627 		0x9608, 0x9638,
1628 		0x9640, 0x96f4,
1629 		0x9800, 0x9808,
1630 		0x9820, 0x983c,
1631 		0x9850, 0x9864,
1632 		0x9c00, 0x9c6c,
1633 		0x9c80, 0x9cec,
1634 		0x9d00, 0x9d6c,
1635 		0x9d80, 0x9dec,
1636 		0x9e00, 0x9e6c,
1637 		0x9e80, 0x9eec,
1638 		0x9f00, 0x9f6c,
1639 		0x9f80, 0xa020,
1640 		0xd004, 0xd004,
1641 		0xd010, 0xd03c,
1642 		0xdfc0, 0xdfe0,
1643 		0xe000, 0x1106c,
1644 		0x11074, 0x11088,
1645 		0x1109c, 0x1117c,
1646 		0x11190, 0x11204,
1647 		0x19040, 0x1906c,
1648 		0x19078, 0x19080,
1649 		0x1908c, 0x190e8,
1650 		0x190f0, 0x190f8,
1651 		0x19100, 0x19110,
1652 		0x19120, 0x19124,
1653 		0x19150, 0x19194,
1654 		0x1919c, 0x191b0,
1655 		0x191d0, 0x191e8,
1656 		0x19238, 0x19290,
1657 		0x193f8, 0x19428,
1658 		0x19430, 0x19444,
1659 		0x1944c, 0x1946c,
1660 		0x19474, 0x19474,
1661 		0x19490, 0x194cc,
1662 		0x194f0, 0x194f8,
1663 		0x19c00, 0x19c08,
1664 		0x19c10, 0x19c60,
1665 		0x19c94, 0x19ce4,
1666 		0x19cf0, 0x19d40,
1667 		0x19d50, 0x19d94,
1668 		0x19da0, 0x19de8,
1669 		0x19df0, 0x19e10,
1670 		0x19e50, 0x19e90,
1671 		0x19ea0, 0x19f24,
1672 		0x19f34, 0x19f34,
1673 		0x19f40, 0x19f50,
1674 		0x19f90, 0x19fb4,
1675 		0x19fc4, 0x19fe4,
1676 		0x1a000, 0x1a004,
1677 		0x1a010, 0x1a06c,
1678 		0x1a0b0, 0x1a0e4,
1679 		0x1a0ec, 0x1a0f8,
1680 		0x1a100, 0x1a108,
1681 		0x1a114, 0x1a120,
1682 		0x1a128, 0x1a130,
1683 		0x1a138, 0x1a138,
1684 		0x1a190, 0x1a1c4,
1685 		0x1a1fc, 0x1a1fc,
1686 		0x1e008, 0x1e00c,
1687 		0x1e040, 0x1e044,
1688 		0x1e04c, 0x1e04c,
1689 		0x1e284, 0x1e290,
1690 		0x1e2c0, 0x1e2c0,
1691 		0x1e2e0, 0x1e2e0,
1692 		0x1e300, 0x1e384,
1693 		0x1e3c0, 0x1e3c8,
1694 		0x1e408, 0x1e40c,
1695 		0x1e440, 0x1e444,
1696 		0x1e44c, 0x1e44c,
1697 		0x1e684, 0x1e690,
1698 		0x1e6c0, 0x1e6c0,
1699 		0x1e6e0, 0x1e6e0,
1700 		0x1e700, 0x1e784,
1701 		0x1e7c0, 0x1e7c8,
1702 		0x1e808, 0x1e80c,
1703 		0x1e840, 0x1e844,
1704 		0x1e84c, 0x1e84c,
1705 		0x1ea84, 0x1ea90,
1706 		0x1eac0, 0x1eac0,
1707 		0x1eae0, 0x1eae0,
1708 		0x1eb00, 0x1eb84,
1709 		0x1ebc0, 0x1ebc8,
1710 		0x1ec08, 0x1ec0c,
1711 		0x1ec40, 0x1ec44,
1712 		0x1ec4c, 0x1ec4c,
1713 		0x1ee84, 0x1ee90,
1714 		0x1eec0, 0x1eec0,
1715 		0x1eee0, 0x1eee0,
1716 		0x1ef00, 0x1ef84,
1717 		0x1efc0, 0x1efc8,
1718 		0x1f008, 0x1f00c,
1719 		0x1f040, 0x1f044,
1720 		0x1f04c, 0x1f04c,
1721 		0x1f284, 0x1f290,
1722 		0x1f2c0, 0x1f2c0,
1723 		0x1f2e0, 0x1f2e0,
1724 		0x1f300, 0x1f384,
1725 		0x1f3c0, 0x1f3c8,
1726 		0x1f408, 0x1f40c,
1727 		0x1f440, 0x1f444,
1728 		0x1f44c, 0x1f44c,
1729 		0x1f684, 0x1f690,
1730 		0x1f6c0, 0x1f6c0,
1731 		0x1f6e0, 0x1f6e0,
1732 		0x1f700, 0x1f784,
1733 		0x1f7c0, 0x1f7c8,
1734 		0x1f808, 0x1f80c,
1735 		0x1f840, 0x1f844,
1736 		0x1f84c, 0x1f84c,
1737 		0x1fa84, 0x1fa90,
1738 		0x1fac0, 0x1fac0,
1739 		0x1fae0, 0x1fae0,
1740 		0x1fb00, 0x1fb84,
1741 		0x1fbc0, 0x1fbc8,
1742 		0x1fc08, 0x1fc0c,
1743 		0x1fc40, 0x1fc44,
1744 		0x1fc4c, 0x1fc4c,
1745 		0x1fe84, 0x1fe90,
1746 		0x1fec0, 0x1fec0,
1747 		0x1fee0, 0x1fee0,
1748 		0x1ff00, 0x1ff84,
1749 		0x1ffc0, 0x1ffc8,
1750 		0x30000, 0x30030,
1751 		0x30100, 0x30144,
1752 		0x30190, 0x301a0,
1753 		0x301a8, 0x301b8,
1754 		0x301c4, 0x301c8,
1755 		0x301d0, 0x301d0,
1756 		0x30200, 0x30318,
1757 		0x30400, 0x304b4,
1758 		0x304c0, 0x3052c,
1759 		0x30540, 0x3061c,
1760 		0x30800, 0x30828,
1761 		0x30834, 0x30834,
1762 		0x308c0, 0x30908,
1763 		0x30910, 0x309ac,
1764 		0x30a00, 0x30a14,
1765 		0x30a1c, 0x30a2c,
1766 		0x30a44, 0x30a50,
1767 		0x30a74, 0x30a74,
1768 		0x30a7c, 0x30afc,
1769 		0x30b08, 0x30c24,
1770 		0x30d00, 0x30d00,
1771 		0x30d08, 0x30d14,
1772 		0x30d1c, 0x30d20,
1773 		0x30d3c, 0x30d3c,
1774 		0x30d48, 0x30d50,
1775 		0x31200, 0x3120c,
1776 		0x31220, 0x31220,
1777 		0x31240, 0x31240,
1778 		0x31600, 0x3160c,
1779 		0x31a00, 0x31a1c,
1780 		0x31e00, 0x31e20,
1781 		0x31e38, 0x31e3c,
1782 		0x31e80, 0x31e80,
1783 		0x31e88, 0x31ea8,
1784 		0x31eb0, 0x31eb4,
1785 		0x31ec8, 0x31ed4,
1786 		0x31fb8, 0x32004,
1787 		0x32200, 0x32200,
1788 		0x32208, 0x32240,
1789 		0x32248, 0x32280,
1790 		0x32288, 0x322c0,
1791 		0x322c8, 0x322fc,
1792 		0x32600, 0x32630,
1793 		0x32a00, 0x32abc,
1794 		0x32b00, 0x32b10,
1795 		0x32b20, 0x32b30,
1796 		0x32b40, 0x32b50,
1797 		0x32b60, 0x32b70,
1798 		0x33000, 0x33028,
1799 		0x33030, 0x33048,
1800 		0x33060, 0x33068,
1801 		0x33070, 0x3309c,
1802 		0x330f0, 0x33128,
1803 		0x33130, 0x33148,
1804 		0x33160, 0x33168,
1805 		0x33170, 0x3319c,
1806 		0x331f0, 0x33238,
1807 		0x33240, 0x33240,
1808 		0x33248, 0x33250,
1809 		0x3325c, 0x33264,
1810 		0x33270, 0x332b8,
1811 		0x332c0, 0x332e4,
1812 		0x332f8, 0x33338,
1813 		0x33340, 0x33340,
1814 		0x33348, 0x33350,
1815 		0x3335c, 0x33364,
1816 		0x33370, 0x333b8,
1817 		0x333c0, 0x333e4,
1818 		0x333f8, 0x33428,
1819 		0x33430, 0x33448,
1820 		0x33460, 0x33468,
1821 		0x33470, 0x3349c,
1822 		0x334f0, 0x33528,
1823 		0x33530, 0x33548,
1824 		0x33560, 0x33568,
1825 		0x33570, 0x3359c,
1826 		0x335f0, 0x33638,
1827 		0x33640, 0x33640,
1828 		0x33648, 0x33650,
1829 		0x3365c, 0x33664,
1830 		0x33670, 0x336b8,
1831 		0x336c0, 0x336e4,
1832 		0x336f8, 0x33738,
1833 		0x33740, 0x33740,
1834 		0x33748, 0x33750,
1835 		0x3375c, 0x33764,
1836 		0x33770, 0x337b8,
1837 		0x337c0, 0x337e4,
1838 		0x337f8, 0x337fc,
1839 		0x33814, 0x33814,
1840 		0x3382c, 0x3382c,
1841 		0x33880, 0x3388c,
1842 		0x338e8, 0x338ec,
1843 		0x33900, 0x33928,
1844 		0x33930, 0x33948,
1845 		0x33960, 0x33968,
1846 		0x33970, 0x3399c,
1847 		0x339f0, 0x33a38,
1848 		0x33a40, 0x33a40,
1849 		0x33a48, 0x33a50,
1850 		0x33a5c, 0x33a64,
1851 		0x33a70, 0x33ab8,
1852 		0x33ac0, 0x33ae4,
1853 		0x33af8, 0x33b10,
1854 		0x33b28, 0x33b28,
1855 		0x33b3c, 0x33b50,
1856 		0x33bf0, 0x33c10,
1857 		0x33c28, 0x33c28,
1858 		0x33c3c, 0x33c50,
1859 		0x33cf0, 0x33cfc,
1860 		0x34000, 0x34030,
1861 		0x34100, 0x34144,
1862 		0x34190, 0x341a0,
1863 		0x341a8, 0x341b8,
1864 		0x341c4, 0x341c8,
1865 		0x341d0, 0x341d0,
1866 		0x34200, 0x34318,
1867 		0x34400, 0x344b4,
1868 		0x344c0, 0x3452c,
1869 		0x34540, 0x3461c,
1870 		0x34800, 0x34828,
1871 		0x34834, 0x34834,
1872 		0x348c0, 0x34908,
1873 		0x34910, 0x349ac,
1874 		0x34a00, 0x34a14,
1875 		0x34a1c, 0x34a2c,
1876 		0x34a44, 0x34a50,
1877 		0x34a74, 0x34a74,
1878 		0x34a7c, 0x34afc,
1879 		0x34b08, 0x34c24,
1880 		0x34d00, 0x34d00,
1881 		0x34d08, 0x34d14,
1882 		0x34d1c, 0x34d20,
1883 		0x34d3c, 0x34d3c,
1884 		0x34d48, 0x34d50,
1885 		0x35200, 0x3520c,
1886 		0x35220, 0x35220,
1887 		0x35240, 0x35240,
1888 		0x35600, 0x3560c,
1889 		0x35a00, 0x35a1c,
1890 		0x35e00, 0x35e20,
1891 		0x35e38, 0x35e3c,
1892 		0x35e80, 0x35e80,
1893 		0x35e88, 0x35ea8,
1894 		0x35eb0, 0x35eb4,
1895 		0x35ec8, 0x35ed4,
1896 		0x35fb8, 0x36004,
1897 		0x36200, 0x36200,
1898 		0x36208, 0x36240,
1899 		0x36248, 0x36280,
1900 		0x36288, 0x362c0,
1901 		0x362c8, 0x362fc,
1902 		0x36600, 0x36630,
1903 		0x36a00, 0x36abc,
1904 		0x36b00, 0x36b10,
1905 		0x36b20, 0x36b30,
1906 		0x36b40, 0x36b50,
1907 		0x36b60, 0x36b70,
1908 		0x37000, 0x37028,
1909 		0x37030, 0x37048,
1910 		0x37060, 0x37068,
1911 		0x37070, 0x3709c,
1912 		0x370f0, 0x37128,
1913 		0x37130, 0x37148,
1914 		0x37160, 0x37168,
1915 		0x37170, 0x3719c,
1916 		0x371f0, 0x37238,
1917 		0x37240, 0x37240,
1918 		0x37248, 0x37250,
1919 		0x3725c, 0x37264,
1920 		0x37270, 0x372b8,
1921 		0x372c0, 0x372e4,
1922 		0x372f8, 0x37338,
1923 		0x37340, 0x37340,
1924 		0x37348, 0x37350,
1925 		0x3735c, 0x37364,
1926 		0x37370, 0x373b8,
1927 		0x373c0, 0x373e4,
1928 		0x373f8, 0x37428,
1929 		0x37430, 0x37448,
1930 		0x37460, 0x37468,
1931 		0x37470, 0x3749c,
1932 		0x374f0, 0x37528,
1933 		0x37530, 0x37548,
1934 		0x37560, 0x37568,
1935 		0x37570, 0x3759c,
1936 		0x375f0, 0x37638,
1937 		0x37640, 0x37640,
1938 		0x37648, 0x37650,
1939 		0x3765c, 0x37664,
1940 		0x37670, 0x376b8,
1941 		0x376c0, 0x376e4,
1942 		0x376f8, 0x37738,
1943 		0x37740, 0x37740,
1944 		0x37748, 0x37750,
1945 		0x3775c, 0x37764,
1946 		0x37770, 0x377b8,
1947 		0x377c0, 0x377e4,
1948 		0x377f8, 0x377fc,
1949 		0x37814, 0x37814,
1950 		0x3782c, 0x3782c,
1951 		0x37880, 0x3788c,
1952 		0x378e8, 0x378ec,
1953 		0x37900, 0x37928,
1954 		0x37930, 0x37948,
1955 		0x37960, 0x37968,
1956 		0x37970, 0x3799c,
1957 		0x379f0, 0x37a38,
1958 		0x37a40, 0x37a40,
1959 		0x37a48, 0x37a50,
1960 		0x37a5c, 0x37a64,
1961 		0x37a70, 0x37ab8,
1962 		0x37ac0, 0x37ae4,
1963 		0x37af8, 0x37b10,
1964 		0x37b28, 0x37b28,
1965 		0x37b3c, 0x37b50,
1966 		0x37bf0, 0x37c10,
1967 		0x37c28, 0x37c28,
1968 		0x37c3c, 0x37c50,
1969 		0x37cf0, 0x37cfc,
1970 		0x38000, 0x38030,
1971 		0x38100, 0x38144,
1972 		0x38190, 0x381a0,
1973 		0x381a8, 0x381b8,
1974 		0x381c4, 0x381c8,
1975 		0x381d0, 0x381d0,
1976 		0x38200, 0x38318,
1977 		0x38400, 0x384b4,
1978 		0x384c0, 0x3852c,
1979 		0x38540, 0x3861c,
1980 		0x38800, 0x38828,
1981 		0x38834, 0x38834,
1982 		0x388c0, 0x38908,
1983 		0x38910, 0x389ac,
1984 		0x38a00, 0x38a14,
1985 		0x38a1c, 0x38a2c,
1986 		0x38a44, 0x38a50,
1987 		0x38a74, 0x38a74,
1988 		0x38a7c, 0x38afc,
1989 		0x38b08, 0x38c24,
1990 		0x38d00, 0x38d00,
1991 		0x38d08, 0x38d14,
1992 		0x38d1c, 0x38d20,
1993 		0x38d3c, 0x38d3c,
1994 		0x38d48, 0x38d50,
1995 		0x39200, 0x3920c,
1996 		0x39220, 0x39220,
1997 		0x39240, 0x39240,
1998 		0x39600, 0x3960c,
1999 		0x39a00, 0x39a1c,
2000 		0x39e00, 0x39e20,
2001 		0x39e38, 0x39e3c,
2002 		0x39e80, 0x39e80,
2003 		0x39e88, 0x39ea8,
2004 		0x39eb0, 0x39eb4,
2005 		0x39ec8, 0x39ed4,
2006 		0x39fb8, 0x3a004,
2007 		0x3a200, 0x3a200,
2008 		0x3a208, 0x3a240,
2009 		0x3a248, 0x3a280,
2010 		0x3a288, 0x3a2c0,
2011 		0x3a2c8, 0x3a2fc,
2012 		0x3a600, 0x3a630,
2013 		0x3aa00, 0x3aabc,
2014 		0x3ab00, 0x3ab10,
2015 		0x3ab20, 0x3ab30,
2016 		0x3ab40, 0x3ab50,
2017 		0x3ab60, 0x3ab70,
2018 		0x3b000, 0x3b028,
2019 		0x3b030, 0x3b048,
2020 		0x3b060, 0x3b068,
2021 		0x3b070, 0x3b09c,
2022 		0x3b0f0, 0x3b128,
2023 		0x3b130, 0x3b148,
2024 		0x3b160, 0x3b168,
2025 		0x3b170, 0x3b19c,
2026 		0x3b1f0, 0x3b238,
2027 		0x3b240, 0x3b240,
2028 		0x3b248, 0x3b250,
2029 		0x3b25c, 0x3b264,
2030 		0x3b270, 0x3b2b8,
2031 		0x3b2c0, 0x3b2e4,
2032 		0x3b2f8, 0x3b338,
2033 		0x3b340, 0x3b340,
2034 		0x3b348, 0x3b350,
2035 		0x3b35c, 0x3b364,
2036 		0x3b370, 0x3b3b8,
2037 		0x3b3c0, 0x3b3e4,
2038 		0x3b3f8, 0x3b428,
2039 		0x3b430, 0x3b448,
2040 		0x3b460, 0x3b468,
2041 		0x3b470, 0x3b49c,
2042 		0x3b4f0, 0x3b528,
2043 		0x3b530, 0x3b548,
2044 		0x3b560, 0x3b568,
2045 		0x3b570, 0x3b59c,
2046 		0x3b5f0, 0x3b638,
2047 		0x3b640, 0x3b640,
2048 		0x3b648, 0x3b650,
2049 		0x3b65c, 0x3b664,
2050 		0x3b670, 0x3b6b8,
2051 		0x3b6c0, 0x3b6e4,
2052 		0x3b6f8, 0x3b738,
2053 		0x3b740, 0x3b740,
2054 		0x3b748, 0x3b750,
2055 		0x3b75c, 0x3b764,
2056 		0x3b770, 0x3b7b8,
2057 		0x3b7c0, 0x3b7e4,
2058 		0x3b7f8, 0x3b7fc,
2059 		0x3b814, 0x3b814,
2060 		0x3b82c, 0x3b82c,
2061 		0x3b880, 0x3b88c,
2062 		0x3b8e8, 0x3b8ec,
2063 		0x3b900, 0x3b928,
2064 		0x3b930, 0x3b948,
2065 		0x3b960, 0x3b968,
2066 		0x3b970, 0x3b99c,
2067 		0x3b9f0, 0x3ba38,
2068 		0x3ba40, 0x3ba40,
2069 		0x3ba48, 0x3ba50,
2070 		0x3ba5c, 0x3ba64,
2071 		0x3ba70, 0x3bab8,
2072 		0x3bac0, 0x3bae4,
2073 		0x3baf8, 0x3bb10,
2074 		0x3bb28, 0x3bb28,
2075 		0x3bb3c, 0x3bb50,
2076 		0x3bbf0, 0x3bc10,
2077 		0x3bc28, 0x3bc28,
2078 		0x3bc3c, 0x3bc50,
2079 		0x3bcf0, 0x3bcfc,
2080 		0x3c000, 0x3c030,
2081 		0x3c100, 0x3c144,
2082 		0x3c190, 0x3c1a0,
2083 		0x3c1a8, 0x3c1b8,
2084 		0x3c1c4, 0x3c1c8,
2085 		0x3c1d0, 0x3c1d0,
2086 		0x3c200, 0x3c318,
2087 		0x3c400, 0x3c4b4,
2088 		0x3c4c0, 0x3c52c,
2089 		0x3c540, 0x3c61c,
2090 		0x3c800, 0x3c828,
2091 		0x3c834, 0x3c834,
2092 		0x3c8c0, 0x3c908,
2093 		0x3c910, 0x3c9ac,
2094 		0x3ca00, 0x3ca14,
2095 		0x3ca1c, 0x3ca2c,
2096 		0x3ca44, 0x3ca50,
2097 		0x3ca74, 0x3ca74,
2098 		0x3ca7c, 0x3cafc,
2099 		0x3cb08, 0x3cc24,
2100 		0x3cd00, 0x3cd00,
2101 		0x3cd08, 0x3cd14,
2102 		0x3cd1c, 0x3cd20,
2103 		0x3cd3c, 0x3cd3c,
2104 		0x3cd48, 0x3cd50,
2105 		0x3d200, 0x3d20c,
2106 		0x3d220, 0x3d220,
2107 		0x3d240, 0x3d240,
2108 		0x3d600, 0x3d60c,
2109 		0x3da00, 0x3da1c,
2110 		0x3de00, 0x3de20,
2111 		0x3de38, 0x3de3c,
2112 		0x3de80, 0x3de80,
2113 		0x3de88, 0x3dea8,
2114 		0x3deb0, 0x3deb4,
2115 		0x3dec8, 0x3ded4,
2116 		0x3dfb8, 0x3e004,
2117 		0x3e200, 0x3e200,
2118 		0x3e208, 0x3e240,
2119 		0x3e248, 0x3e280,
2120 		0x3e288, 0x3e2c0,
2121 		0x3e2c8, 0x3e2fc,
2122 		0x3e600, 0x3e630,
2123 		0x3ea00, 0x3eabc,
2124 		0x3eb00, 0x3eb10,
2125 		0x3eb20, 0x3eb30,
2126 		0x3eb40, 0x3eb50,
2127 		0x3eb60, 0x3eb70,
2128 		0x3f000, 0x3f028,
2129 		0x3f030, 0x3f048,
2130 		0x3f060, 0x3f068,
2131 		0x3f070, 0x3f09c,
2132 		0x3f0f0, 0x3f128,
2133 		0x3f130, 0x3f148,
2134 		0x3f160, 0x3f168,
2135 		0x3f170, 0x3f19c,
2136 		0x3f1f0, 0x3f238,
2137 		0x3f240, 0x3f240,
2138 		0x3f248, 0x3f250,
2139 		0x3f25c, 0x3f264,
2140 		0x3f270, 0x3f2b8,
2141 		0x3f2c0, 0x3f2e4,
2142 		0x3f2f8, 0x3f338,
2143 		0x3f340, 0x3f340,
2144 		0x3f348, 0x3f350,
2145 		0x3f35c, 0x3f364,
2146 		0x3f370, 0x3f3b8,
2147 		0x3f3c0, 0x3f3e4,
2148 		0x3f3f8, 0x3f428,
2149 		0x3f430, 0x3f448,
2150 		0x3f460, 0x3f468,
2151 		0x3f470, 0x3f49c,
2152 		0x3f4f0, 0x3f528,
2153 		0x3f530, 0x3f548,
2154 		0x3f560, 0x3f568,
2155 		0x3f570, 0x3f59c,
2156 		0x3f5f0, 0x3f638,
2157 		0x3f640, 0x3f640,
2158 		0x3f648, 0x3f650,
2159 		0x3f65c, 0x3f664,
2160 		0x3f670, 0x3f6b8,
2161 		0x3f6c0, 0x3f6e4,
2162 		0x3f6f8, 0x3f738,
2163 		0x3f740, 0x3f740,
2164 		0x3f748, 0x3f750,
2165 		0x3f75c, 0x3f764,
2166 		0x3f770, 0x3f7b8,
2167 		0x3f7c0, 0x3f7e4,
2168 		0x3f7f8, 0x3f7fc,
2169 		0x3f814, 0x3f814,
2170 		0x3f82c, 0x3f82c,
2171 		0x3f880, 0x3f88c,
2172 		0x3f8e8, 0x3f8ec,
2173 		0x3f900, 0x3f928,
2174 		0x3f930, 0x3f948,
2175 		0x3f960, 0x3f968,
2176 		0x3f970, 0x3f99c,
2177 		0x3f9f0, 0x3fa38,
2178 		0x3fa40, 0x3fa40,
2179 		0x3fa48, 0x3fa50,
2180 		0x3fa5c, 0x3fa64,
2181 		0x3fa70, 0x3fab8,
2182 		0x3fac0, 0x3fae4,
2183 		0x3faf8, 0x3fb10,
2184 		0x3fb28, 0x3fb28,
2185 		0x3fb3c, 0x3fb50,
2186 		0x3fbf0, 0x3fc10,
2187 		0x3fc28, 0x3fc28,
2188 		0x3fc3c, 0x3fc50,
2189 		0x3fcf0, 0x3fcfc,
2190 		0x40000, 0x4000c,
2191 		0x40040, 0x40050,
2192 		0x40060, 0x40068,
2193 		0x4007c, 0x4008c,
2194 		0x40094, 0x400b0,
2195 		0x400c0, 0x40144,
2196 		0x40180, 0x4018c,
2197 		0x40200, 0x40254,
2198 		0x40260, 0x40264,
2199 		0x40270, 0x40288,
2200 		0x40290, 0x40298,
2201 		0x402ac, 0x402c8,
2202 		0x402d0, 0x402e0,
2203 		0x402f0, 0x402f0,
2204 		0x40300, 0x4033c,
2205 		0x403f8, 0x403fc,
2206 		0x41304, 0x413c4,
2207 		0x41400, 0x4140c,
2208 		0x41414, 0x4141c,
2209 		0x41480, 0x414d0,
2210 		0x44000, 0x44054,
2211 		0x4405c, 0x44078,
2212 		0x440c0, 0x44174,
2213 		0x44180, 0x441ac,
2214 		0x441b4, 0x441b8,
2215 		0x441c0, 0x44254,
2216 		0x4425c, 0x44278,
2217 		0x442c0, 0x44374,
2218 		0x44380, 0x443ac,
2219 		0x443b4, 0x443b8,
2220 		0x443c0, 0x44454,
2221 		0x4445c, 0x44478,
2222 		0x444c0, 0x44574,
2223 		0x44580, 0x445ac,
2224 		0x445b4, 0x445b8,
2225 		0x445c0, 0x44654,
2226 		0x4465c, 0x44678,
2227 		0x446c0, 0x44774,
2228 		0x44780, 0x447ac,
2229 		0x447b4, 0x447b8,
2230 		0x447c0, 0x44854,
2231 		0x4485c, 0x44878,
2232 		0x448c0, 0x44974,
2233 		0x44980, 0x449ac,
2234 		0x449b4, 0x449b8,
2235 		0x449c0, 0x449fc,
2236 		0x45000, 0x45004,
2237 		0x45010, 0x45030,
2238 		0x45040, 0x45060,
2239 		0x45068, 0x45068,
2240 		0x45080, 0x45084,
2241 		0x450a0, 0x450b0,
2242 		0x45200, 0x45204,
2243 		0x45210, 0x45230,
2244 		0x45240, 0x45260,
2245 		0x45268, 0x45268,
2246 		0x45280, 0x45284,
2247 		0x452a0, 0x452b0,
2248 		0x460c0, 0x460e4,
2249 		0x47000, 0x4703c,
2250 		0x47044, 0x4708c,
2251 		0x47200, 0x47250,
2252 		0x47400, 0x47408,
2253 		0x47414, 0x47420,
2254 		0x47600, 0x47618,
2255 		0x47800, 0x47814,
2256 		0x48000, 0x4800c,
2257 		0x48040, 0x48050,
2258 		0x48060, 0x48068,
2259 		0x4807c, 0x4808c,
2260 		0x48094, 0x480b0,
2261 		0x480c0, 0x48144,
2262 		0x48180, 0x4818c,
2263 		0x48200, 0x48254,
2264 		0x48260, 0x48264,
2265 		0x48270, 0x48288,
2266 		0x48290, 0x48298,
2267 		0x482ac, 0x482c8,
2268 		0x482d0, 0x482e0,
2269 		0x482f0, 0x482f0,
2270 		0x48300, 0x4833c,
2271 		0x483f8, 0x483fc,
2272 		0x49304, 0x493c4,
2273 		0x49400, 0x4940c,
2274 		0x49414, 0x4941c,
2275 		0x49480, 0x494d0,
2276 		0x4c000, 0x4c054,
2277 		0x4c05c, 0x4c078,
2278 		0x4c0c0, 0x4c174,
2279 		0x4c180, 0x4c1ac,
2280 		0x4c1b4, 0x4c1b8,
2281 		0x4c1c0, 0x4c254,
2282 		0x4c25c, 0x4c278,
2283 		0x4c2c0, 0x4c374,
2284 		0x4c380, 0x4c3ac,
2285 		0x4c3b4, 0x4c3b8,
2286 		0x4c3c0, 0x4c454,
2287 		0x4c45c, 0x4c478,
2288 		0x4c4c0, 0x4c574,
2289 		0x4c580, 0x4c5ac,
2290 		0x4c5b4, 0x4c5b8,
2291 		0x4c5c0, 0x4c654,
2292 		0x4c65c, 0x4c678,
2293 		0x4c6c0, 0x4c774,
2294 		0x4c780, 0x4c7ac,
2295 		0x4c7b4, 0x4c7b8,
2296 		0x4c7c0, 0x4c854,
2297 		0x4c85c, 0x4c878,
2298 		0x4c8c0, 0x4c974,
2299 		0x4c980, 0x4c9ac,
2300 		0x4c9b4, 0x4c9b8,
2301 		0x4c9c0, 0x4c9fc,
2302 		0x4d000, 0x4d004,
2303 		0x4d010, 0x4d030,
2304 		0x4d040, 0x4d060,
2305 		0x4d068, 0x4d068,
2306 		0x4d080, 0x4d084,
2307 		0x4d0a0, 0x4d0b0,
2308 		0x4d200, 0x4d204,
2309 		0x4d210, 0x4d230,
2310 		0x4d240, 0x4d260,
2311 		0x4d268, 0x4d268,
2312 		0x4d280, 0x4d284,
2313 		0x4d2a0, 0x4d2b0,
2314 		0x4e0c0, 0x4e0e4,
2315 		0x4f000, 0x4f03c,
2316 		0x4f044, 0x4f08c,
2317 		0x4f200, 0x4f250,
2318 		0x4f400, 0x4f408,
2319 		0x4f414, 0x4f420,
2320 		0x4f600, 0x4f618,
2321 		0x4f800, 0x4f814,
2322 		0x50000, 0x50084,
2323 		0x50090, 0x500cc,
2324 		0x50400, 0x50400,
2325 		0x50800, 0x50884,
2326 		0x50890, 0x508cc,
2327 		0x50c00, 0x50c00,
2328 		0x51000, 0x5101c,
2329 		0x51300, 0x51308,
2330 	};
2331 
2332 	static const unsigned int t6_reg_ranges[] = {
2333 		0x1008, 0x101c,
2334 		0x1024, 0x10a8,
2335 		0x10b4, 0x10f8,
2336 		0x1100, 0x1114,
2337 		0x111c, 0x112c,
2338 		0x1138, 0x113c,
2339 		0x1144, 0x114c,
2340 		0x1180, 0x1184,
2341 		0x1190, 0x1194,
2342 		0x11a0, 0x11a4,
2343 		0x11b0, 0x11b4,
2344 		0x11fc, 0x1274,
2345 		0x1280, 0x133c,
2346 		0x1800, 0x18fc,
2347 		0x3000, 0x302c,
2348 		0x3060, 0x30b0,
2349 		0x30b8, 0x30d8,
2350 		0x30e0, 0x30fc,
2351 		0x3140, 0x357c,
2352 		0x35a8, 0x35cc,
2353 		0x35ec, 0x35ec,
2354 		0x3600, 0x5624,
2355 		0x56cc, 0x56ec,
2356 		0x56f4, 0x5720,
2357 		0x5728, 0x575c,
2358 		0x580c, 0x5814,
2359 		0x5890, 0x589c,
2360 		0x58a4, 0x58ac,
2361 		0x58b8, 0x58bc,
2362 		0x5940, 0x595c,
2363 		0x5980, 0x598c,
2364 		0x59b0, 0x59c8,
2365 		0x59d0, 0x59dc,
2366 		0x59fc, 0x5a18,
2367 		0x5a60, 0x5a6c,
2368 		0x5a80, 0x5a8c,
2369 		0x5a94, 0x5a9c,
2370 		0x5b94, 0x5bfc,
2371 		0x5c10, 0x5e48,
2372 		0x5e50, 0x5e94,
2373 		0x5ea0, 0x5eb0,
2374 		0x5ec0, 0x5ec0,
2375 		0x5ec8, 0x5ed0,
2376 		0x5ee0, 0x5ee0,
2377 		0x5ef0, 0x5ef0,
2378 		0x5f00, 0x5f00,
2379 		0x6000, 0x6020,
2380 		0x6028, 0x6040,
2381 		0x6058, 0x609c,
2382 		0x60a8, 0x619c,
2383 		0x7700, 0x7798,
2384 		0x77c0, 0x7880,
2385 		0x78cc, 0x78fc,
2386 		0x7b00, 0x7b58,
2387 		0x7b60, 0x7b84,
2388 		0x7b8c, 0x7c54,
2389 		0x7d00, 0x7d38,
2390 		0x7d40, 0x7d84,
2391 		0x7d8c, 0x7ddc,
2392 		0x7de4, 0x7e04,
2393 		0x7e10, 0x7e1c,
2394 		0x7e24, 0x7e38,
2395 		0x7e40, 0x7e44,
2396 		0x7e4c, 0x7e78,
2397 		0x7e80, 0x7edc,
2398 		0x7ee8, 0x7efc,
2399 		0x8dc0, 0x8de4,
2400 		0x8df8, 0x8e04,
2401 		0x8e10, 0x8e84,
2402 		0x8ea0, 0x8f88,
2403 		0x8fb8, 0x9058,
2404 		0x9060, 0x9060,
2405 		0x9068, 0x90f8,
2406 		0x9100, 0x9124,
2407 		0x9400, 0x9470,
2408 		0x9600, 0x9600,
2409 		0x9608, 0x9638,
2410 		0x9640, 0x9704,
2411 		0x9710, 0x971c,
2412 		0x9800, 0x9808,
2413 		0x9820, 0x983c,
2414 		0x9850, 0x9864,
2415 		0x9c00, 0x9c6c,
2416 		0x9c80, 0x9cec,
2417 		0x9d00, 0x9d6c,
2418 		0x9d80, 0x9dec,
2419 		0x9e00, 0x9e6c,
2420 		0x9e80, 0x9eec,
2421 		0x9f00, 0x9f6c,
2422 		0x9f80, 0xa020,
2423 		0xd004, 0xd03c,
2424 		0xd100, 0xd118,
2425 		0xd200, 0xd214,
2426 		0xd220, 0xd234,
2427 		0xd240, 0xd254,
2428 		0xd260, 0xd274,
2429 		0xd280, 0xd294,
2430 		0xd2a0, 0xd2b4,
2431 		0xd2c0, 0xd2d4,
2432 		0xd2e0, 0xd2f4,
2433 		0xd300, 0xd31c,
2434 		0xdfc0, 0xdfe0,
2435 		0xe000, 0xf008,
2436 		0xf010, 0xf018,
2437 		0xf020, 0xf028,
2438 		0x11000, 0x11014,
2439 		0x11048, 0x1106c,
2440 		0x11074, 0x11088,
2441 		0x11098, 0x11120,
2442 		0x1112c, 0x1117c,
2443 		0x11190, 0x112e0,
2444 		0x11300, 0x1130c,
2445 		0x12000, 0x1206c,
2446 		0x19040, 0x1906c,
2447 		0x19078, 0x19080,
2448 		0x1908c, 0x190e8,
2449 		0x190f0, 0x190f8,
2450 		0x19100, 0x19110,
2451 		0x19120, 0x19124,
2452 		0x19150, 0x19194,
2453 		0x1919c, 0x191b0,
2454 		0x191d0, 0x191e8,
2455 		0x19238, 0x19290,
2456 		0x192a4, 0x192b0,
2457 		0x192bc, 0x192bc,
2458 		0x19348, 0x1934c,
2459 		0x193f8, 0x19418,
2460 		0x19420, 0x19428,
2461 		0x19430, 0x19444,
2462 		0x1944c, 0x1946c,
2463 		0x19474, 0x19474,
2464 		0x19490, 0x194cc,
2465 		0x194f0, 0x194f8,
2466 		0x19c00, 0x19c48,
2467 		0x19c50, 0x19c80,
2468 		0x19c94, 0x19c98,
2469 		0x19ca0, 0x19cbc,
2470 		0x19ce4, 0x19ce4,
2471 		0x19cf0, 0x19cf8,
2472 		0x19d00, 0x19d28,
2473 		0x19d50, 0x19d78,
2474 		0x19d94, 0x19d98,
2475 		0x19da0, 0x19dc8,
2476 		0x19df0, 0x19e10,
2477 		0x19e50, 0x19e6c,
2478 		0x19ea0, 0x19ebc,
2479 		0x19ec4, 0x19ef4,
2480 		0x19f04, 0x19f2c,
2481 		0x19f34, 0x19f34,
2482 		0x19f40, 0x19f50,
2483 		0x19f90, 0x19fac,
2484 		0x19fc4, 0x19fc8,
2485 		0x19fd0, 0x19fe4,
2486 		0x1a000, 0x1a004,
2487 		0x1a010, 0x1a06c,
2488 		0x1a0b0, 0x1a0e4,
2489 		0x1a0ec, 0x1a0f8,
2490 		0x1a100, 0x1a108,
2491 		0x1a114, 0x1a120,
2492 		0x1a128, 0x1a130,
2493 		0x1a138, 0x1a138,
2494 		0x1a190, 0x1a1c4,
2495 		0x1a1fc, 0x1a1fc,
2496 		0x1e008, 0x1e00c,
2497 		0x1e040, 0x1e044,
2498 		0x1e04c, 0x1e04c,
2499 		0x1e284, 0x1e290,
2500 		0x1e2c0, 0x1e2c0,
2501 		0x1e2e0, 0x1e2e0,
2502 		0x1e300, 0x1e384,
2503 		0x1e3c0, 0x1e3c8,
2504 		0x1e408, 0x1e40c,
2505 		0x1e440, 0x1e444,
2506 		0x1e44c, 0x1e44c,
2507 		0x1e684, 0x1e690,
2508 		0x1e6c0, 0x1e6c0,
2509 		0x1e6e0, 0x1e6e0,
2510 		0x1e700, 0x1e784,
2511 		0x1e7c0, 0x1e7c8,
2512 		0x1e808, 0x1e80c,
2513 		0x1e840, 0x1e844,
2514 		0x1e84c, 0x1e84c,
2515 		0x1ea84, 0x1ea90,
2516 		0x1eac0, 0x1eac0,
2517 		0x1eae0, 0x1eae0,
2518 		0x1eb00, 0x1eb84,
2519 		0x1ebc0, 0x1ebc8,
2520 		0x1ec08, 0x1ec0c,
2521 		0x1ec40, 0x1ec44,
2522 		0x1ec4c, 0x1ec4c,
2523 		0x1ee84, 0x1ee90,
2524 		0x1eec0, 0x1eec0,
2525 		0x1eee0, 0x1eee0,
2526 		0x1ef00, 0x1ef84,
2527 		0x1efc0, 0x1efc8,
2528 		0x1f008, 0x1f00c,
2529 		0x1f040, 0x1f044,
2530 		0x1f04c, 0x1f04c,
2531 		0x1f284, 0x1f290,
2532 		0x1f2c0, 0x1f2c0,
2533 		0x1f2e0, 0x1f2e0,
2534 		0x1f300, 0x1f384,
2535 		0x1f3c0, 0x1f3c8,
2536 		0x1f408, 0x1f40c,
2537 		0x1f440, 0x1f444,
2538 		0x1f44c, 0x1f44c,
2539 		0x1f684, 0x1f690,
2540 		0x1f6c0, 0x1f6c0,
2541 		0x1f6e0, 0x1f6e0,
2542 		0x1f700, 0x1f784,
2543 		0x1f7c0, 0x1f7c8,
2544 		0x1f808, 0x1f80c,
2545 		0x1f840, 0x1f844,
2546 		0x1f84c, 0x1f84c,
2547 		0x1fa84, 0x1fa90,
2548 		0x1fac0, 0x1fac0,
2549 		0x1fae0, 0x1fae0,
2550 		0x1fb00, 0x1fb84,
2551 		0x1fbc0, 0x1fbc8,
2552 		0x1fc08, 0x1fc0c,
2553 		0x1fc40, 0x1fc44,
2554 		0x1fc4c, 0x1fc4c,
2555 		0x1fe84, 0x1fe90,
2556 		0x1fec0, 0x1fec0,
2557 		0x1fee0, 0x1fee0,
2558 		0x1ff00, 0x1ff84,
2559 		0x1ffc0, 0x1ffc8,
2560 		0x30000, 0x30030,
2561 		0x30100, 0x30168,
2562 		0x30190, 0x301a0,
2563 		0x301a8, 0x301b8,
2564 		0x301c4, 0x301c8,
2565 		0x301d0, 0x301d0,
2566 		0x30200, 0x30320,
2567 		0x30400, 0x304b4,
2568 		0x304c0, 0x3052c,
2569 		0x30540, 0x3061c,
2570 		0x30800, 0x308a0,
2571 		0x308c0, 0x30908,
2572 		0x30910, 0x309b8,
2573 		0x30a00, 0x30a04,
2574 		0x30a0c, 0x30a14,
2575 		0x30a1c, 0x30a2c,
2576 		0x30a44, 0x30a50,
2577 		0x30a74, 0x30a74,
2578 		0x30a7c, 0x30afc,
2579 		0x30b08, 0x30c24,
2580 		0x30d00, 0x30d14,
2581 		0x30d1c, 0x30d3c,
2582 		0x30d44, 0x30d4c,
2583 		0x30d54, 0x30d74,
2584 		0x30d7c, 0x30d7c,
2585 		0x30de0, 0x30de0,
2586 		0x30e00, 0x30ed4,
2587 		0x30f00, 0x30fa4,
2588 		0x30fc0, 0x30fc4,
2589 		0x31000, 0x31004,
2590 		0x31080, 0x310fc,
2591 		0x31208, 0x31220,
2592 		0x3123c, 0x31254,
2593 		0x31300, 0x31300,
2594 		0x31308, 0x3131c,
2595 		0x31338, 0x3133c,
2596 		0x31380, 0x31380,
2597 		0x31388, 0x313a8,
2598 		0x313b4, 0x313b4,
2599 		0x31400, 0x31420,
2600 		0x31438, 0x3143c,
2601 		0x31480, 0x31480,
2602 		0x314a8, 0x314a8,
2603 		0x314b0, 0x314b4,
2604 		0x314c8, 0x314d4,
2605 		0x31a40, 0x31a4c,
2606 		0x31af0, 0x31b20,
2607 		0x31b38, 0x31b3c,
2608 		0x31b80, 0x31b80,
2609 		0x31ba8, 0x31ba8,
2610 		0x31bb0, 0x31bb4,
2611 		0x31bc8, 0x31bd4,
2612 		0x32140, 0x3218c,
2613 		0x321f0, 0x321f4,
2614 		0x32200, 0x32200,
2615 		0x32218, 0x32218,
2616 		0x32400, 0x32400,
2617 		0x32408, 0x3241c,
2618 		0x32618, 0x32620,
2619 		0x32664, 0x32664,
2620 		0x326a8, 0x326a8,
2621 		0x326ec, 0x326ec,
2622 		0x32a00, 0x32abc,
2623 		0x32b00, 0x32b18,
2624 		0x32b20, 0x32b38,
2625 		0x32b40, 0x32b58,
2626 		0x32b60, 0x32b78,
2627 		0x32c00, 0x32c00,
2628 		0x32c08, 0x32c3c,
2629 		0x33000, 0x3302c,
2630 		0x33034, 0x33050,
2631 		0x33058, 0x33058,
2632 		0x33060, 0x3308c,
2633 		0x3309c, 0x330ac,
2634 		0x330c0, 0x330c0,
2635 		0x330c8, 0x330d0,
2636 		0x330d8, 0x330e0,
2637 		0x330ec, 0x3312c,
2638 		0x33134, 0x33150,
2639 		0x33158, 0x33158,
2640 		0x33160, 0x3318c,
2641 		0x3319c, 0x331ac,
2642 		0x331c0, 0x331c0,
2643 		0x331c8, 0x331d0,
2644 		0x331d8, 0x331e0,
2645 		0x331ec, 0x33290,
2646 		0x33298, 0x332c4,
2647 		0x332e4, 0x33390,
2648 		0x33398, 0x333c4,
2649 		0x333e4, 0x3342c,
2650 		0x33434, 0x33450,
2651 		0x33458, 0x33458,
2652 		0x33460, 0x3348c,
2653 		0x3349c, 0x334ac,
2654 		0x334c0, 0x334c0,
2655 		0x334c8, 0x334d0,
2656 		0x334d8, 0x334e0,
2657 		0x334ec, 0x3352c,
2658 		0x33534, 0x33550,
2659 		0x33558, 0x33558,
2660 		0x33560, 0x3358c,
2661 		0x3359c, 0x335ac,
2662 		0x335c0, 0x335c0,
2663 		0x335c8, 0x335d0,
2664 		0x335d8, 0x335e0,
2665 		0x335ec, 0x33690,
2666 		0x33698, 0x336c4,
2667 		0x336e4, 0x33790,
2668 		0x33798, 0x337c4,
2669 		0x337e4, 0x337fc,
2670 		0x33814, 0x33814,
2671 		0x33854, 0x33868,
2672 		0x33880, 0x3388c,
2673 		0x338c0, 0x338d0,
2674 		0x338e8, 0x338ec,
2675 		0x33900, 0x3392c,
2676 		0x33934, 0x33950,
2677 		0x33958, 0x33958,
2678 		0x33960, 0x3398c,
2679 		0x3399c, 0x339ac,
2680 		0x339c0, 0x339c0,
2681 		0x339c8, 0x339d0,
2682 		0x339d8, 0x339e0,
2683 		0x339ec, 0x33a90,
2684 		0x33a98, 0x33ac4,
2685 		0x33ae4, 0x33b10,
2686 		0x33b24, 0x33b28,
2687 		0x33b38, 0x33b50,
2688 		0x33bf0, 0x33c10,
2689 		0x33c24, 0x33c28,
2690 		0x33c38, 0x33c50,
2691 		0x33cf0, 0x33cfc,
2692 		0x34000, 0x34030,
2693 		0x34100, 0x34168,
2694 		0x34190, 0x341a0,
2695 		0x341a8, 0x341b8,
2696 		0x341c4, 0x341c8,
2697 		0x341d0, 0x341d0,
2698 		0x34200, 0x34320,
2699 		0x34400, 0x344b4,
2700 		0x344c0, 0x3452c,
2701 		0x34540, 0x3461c,
2702 		0x34800, 0x348a0,
2703 		0x348c0, 0x34908,
2704 		0x34910, 0x349b8,
2705 		0x34a00, 0x34a04,
2706 		0x34a0c, 0x34a14,
2707 		0x34a1c, 0x34a2c,
2708 		0x34a44, 0x34a50,
2709 		0x34a74, 0x34a74,
2710 		0x34a7c, 0x34afc,
2711 		0x34b08, 0x34c24,
2712 		0x34d00, 0x34d14,
2713 		0x34d1c, 0x34d3c,
2714 		0x34d44, 0x34d4c,
2715 		0x34d54, 0x34d74,
2716 		0x34d7c, 0x34d7c,
2717 		0x34de0, 0x34de0,
2718 		0x34e00, 0x34ed4,
2719 		0x34f00, 0x34fa4,
2720 		0x34fc0, 0x34fc4,
2721 		0x35000, 0x35004,
2722 		0x35080, 0x350fc,
2723 		0x35208, 0x35220,
2724 		0x3523c, 0x35254,
2725 		0x35300, 0x35300,
2726 		0x35308, 0x3531c,
2727 		0x35338, 0x3533c,
2728 		0x35380, 0x35380,
2729 		0x35388, 0x353a8,
2730 		0x353b4, 0x353b4,
2731 		0x35400, 0x35420,
2732 		0x35438, 0x3543c,
2733 		0x35480, 0x35480,
2734 		0x354a8, 0x354a8,
2735 		0x354b0, 0x354b4,
2736 		0x354c8, 0x354d4,
2737 		0x35a40, 0x35a4c,
2738 		0x35af0, 0x35b20,
2739 		0x35b38, 0x35b3c,
2740 		0x35b80, 0x35b80,
2741 		0x35ba8, 0x35ba8,
2742 		0x35bb0, 0x35bb4,
2743 		0x35bc8, 0x35bd4,
2744 		0x36140, 0x3618c,
2745 		0x361f0, 0x361f4,
2746 		0x36200, 0x36200,
2747 		0x36218, 0x36218,
2748 		0x36400, 0x36400,
2749 		0x36408, 0x3641c,
2750 		0x36618, 0x36620,
2751 		0x36664, 0x36664,
2752 		0x366a8, 0x366a8,
2753 		0x366ec, 0x366ec,
2754 		0x36a00, 0x36abc,
2755 		0x36b00, 0x36b18,
2756 		0x36b20, 0x36b38,
2757 		0x36b40, 0x36b58,
2758 		0x36b60, 0x36b78,
2759 		0x36c00, 0x36c00,
2760 		0x36c08, 0x36c3c,
2761 		0x37000, 0x3702c,
2762 		0x37034, 0x37050,
2763 		0x37058, 0x37058,
2764 		0x37060, 0x3708c,
2765 		0x3709c, 0x370ac,
2766 		0x370c0, 0x370c0,
2767 		0x370c8, 0x370d0,
2768 		0x370d8, 0x370e0,
2769 		0x370ec, 0x3712c,
2770 		0x37134, 0x37150,
2771 		0x37158, 0x37158,
2772 		0x37160, 0x3718c,
2773 		0x3719c, 0x371ac,
2774 		0x371c0, 0x371c0,
2775 		0x371c8, 0x371d0,
2776 		0x371d8, 0x371e0,
2777 		0x371ec, 0x37290,
2778 		0x37298, 0x372c4,
2779 		0x372e4, 0x37390,
2780 		0x37398, 0x373c4,
2781 		0x373e4, 0x3742c,
2782 		0x37434, 0x37450,
2783 		0x37458, 0x37458,
2784 		0x37460, 0x3748c,
2785 		0x3749c, 0x374ac,
2786 		0x374c0, 0x374c0,
2787 		0x374c8, 0x374d0,
2788 		0x374d8, 0x374e0,
2789 		0x374ec, 0x3752c,
2790 		0x37534, 0x37550,
2791 		0x37558, 0x37558,
2792 		0x37560, 0x3758c,
2793 		0x3759c, 0x375ac,
2794 		0x375c0, 0x375c0,
2795 		0x375c8, 0x375d0,
2796 		0x375d8, 0x375e0,
2797 		0x375ec, 0x37690,
2798 		0x37698, 0x376c4,
2799 		0x376e4, 0x37790,
2800 		0x37798, 0x377c4,
2801 		0x377e4, 0x377fc,
2802 		0x37814, 0x37814,
2803 		0x37854, 0x37868,
2804 		0x37880, 0x3788c,
2805 		0x378c0, 0x378d0,
2806 		0x378e8, 0x378ec,
2807 		0x37900, 0x3792c,
2808 		0x37934, 0x37950,
2809 		0x37958, 0x37958,
2810 		0x37960, 0x3798c,
2811 		0x3799c, 0x379ac,
2812 		0x379c0, 0x379c0,
2813 		0x379c8, 0x379d0,
2814 		0x379d8, 0x379e0,
2815 		0x379ec, 0x37a90,
2816 		0x37a98, 0x37ac4,
2817 		0x37ae4, 0x37b10,
2818 		0x37b24, 0x37b28,
2819 		0x37b38, 0x37b50,
2820 		0x37bf0, 0x37c10,
2821 		0x37c24, 0x37c28,
2822 		0x37c38, 0x37c50,
2823 		0x37cf0, 0x37cfc,
2824 		0x40040, 0x40040,
2825 		0x40080, 0x40084,
2826 		0x40100, 0x40100,
2827 		0x40140, 0x401bc,
2828 		0x40200, 0x40214,
2829 		0x40228, 0x40228,
2830 		0x40240, 0x40258,
2831 		0x40280, 0x40280,
2832 		0x40304, 0x40304,
2833 		0x40330, 0x4033c,
2834 		0x41304, 0x413c8,
2835 		0x413d0, 0x413dc,
2836 		0x413f0, 0x413f0,
2837 		0x41400, 0x4140c,
2838 		0x41414, 0x4141c,
2839 		0x41480, 0x414d0,
2840 		0x44000, 0x4407c,
2841 		0x440c0, 0x441ac,
2842 		0x441b4, 0x4427c,
2843 		0x442c0, 0x443ac,
2844 		0x443b4, 0x4447c,
2845 		0x444c0, 0x445ac,
2846 		0x445b4, 0x4467c,
2847 		0x446c0, 0x447ac,
2848 		0x447b4, 0x4487c,
2849 		0x448c0, 0x449ac,
2850 		0x449b4, 0x44a7c,
2851 		0x44ac0, 0x44bac,
2852 		0x44bb4, 0x44c7c,
2853 		0x44cc0, 0x44dac,
2854 		0x44db4, 0x44e7c,
2855 		0x44ec0, 0x44fac,
2856 		0x44fb4, 0x4507c,
2857 		0x450c0, 0x451ac,
2858 		0x451b4, 0x451fc,
2859 		0x45800, 0x45804,
2860 		0x45810, 0x45830,
2861 		0x45840, 0x45860,
2862 		0x45868, 0x45868,
2863 		0x45880, 0x45884,
2864 		0x458a0, 0x458b0,
2865 		0x45a00, 0x45a04,
2866 		0x45a10, 0x45a30,
2867 		0x45a40, 0x45a60,
2868 		0x45a68, 0x45a68,
2869 		0x45a80, 0x45a84,
2870 		0x45aa0, 0x45ab0,
2871 		0x460c0, 0x460e4,
2872 		0x47000, 0x4703c,
2873 		0x47044, 0x4708c,
2874 		0x47200, 0x47250,
2875 		0x47400, 0x47408,
2876 		0x47414, 0x47420,
2877 		0x47600, 0x47618,
2878 		0x47800, 0x47814,
2879 		0x47820, 0x4782c,
2880 		0x50000, 0x50084,
2881 		0x50090, 0x500cc,
2882 		0x50300, 0x50384,
2883 		0x50400, 0x50400,
2884 		0x50800, 0x50884,
2885 		0x50890, 0x508cc,
2886 		0x50b00, 0x50b84,
2887 		0x50c00, 0x50c00,
2888 		0x51000, 0x51020,
2889 		0x51028, 0x510b0,
2890 		0x51300, 0x51324,
2891 	};
2892 
2893 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2894 	const unsigned int *reg_ranges;
2895 	int reg_ranges_size, range;
2896 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2897 
2898 	/* Select the right set of register ranges to dump depending on the
2899 	 * adapter chip type.
2900 	 */
2901 	switch (chip_version) {
2902 	case CHELSIO_T4:
2903 		reg_ranges = t4_reg_ranges;
2904 		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2905 		break;
2906 
2907 	case CHELSIO_T5:
2908 		reg_ranges = t5_reg_ranges;
2909 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2910 		break;
2911 
2912 	case CHELSIO_T6:
2913 		reg_ranges = t6_reg_ranges;
2914 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2915 		break;
2916 
2917 	default:
2918 		CH_ERR(adap,
2919 			"Unsupported chip version %d\n", chip_version);
2920 		return;
2921 	}
2922 
2923 	/* Clear the register buffer and insert the appropriate register
2924 	 * values selected by the above register ranges.
2925 	 */
2926 	memset(buf, 0, buf_size);
2927 	for (range = 0; range < reg_ranges_size; range += 2) {
2928 		unsigned int reg = reg_ranges[range];
2929 		unsigned int last_reg = reg_ranges[range + 1];
2930 		u32 *bufp = (u32 *)((char *)buf + reg);
2931 
2932 		/* Iterate across the register range filling in the register
2933 		 * buffer but don't write past the end of the register buffer.
2934 		 */
2935 		while (reg <= last_reg && bufp < buf_end) {
2936 			*bufp++ = t4_read_reg(adap, reg);
2937 			reg += sizeof(u32);
2938 		}
2939 	}
2940 }
2941 
2942 /*
2943  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2944  */
2945 #define EEPROM_DELAY		10		// 10us per poll spin
2946 #define EEPROM_MAX_POLL		5000		// x 5000 == 50ms
2947 
2948 #define EEPROM_STAT_ADDR	0x7bfc
2949 #define VPD_SIZE		0x800
2950 #define VPD_BASE		0x400
2951 #define VPD_BASE_OLD		0
2952 #define VPD_LEN			1024
2953 #define VPD_INFO_FLD_HDR_SIZE	3
2954 #define CHELSIO_VPD_UNIQUE_ID	0x82
2955 
2956 /*
2957  * Small utility function to wait till any outstanding VPD Access is complete.
2958  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2959  * VPD Access in flight.  This allows us to handle the problem of having a
2960  * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
2962  */
2963 static int t4_seeprom_wait(struct adapter *adapter)
2964 {
2965 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2966 	int max_poll;
2967 
2968 	/*
2969 	 * If no VPD Access is in flight, we can just return success right
2970 	 * away.
2971 	 */
2972 	if (!adapter->vpd_busy)
2973 		return 0;
2974 
2975 	/*
2976 	 * Poll the VPD Capability Address/Flag register waiting for it
2977 	 * to indicate that the operation is complete.
2978 	 */
2979 	max_poll = EEPROM_MAX_POLL;
2980 	do {
2981 		u16 val;
2982 
2983 		udelay(EEPROM_DELAY);
2984 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2985 
2986 		/*
2987 		 * If the operation is complete, mark the VPD as no longer
2988 		 * busy and return success.
2989 		 */
2990 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2991 			adapter->vpd_busy = 0;
2992 			return 0;
2993 		}
2994 	} while (--max_poll);
2995 
2996 	/*
2997 	 * Failure!  Note that we leave the VPD Busy status set in order to
2998 	 * avoid pushing a new VPD Access request into the VPD Capability till
2999 	 * the current operation eventually succeeds.  It's a bug to issue a
3000 	 * new request when an existing request is in flight and will result
3001 	 * in corrupt hardware state.
3002 	 */
3003 	return -ETIMEDOUT;
3004 }
3005 
3006 /**
3007  *	t4_seeprom_read - read a serial EEPROM location
3008  *	@adapter: adapter to read
3009  *	@addr: EEPROM virtual address
3010  *	@data: where to store the read data
3011  *
3012  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
3013  *	VPD capability.  Note that this function must be called with a virtual
3014  *	address.
3015  */
3016 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
3017 {
3018 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3019 	int ret;
3020 
3021 	/*
3022 	 * VPD Accesses must alway be 4-byte aligned!
3023 	 */
3024 	if (addr >= EEPROMVSIZE || (addr & 3))
3025 		return -EINVAL;
3026 
3027 	/*
3028 	 * Wait for any previous operation which may still be in flight to
3029 	 * complete.
3030 	 */
3031 	ret = t4_seeprom_wait(adapter);
3032 	if (ret) {
3033 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3034 		return ret;
3035 	}
3036 
3037 	/*
3038 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3039 	 * for our request to complete.  If it doesn't complete, note the
3040 	 * error and return it to our caller.  Note that we do not reset the
3041 	 * VPD Busy status!
3042 	 */
3043 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
3044 	adapter->vpd_busy = 1;
3045 	adapter->vpd_flag = PCI_VPD_ADDR_F;
3046 	ret = t4_seeprom_wait(adapter);
3047 	if (ret) {
3048 		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
3049 		return ret;
3050 	}
3051 
3052 	/*
3053 	 * Grab the returned data, swizzle it into our endianess and
3054 	 * return success.
3055 	 */
3056 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
3057 	*data = le32_to_cpu(*data);
3058 	return 0;
3059 }
3060 
3061 /**
3062  *	t4_seeprom_write - write a serial EEPROM location
3063  *	@adapter: adapter to write
3064  *	@addr: virtual EEPROM address
3065  *	@data: value to write
3066  *
3067  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
3068  *	VPD capability.  Note that this function must be called with a virtual
3069  *	address.
3070  */
3071 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
3072 {
3073 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3074 	int ret;
3075 	u32 stats_reg;
3076 	int max_poll;
3077 
3078 	/*
3079 	 * VPD Accesses must alway be 4-byte aligned!
3080 	 */
3081 	if (addr >= EEPROMVSIZE || (addr & 3))
3082 		return -EINVAL;
3083 
3084 	/*
3085 	 * Wait for any previous operation which may still be in flight to
3086 	 * complete.
3087 	 */
3088 	ret = t4_seeprom_wait(adapter);
3089 	if (ret) {
3090 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3091 		return ret;
3092 	}
3093 
3094 	/*
3095 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3096 	 * for our request to complete.  If it doesn't complete, note the
3097 	 * error and return it to our caller.  Note that we do not reset the
3098 	 * VPD Busy status!
3099 	 */
3100 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
3101 				 cpu_to_le32(data));
3102 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
3103 				 (u16)addr | PCI_VPD_ADDR_F);
3104 	adapter->vpd_busy = 1;
3105 	adapter->vpd_flag = 0;
3106 	ret = t4_seeprom_wait(adapter);
3107 	if (ret) {
3108 		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
3109 		return ret;
3110 	}
3111 
3112 	/*
3113 	 * Reset PCI_VPD_DATA register after a transaction and wait for our
3114 	 * request to complete. If it doesn't complete, return error.
3115 	 */
3116 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
3117 	max_poll = EEPROM_MAX_POLL;
3118 	do {
3119 		udelay(EEPROM_DELAY);
3120 		ret = t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
3121 		if (!ret && (stats_reg & 0x1))
3122 			break;
3123 	} while (--max_poll);
3124 	if (!max_poll)
3125 		return -ETIMEDOUT;
3126 
3127 	/* Return success! */
3128 	return 0;
3129 }
3130 
3131 /**
3132  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
3133  *	@phys_addr: the physical EEPROM address
3134  *	@fn: the PCI function number
3135  *	@sz: size of function-specific area
3136  *
3137  *	Translate a physical EEPROM address to virtual.  The first 1K is
3138  *	accessed through virtual addresses starting at 31K, the rest is
3139  *	accessed through virtual addresses starting at 0.
3140  *
3141  *	The mapping is as follows:
3142  *	[0..1K) -> [31K..32K)
3143  *	[1K..1K+A) -> [ES-A..ES)
3144  *	[1K+A..ES) -> [0..ES-A-1K)
3145  *
3146  *	where A = @fn * @sz, and ES = EEPROM size.
3147  */
3148 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3149 {
3150 	fn *= sz;
3151 	if (phys_addr < 1024)
3152 		return phys_addr + (31 << 10);
3153 	if (phys_addr < 1024 + fn)
3154 		return EEPROMSIZE - fn + phys_addr - 1024;
3155 	if (phys_addr < EEPROMSIZE)
3156 		return phys_addr - 1024 - fn;
3157 	return -EINVAL;
3158 }
3159 
3160 /**
3161  *	t4_seeprom_wp - enable/disable EEPROM write protection
3162  *	@adapter: the adapter
3163  *	@enable: whether to enable or disable write protection
3164  *
3165  *	Enables or disables write protection on the serial EEPROM.
3166  */
3167 int t4_seeprom_wp(struct adapter *adapter, int enable)
3168 {
3169 	return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3170 }
3171 
3172 /**
3173  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
3174  *	@v: Pointer to buffered vpd data structure
3175  *	@kw: The keyword to search for
3176  *
3177  *	Returns the value of the information field keyword or
3178  *	-ENOENT otherwise.
3179  */
3180 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3181 {
3182 	int i;
3183 	unsigned int offset , len;
3184 	const u8 *buf = (const u8 *)v;
3185 	const u8 *vpdr_len = &v->vpdr_len[0];
3186 	offset = sizeof(struct t4_vpd_hdr);
3187 	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3188 
3189 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3190 		return -ENOENT;
3191 	}
3192 
3193 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3194 		if(memcmp(buf + i , kw , 2) == 0){
3195 			i += VPD_INFO_FLD_HDR_SIZE;
3196 			return i;
3197 		}
3198 
3199 		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3200 	}
3201 
3202 	return -ENOENT;
3203 }
3204 
3205 /*
3206  * str_strip
3207  * Removes trailing whitespaces from string "s"
3208  * Based on strstrip() implementation in string.c
3209  */
/*
 * str_strip
 * Removes trailing whitespace from string "s" in place.
 *
 * Rewritten to (a) cast to unsigned char before calling isspace() — passing
 * a plain char whose value is negative is undefined behavior per the C
 * standard (<ctype.h> functions require a value representable as unsigned
 * char or EOF) — and (b) walk an index instead of a pointer so we never
 * form the out-of-bounds pointer s - 1 when the string is all whitespace.
 */
static void str_strip(char *s)
{
	size_t len = strlen(s);

	/* Back up over any trailing whitespace, then re-terminate. */
	while (len > 0 && isspace((unsigned char)s[len - 1]))
		len--;
	s[len] = '\0';
}
3224 
3225 /**
3226  *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3227  *	@adapter: adapter to read
3228  *	@p: where to store the parameters
3229  *
3230  *	Reads card parameters stored in VPD EEPROM.
3231  */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;	/* offsets of the keyword values within vpd[] */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	/* Scratch buffer holding one full VPD image; freed at "out". */
	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the whole VPD image, one 32-bit word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
 	v = (const struct t4_vpd_hdr *)vpd;

/*
 * Locate the value offset of VPD keyword "name" in "var", or bail out to
 * "out" with -EINVAL if the keyword is missing.
 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out;      \
	} \
} while (0)

	/*
	 * Verify the checksum: summing every byte from the start of the VPD
	 * through the "RV" checksum byte itself must yield zero.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/*
	 * Copy each field into *p and trim trailing whitespace.  The byte at
	 * offset 2 of each field's 3-byte header (i.e. just before the
	 * value) holds the value's length; clamp it to the destination size.
	 */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ? ret : 0;
}
3318 
3319 /**
3320  *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3321  *	@adapter: adapter to read
3322  *	@p: where to store the parameters
3323  *
3324  *	Reads card parameters stored in VPD EEPROM and retrieves the Core
3325  *	Clock.  This can only be called after a connection to the firmware
3326  *	is established.
3327  */
3328 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3329 {
3330 	u32 cclk_param, cclk_val;
3331 	int ret;
3332 
3333 	/*
3334 	 * Grab the raw VPD parameters.
3335 	 */
3336 	ret = t4_get_raw_vpd_params(adapter, p);
3337 	if (ret)
3338 		return ret;
3339 
3340 	/*
3341 	 * Ask firmware for the Core Clock since it knows how to translate the
3342 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
3343 	 */
3344 	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3345 		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3346 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3347 			      1, &cclk_param, &cclk_val);
3348 
3349 	if (ret)
3350 		return ret;
3351 	p->cclk = cclk_val;
3352 
3353 	return 0;
3354 }
3355 
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/*
	 * flash command opcodes, sent as the first byte of an SF transfer
	 * (these appear to follow the common SPI NOR flash command set)
	 */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3369 
3370 /**
3371  *	sf1_read - read data from the serial flash
3372  *	@adapter: the adapter
3373  *	@byte_cnt: number of bytes to read
3374  *	@cont: whether another operation will be chained
3375  *	@lock: whether to lock SF for PL access only
3376  *	@valp: where to store the read data
3377  *
3378  *	Reads up to 4 bytes of data from the serial flash.  The location of
3379  *	the read needs to be specified prior to calling this by issuing the
3380  *	appropriate commands to the serial flash.
3381  */
3382 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3383 		    int lock, u32 *valp)
3384 {
3385 	int ret;
3386 
3387 	if (!byte_cnt || byte_cnt > 4)
3388 		return -EINVAL;
3389 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3390 		return -EBUSY;
3391 	t4_write_reg(adapter, A_SF_OP,
3392 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3393 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3394 	if (!ret)
3395 		*valp = t4_read_reg(adapter, A_SF_DATA);
3396 	return ret;
3397 }
3398 
3399 /**
3400  *	sf1_write - write data to the serial flash
3401  *	@adapter: the adapter
3402  *	@byte_cnt: number of bytes to write
3403  *	@cont: whether another operation will be chained
3404  *	@lock: whether to lock SF for PL access only
3405  *	@val: value to write
3406  *
3407  *	Writes up to 4 bytes of data to the serial flash.  The location of
3408  *	the write needs to be specified prior to calling this by issuing the
3409  *	appropriate commands to the serial flash.
3410  */
3411 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3412 		     int lock, u32 val)
3413 {
3414 	if (!byte_cnt || byte_cnt > 4)
3415 		return -EINVAL;
3416 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3417 		return -EBUSY;
3418 	t4_write_reg(adapter, A_SF_DATA, val);
3419 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3420 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3421 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3422 }
3423 
3424 /**
3425  *	flash_wait_op - wait for a flash operation to complete
3426  *	@adapter: the adapter
3427  *	@attempts: max number of polls of the status register
3428  *	@delay: delay between polls in ms
3429  *
3430  *	Wait for a flash operation to complete by polling the status register.
3431  */
3432 static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
3433 {
3434 	int ret;
3435 	u32 status;
3436 
3437 	while (1) {
3438 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3439 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3440 			return ret;
3441 		if (!(status & 1))
3442 			return 0;
3443 		if (--attempts == 0)
3444 			return -EAGAIN;
3445 		if (ch_delay) {
3446 #ifdef CONFIG_CUDBG
3447 			if (adapter->flags & K_CRASH)
3448 				mdelay(ch_delay);
3449 			else
3450 #endif
3451 				msleep(ch_delay);
3452 		}
3453 	}
3454 }
3455 
3456 /**
3457  *	t4_read_flash - read words from serial flash
3458  *	@adapter: the adapter
3459  *	@addr: the start address for the read
3460  *	@nwords: how many 32-bit words to read
3461  *	@data: where to store the read data
3462  *	@byte_oriented: whether to store data as bytes or as words
3463  *
3464  *	Read the specified number of 32-bit words from the serial flash.
3465  *	If @byte_oriented is set the read data is stored as a byte array
3466  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3467  *	natural endianness.
3468  */
3469 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3470 		  unsigned int nwords, u32 *data, int byte_oriented)
3471 {
3472 	int ret;
3473 
3474 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3475 		return -EINVAL;
3476 
3477 	addr = swab32(addr) | SF_RD_DATA_FAST;
3478 
3479 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3480 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3481 		return ret;
3482 
3483 	for ( ; nwords; nwords--, data++) {
3484 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3485 		if (nwords == 1)
3486 			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3487 		if (ret)
3488 			return ret;
3489 		if (byte_oriented)
3490 			*data = (__force __u32)(cpu_to_be32(*data));
3491 	}
3492 	return 0;
3493 }
3494 
3495 /**
3496  *	t4_write_flash - write up to a page of data to the serial flash
3497  *	@adapter: the adapter
3498  *	@addr: the start address to write
3499  *	@n: length of data to write in bytes
3500  *	@data: the data to write
3501  *	@byte_oriented: whether to store data as bytes or as words
3502  *
3503  *	Writes up to a page of data (256 bytes) to the serial flash starting
3504  *	at the given address.  All the data must be written to the same page.
3505  *	If @byte_oriented is set the write data is stored as byte stream
3506  *	(i.e. matches what on disk), otherwise in big-endian.
3507  */
3508 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3509 			  unsigned int n, const u8 *data, int byte_oriented)
3510 {
3511 	int ret;
3512 	u32 buf[64];
3513 	unsigned int i, c, left, val, offset = addr & 0xff;
3514 
3515 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3516 		return -EINVAL;
3517 
3518 	val = swab32(addr) | SF_PROG_PAGE;
3519 
3520 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3521 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3522 		goto unlock;
3523 
3524 	for (left = n; left; left -= c) {
3525 		c = min(left, 4U);
3526 		for (val = 0, i = 0; i < c; ++i)
3527 			val = (val << 8) + *data++;
3528 
3529 		if (!byte_oriented)
3530 			val = cpu_to_be32(val);
3531 
3532 		ret = sf1_write(adapter, c, c != left, 1, val);
3533 		if (ret)
3534 			goto unlock;
3535 	}
3536 	ret = flash_wait_op(adapter, 8, 1);
3537 	if (ret)
3538 		goto unlock;
3539 
3540 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3541 
3542 	/* Read the page to verify the write succeeded */
3543 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3544 			    byte_oriented);
3545 	if (ret)
3546 		return ret;
3547 
3548 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
3549 		CH_ERR(adapter,
3550 			"failed to correctly write the flash page at %#x\n",
3551 			addr);
3552 		return -EIO;
3553 	}
3554 	return 0;
3555 
3556 unlock:
3557 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3558 	return ret;
3559 }
3560 
3561 /**
3562  *	t4_get_fw_version - read the firmware version
3563  *	@adapter: the adapter
3564  *	@vers: where to place the version
3565  *
3566  *	Reads the FW version from flash.
3567  */
3568 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3569 {
3570 	return t4_read_flash(adapter, FLASH_FW_START +
3571 			     offsetof(struct fw_hdr, fw_ver), 1,
3572 			     vers, 0);
3573 }
3574 
3575 /**
3576  *	t4_get_bs_version - read the firmware bootstrap version
3577  *	@adapter: the adapter
3578  *	@vers: where to place the version
3579  *
3580  *	Reads the FW Bootstrap version from flash.
3581  */
3582 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3583 {
3584 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3585 			     offsetof(struct fw_hdr, fw_ver), 1,
3586 			     vers, 0);
3587 }
3588 
3589 /**
3590  *	t4_get_tp_version - read the TP microcode version
3591  *	@adapter: the adapter
3592  *	@vers: where to place the version
3593  *
3594  *	Reads the TP microcode version from flash.
3595  */
3596 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3597 {
3598 	return t4_read_flash(adapter, FLASH_FW_START +
3599 			     offsetof(struct fw_hdr, tp_microcode_ver),
3600 			     1, vers, 0);
3601 }
3602 
3603 /**
3604  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3605  *	@adapter: the adapter
3606  *	@vers: where to place the version
3607  *
3608  *	Reads the Expansion ROM header from FLASH and returns the version
3609  *	number (if present) through the @vers return value pointer.  We return
3610  *	this in the Firmware Version Format since it's convenient.  Return
3611  *	0 on success, -ENOENT if no Expansion ROM is present.
3612  */
3613 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
3614 {
3615 	struct exprom_header {
3616 		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3617 		unsigned char hdr_ver[4];	/* Expansion ROM version */
3618 	} *hdr;
3619 	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3620 					   sizeof(u32))];
3621 	int ret;
3622 
3623 	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
3624 			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3625 			    0);
3626 	if (ret)
3627 		return ret;
3628 
3629 	hdr = (struct exprom_header *)exprom_header_buf;
3630 	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3631 		return -ENOENT;
3632 
3633 	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3634 		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3635 		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3636 		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3637 	return 0;
3638 }
3639 
3640 /**
3641  *	t4_get_scfg_version - return the Serial Configuration version
3642  *	@adapter: the adapter
3643  *	@vers: where to place the version
3644  *
3645  *	Reads the Serial Configuration Version via the Firmware interface
3646  *	(thus this can only be called once we're ready to issue Firmware
3647  *	commands).  The format of the Serial Configuration version is
3648  *	adapter specific.  Returns 0 on success, an error on failure.
3649  *
3650  *	Note that early versions of the Firmware didn't include the ability
3651  *	to retrieve the Serial Configuration version, so we zero-out the
3652  *	return-value parameter in that case to avoid leaving it with
3653  *	garbage in it.
3654  *
3655  *	Also note that the Firmware will return its cached copy of the Serial
3656  *	Initialization Revision ID, not the actual Revision ID as written in
3657  *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3658  *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3659  *	it's best to defer calling this routine till after a FW_RESET_CMD has
3660  *	been issued if the Host Driver will be performing a full adapter
3661  *	initialization.
3662  */
3663 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3664 {
3665 	u32 scfgrev_param;
3666 	int ret;
3667 
3668 	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3669 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3670 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3671 			      1, &scfgrev_param, vers);
3672 	if (ret)
3673 		*vers = 0;
3674 	return ret;
3675 }
3676 
3677 /**
3678  *	t4_get_vpd_version - return the VPD version
3679  *	@adapter: the adapter
3680  *	@vers: where to place the version
3681  *
3682  *	Reads the VPD via the Firmware interface (thus this can only be called
3683  *	once we're ready to issue Firmware commands).  The format of the
3684  *	VPD version is adapter specific.  Returns 0 on success, an error on
3685  *	failure.
3686  *
3687  *	Note that early versions of the Firmware didn't include the ability
3688  *	to retrieve the VPD version, so we zero-out the return-value parameter
3689  *	in that case to avoid leaving it with garbage in it.
3690  *
3691  *	Also note that the Firmware will return its cached copy of the VPD
3692  *	Revision ID, not the actual Revision ID as written in the Serial
3693  *	EEPROM.  This is only an issue if a new VPD has been written and the
3694  *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3695  *	to defer calling this routine till after a FW_RESET_CMD has been issued
3696  *	if the Host Driver will be performing a full adapter initialization.
3697  */
3698 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3699 {
3700 	u32 vpdrev_param;
3701 	int ret;
3702 
3703 	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3704 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3705 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3706 			      1, &vpdrev_param, vers);
3707 	if (ret)
3708 		*vers = 0;
3709 	return ret;
3710 }
3711 
3712 /**
3713  *	t4_get_version_info - extract various chip/firmware version information
3714  *	@adapter: the adapter
3715  *
3716  *	Reads various chip/firmware version numbers and stores them into the
3717  *	adapter Adapter Parameters structure.  If any of the efforts fails
3718  *	the first failure will be returned, but all of the version numbers
3719  *	will be read.
3720  */
3721 int t4_get_version_info(struct adapter *adapter)
3722 {
3723 	int ret = 0;
3724 
3725 	#define FIRST_RET(__getvinfo) \
3726 	do { \
3727 		int __ret = __getvinfo; \
3728 		if (__ret && !ret) \
3729 			ret = __ret; \
3730 	} while (0)
3731 
3732 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3733 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3734 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3735 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3736 	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3737 	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3738 
3739 	#undef FIRST_RET
3740 
3741 	return ret;
3742 }
3743 
3744 /**
3745  *	t4_dump_version_info - dump all of the adapter configuration IDs
3746  *	@adapter: the adapter
3747  *
3748  *	Dumps all of the various bits of adapter configuration version/revision
3749  *	IDs information.  This is typically called at some point after
3750  *	t4_get_version_info() has been called.
3751  */
3752 void t4_dump_version_info(struct adapter *adapter)
3753 {
3754 	/*
3755 	 * Device information.
3756 	 */
3757 	CH_INFO(adapter, "Chelsio %s rev %d\n",
3758 		adapter->params.vpd.id,
3759 		CHELSIO_CHIP_RELEASE(adapter->params.chip));
3760 	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
3761 		adapter->params.vpd.sn,
3762 		adapter->params.vpd.pn);
3763 
3764 	/*
3765 	 * Firmware Version.
3766 	 */
3767 	if (!adapter->params.fw_vers)
3768 		CH_WARN(adapter, "No firmware loaded\n");
3769 	else
3770 		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
3771 			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
3772 			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
3773 			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
3774 			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
3775 
3776 	/*
3777 	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
3778 	 * Firmware, so dev_info() is more appropriate here.)
3779 	 */
3780 	if (!adapter->params.bs_vers)
3781 		CH_INFO(adapter, "No bootstrap loaded\n");
3782 	else
3783 		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
3784 			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
3785 			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
3786 			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
3787 			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
3788 
3789 	/*
3790 	 * TP Microcode Version.
3791 	 */
3792 	if (!adapter->params.tp_vers)
3793 		CH_WARN(adapter, "No TP Microcode loaded\n");
3794 	else
3795 		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
3796 			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
3797 			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
3798 			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
3799 			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
3800 
3801 	/*
3802 	 * Expansion ROM version.
3803 	 */
3804 	if (!adapter->params.er_vers)
3805 		CH_INFO(adapter, "No Expansion ROM loaded\n");
3806 	else
3807 		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
3808 			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
3809 			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
3810 			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
3811 			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
3812 
3813 
3814 	/*
3815 	 * Serial Configuration version.
3816 	 */
3817 	CH_INFO(adapter, "Serial Configuration version: %x\n",
3818 		adapter->params.scfg_vers);
3819 
3820 	/*
3821 	 * VPD  version.
3822 	 */
3823 	CH_INFO(adapter, "VPD version: %x\n",
3824 		adapter->params.vpd_vers);
3825 }
3826 
3827 /**
3828  *	t4_check_fw_version - check if the FW is supported with this driver
3829  *	@adap: the adapter
3830  *
3831  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
3832  *	if there's exact match, a negative error if the version could not be
3833  *	read or there's a major version mismatch
3834  */
3835 int t4_check_fw_version(struct adapter *adap)
3836 {
3837 	int ret, major, minor, micro;
3838 	int exp_major, exp_minor, exp_micro;
3839 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3840 
3841 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3842 	if (ret)
3843 		return ret;
3844 
3845 	major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3846 	minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3847 	micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3848 
3849 	switch (chip_version) {
3850 	case CHELSIO_T4:
3851 		exp_major = T4FW_MIN_VERSION_MAJOR;
3852 		exp_minor = T4FW_MIN_VERSION_MINOR;
3853 		exp_micro = T4FW_MIN_VERSION_MICRO;
3854 		break;
3855 	case CHELSIO_T5:
3856 		exp_major = T5FW_MIN_VERSION_MAJOR;
3857 		exp_minor = T5FW_MIN_VERSION_MINOR;
3858 		exp_micro = T5FW_MIN_VERSION_MICRO;
3859 		break;
3860 	case CHELSIO_T6:
3861 		exp_major = T6FW_MIN_VERSION_MAJOR;
3862 		exp_minor = T6FW_MIN_VERSION_MINOR;
3863 		exp_micro = T6FW_MIN_VERSION_MICRO;
3864 		break;
3865 	default:
3866 		CH_ERR(adap, "Unsupported chip type, %x\n",
3867 			adap->params.chip);
3868 		return -EINVAL;
3869 	}
3870 
3871 	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3872 	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3873 		CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
3874 			"supported firmware is %u.%u.%u.\n", major, minor,
3875 			micro, exp_major, exp_minor, exp_micro);
3876 		return -EFAULT;
3877 	}
3878 	return 0;
3879 }
3880 
3881 /* Is the given firmware API compatible with the one the driver was compiled
3882  * with?
3883  */
3884 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3885 {
3886 
3887 	/* short circuit if it's the exact same firmware version */
3888 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3889 		return 1;
3890 
3891 	/*
3892 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
3893 	 * features that are supported in the driver.
3894 	 */
3895 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3896 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3897 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3898 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3899 		return 1;
3900 #undef SAME_INTF
3901 
3902 	return 0;
3903 }
3904 
3905 /* The firmware in the filesystem is usable, but should it be installed?
3906  * This routine explains itself in detail if it indicates the filesystem
3907  * firmware should be installed.
3908  */
3909 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3910 				int k, int c, int t4_fw_install)
3911 {
3912 	const char *reason;
3913 
3914 	if (!card_fw_usable) {
3915 		reason = "incompatible or unusable";
3916 		goto install;
3917 	}
3918 
3919 	if (k > c) {
3920 		reason = "older than the version bundled with this driver";
3921 		goto install;
3922 	}
3923 
3924 	if (t4_fw_install == 2 && k != c) {
3925 		reason = "different than the version bundled with this driver";
3926 		goto install;
3927 	}
3928 
3929 	return 0;
3930 
3931 install:
3932 	if (t4_fw_install == 0) {
3933 		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3934 		       "but the driver is prohibited from installing a "
3935 		       "different firmware on the card.\n",
3936 		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3937 		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3938 		       reason);
3939 
3940 		return (0);
3941 	}
3942 
3943 	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3944 	       "installing firmware %u.%u.%u.%u on card.\n",
3945 	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3946 	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3947 	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3948 	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3949 
3950 	return 1;
3951 }
3952 
/**
 *	t4_prep_fw - decide which firmware to run and install it if needed
 *	@adap: the adapter
 *	@fw_info: version/interface info the driver was compiled against
 *	@fw_data: firmware image supplied by the OS (may be NULL)
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: receives the header of the firmware found on the card
 *	@t4_fw_install: install policy (0 = never install; 2 = install
 *		whenever the image differs; see should_install_fs_fw())
 *	@state: current firmware/device state
 *	@reset: in/out; cleared when the upgrade already reset the chip
 *
 *	NOTE(review): unlike most routines in this file, this one returns
 *	POSITIVE errno values (the t4_* calls below are negated).
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* The filesystem image, if supplied, is a candidate too. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					 t4_fw_install)) {

		/* Only (re)flash when nothing has initialized the device. */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* NOTE(review): if the flash read failed above, "c" below is
		 * whatever was in the caller's card_fw buffer — log only.
		 */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
			"fw_install %d, chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			t4_fw_install, state,
			G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
			G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
			G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
			G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
			G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
			G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4043 
4044 /**
4045  *	t4_flash_erase_sectors - erase a range of flash sectors
4046  *	@adapter: the adapter
4047  *	@start: the first sector to erase
4048  *	@end: the last sector to erase
4049  *
4050  *	Erases the sectors in the given inclusive range.
4051  */
4052 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4053 {
4054 	int ret = 0;
4055 
4056 	if (end >= adapter->params.sf_nsec)
4057 		return -EINVAL;
4058 
4059 	while (start <= end) {
4060 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4061 		    (ret = sf1_write(adapter, 4, 0, 1,
4062 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
4063 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4064 			CH_ERR(adapter,
4065 				"erase of flash sector %d failed, error %d\n",
4066 				start, ret);
4067 			break;
4068 		}
4069 		start++;
4070 	}
4071 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
4072 	return ret;
4073 }
4074 
4075 /**
4076  *	t4_flash_cfg_addr - return the address of the flash configuration file
4077  *	@adapter: the adapter
4078  *
4079  *	Return the address within the flash where the Firmware Configuration
4080  *	File is stored, or an error if the device FLASH is too small to contain
4081  *	a Firmware Configuration File.
4082  */
4083 int t4_flash_cfg_addr(struct adapter *adapter)
4084 {
4085 	/*
4086 	 * If the device FLASH isn't large enough to hold a Firmware
4087 	 * Configuration File, return an error.
4088 	 */
4089 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4090 		return -ENOSPC;
4091 
4092 	return FLASH_CFG_START;
4093 }
4094 
4095 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
4096  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
4097  * and emit an error message for mismatched firmware to save our caller the
4098  * effort ...
4099  */
4100 static int t4_fw_matches_chip(const struct adapter *adap,
4101 			      const struct fw_hdr *hdr)
4102 {
4103 	/*
4104 	 * The expression below will return FALSE for any unsupported adapter
4105 	 * which will keep us "honest" in the future ...
4106 	 */
4107 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4108 	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4109 	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4110 		return 1;
4111 
4112 	CH_ERR(adap,
4113 		"FW image (%d) is not suitable for this adapter (%d)\n",
4114 		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4115 	return 0;
4116 }
4117 
4118 /**
4119  *	t4_load_fw - download firmware
4120  *	@adap: the adapter
4121  *	@fw_data: the firmware image to write
4122  *	@size: image size
4123  *	@bootstrap: indicates if the binary is a bootstrap fw
4124  *
4125  *	Write the supplied firmware image to the card's serial flash.
4126  */
4127 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
4128 	       unsigned int bootstrap)
4129 {
4130 	u32 csum;
4131 	int ret, addr;
4132 	unsigned int i;
4133 	u8 first_page[SF_PAGE_SIZE];
4134 	const __be32 *p = (const __be32 *)fw_data;
4135 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
4136 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
4137 	unsigned int fw_start_sec;
4138 	unsigned int fw_start;
4139 	unsigned int fw_size;
4140 
4141 	if (bootstrap) {
4142 		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
4143 		fw_start = FLASH_FWBOOTSTRAP_START;
4144 		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
4145 	} else {
4146 		fw_start_sec = FLASH_FW_START_SEC;
4147  		fw_start = FLASH_FW_START;
4148 		fw_size = FLASH_FW_MAX_SIZE;
4149 	}
4150 
4151 	if (!size) {
4152 		CH_ERR(adap, "FW image has no data\n");
4153 		return -EINVAL;
4154 	}
4155 	if (size & 511) {
4156 		CH_ERR(adap,
4157 			"FW image size not multiple of 512 bytes\n");
4158 		return -EINVAL;
4159 	}
4160 	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
4161 		CH_ERR(adap,
4162 			"FW image size differs from size in FW header\n");
4163 		return -EINVAL;
4164 	}
4165 	if (size > fw_size) {
4166 		CH_ERR(adap, "FW image too large, max is %u bytes\n",
4167 			fw_size);
4168 		return -EFBIG;
4169 	}
4170 	if (!t4_fw_matches_chip(adap, hdr))
4171 		return -EINVAL;
4172 
4173 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
4174 		csum += be32_to_cpu(p[i]);
4175 
4176 	if (csum != 0xffffffff) {
4177 		CH_ERR(adap,
4178 			"corrupted firmware image, checksum %#x\n", csum);
4179 		return -EINVAL;
4180 	}
4181 
4182 	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
4183 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
4184 	if (ret)
4185 		goto out;
4186 
4187 	/*
4188 	 * We write the correct version at the end so the driver can see a bad
4189 	 * version if the FW write fails.  Start by writing a copy of the
4190 	 * first page with a bad version.
4191 	 */
4192 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
4193 	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
4194 	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
4195 	if (ret)
4196 		goto out;
4197 
4198 	addr = fw_start;
4199 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
4200 		addr += SF_PAGE_SIZE;
4201 		fw_data += SF_PAGE_SIZE;
4202 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
4203 		if (ret)
4204 			goto out;
4205 	}
4206 
4207 	ret = t4_write_flash(adap,
4208 			     fw_start + offsetof(struct fw_hdr, fw_ver),
4209 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
4210 out:
4211 	if (ret)
4212 		CH_ERR(adap, "firmware download failed, error %d\n",
4213 			ret);
4214 	else {
4215 		if (bootstrap)
4216 			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
4217 		else
4218 			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
4219 	}
4220 	return ret;
4221 }
4222 
4223 /**
4224  *	t4_phy_fw_ver - return current PHY firmware version
4225  *	@adap: the adapter
4226  *	@phy_fw_ver: return value buffer for PHY firmware version
4227  *
4228  *	Returns the current version of external PHY firmware on the
4229  *	adapter.
4230  */
4231 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4232 {
4233 	u32 param, val;
4234 	int ret;
4235 
4236 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4237 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4238 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4239 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4240 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4241 			      &param, &val);
4242 	if (ret < 0)
4243 		return ret;
4244 	*phy_fw_ver = val;
4245 	return 0;
4246 }
4247 
4248 /**
4249  *	t4_load_phy_fw - download port PHY firmware
4250  *	@adap: the adapter
4251  *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
4252  *	@lock: the lock to use to guard the memory copy
4253  *	@phy_fw_version: function to check PHY firmware versions
4254  *	@phy_fw_data: the PHY firmware image to write
4255  *	@phy_fw_size: image size
4256  *
4257  *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
4258  *	@phy_fw_version is supplied, then it will be used to determine if
4259  *	it's necessary to perform the transfer by comparing the version
4260  *	of any existing adapter PHY firmware with that of the passed in
4261  *	PHY firmware image.  If @lock is non-NULL then it will be used
4262  *	around the call to t4_memory_rw() which transfers the PHY firmware
4263  *	to the adapter.
4264  *
4265  *	A negative error number will be returned if an error occurs.  If
4266  *	version number support is available and there's no need to upgrade
4267  *	the firmware, 0 will be returned.  If firmware is successfully
4268  *	transferred to the adapter, 1 will be retured.
4269  *
4270  *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
4271  *	a result, a RESET of the adapter would cause that RAM to lose its
4272  *	contents.  Thus, loading PHY firmware on such adapters must happen after any
4273  *	FW_RESET_CMDs ...
4274  */
4275 int t4_load_phy_fw(struct adapter *adap,
4276 		   int win, t4_os_lock_t *lock,
4277 		   int (*phy_fw_version)(const u8 *, size_t),
4278 		   const u8 *phy_fw_data, size_t phy_fw_size)
4279 {
4280 	unsigned long mtype = 0, maddr = 0;
4281 	u32 param, val;
4282 	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4283 	int ret;
4284 
4285 	/*
4286 	 * If we have version number support, then check to see if the adapter
4287 	 * already has up-to-date PHY firmware loaded.
4288 	 */
4289 	 if (phy_fw_version) {
4290 		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4291 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4292 		if (ret < 0)
4293 			return ret;;
4294 
4295 		if (cur_phy_fw_ver >= new_phy_fw_vers) {
4296 			CH_WARN(adap, "PHY Firmware already up-to-date, "
4297 				"version %#x\n", cur_phy_fw_ver);
4298 			return 0;
4299 		}
4300 	}
4301 
4302 	/*
4303 	 * Ask the firmware where it wants us to copy the PHY firmware image.
4304 	 * The size of the file requires a special version of the READ coommand
4305 	 * which will pass the file size via the values field in PARAMS_CMD and
4306 	 * retreive the return value from firmware and place it in the same
4307 	 * buffer values
4308 	 */
4309 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4310 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4311 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4312 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4313 	val = phy_fw_size;
4314 	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4315 			      &param, &val, 1, true);
4316 	if (ret < 0)
4317 		return ret;
4318 	mtype = val >> 8;
4319 	maddr = (val & 0xff) << 16;
4320 
4321 	/*
4322 	 * Copy the supplied PHY Firmware image to the adapter memory location
4323 	 * allocated by the adapter firmware.
4324 	 */
4325 	if (lock)
4326 		t4_os_lock(lock);
4327 	ret = t4_memory_rw(adap, win, mtype, maddr,
4328 			   phy_fw_size, (__be32*)phy_fw_data,
4329 			   T4_MEMORY_WRITE);
4330 	if (lock)
4331 		t4_os_unlock(lock);
4332 	if (ret)
4333 		return ret;
4334 
4335 	/*
4336 	 * Tell the firmware that the PHY firmware image has been written to
4337 	 * RAM and it can now start copying it over to the PHYs.  The chip
4338 	 * firmware will RESET the affected PHYs as part of this operation
4339 	 * leaving them running the new PHY firmware image.
4340 	 */
4341 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4342 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4343 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4344 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4345 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4346 				    &param, &val, 30000);
4347 
4348 	/*
4349 	 * If we have version number support, then check to see that the new
4350 	 * firmware got loaded properly.
4351 	 */
4352 	if (phy_fw_version) {
4353 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4354 		if (ret < 0)
4355 			return ret;
4356 
4357 		if (cur_phy_fw_ver != new_phy_fw_vers) {
4358 			CH_WARN(adap, "PHY Firmware did not update: "
4359 				"version on adapter %#x, "
4360 				"version flashed %#x\n",
4361 				cur_phy_fw_ver, new_phy_fw_vers);
4362 			return -ENXIO;
4363 		}
4364 	}
4365 
4366 	return 1;
4367 }
4368 
4369 /**
4370  *	t4_fwcache - firmware cache operation
4371  *	@adap: the adapter
4372  *	@op  : the operation (flush or flush and invalidate)
4373  */
4374 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4375 {
4376 	struct fw_params_cmd c;
4377 
4378 	memset(&c, 0, sizeof(c));
4379 	c.op_to_vfn =
4380 	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4381 			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4382 				V_FW_PARAMS_CMD_PFN(adap->pf) |
4383 				V_FW_PARAMS_CMD_VFN(0));
4384 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4385 	c.param[0].mnem =
4386 	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4387 			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4388 	c.param[0].val = (__force __be32)op;
4389 
4390 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4391 }
4392 
/*
 * t4_cim_read_pif_la - read the CIM PIF logic-analyzer capture buffers
 * @adap: the adapter
 * @pif_req: buffer receiving the outbound (PO) capture words
 * @pif_rsp: buffer receiving the inbound (PI) capture words
 * @pif_req_wrptr: if non-NULL, receives the request-side write pointer
 * @pif_rsp_wrptr: if non-NULL, receives the response-side write pointer
 *
 * Reads CIM_PIFLA_SIZE rows of 6 entries each from the PO/PI debug data
 * registers, stepping the read pointers through CIM_DEBUGCFG.  Capture is
 * paused for the duration of the read and the original debug configuration
 * is restored at the end.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Pause the logic analyzer while we read it out (LADBGEN off). */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Start reading from the current hardware write pointers. */
	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entries, then read both capture words. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip two entries between rows, wrapping within the mask. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration (re-enables capture). */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4426 
4427 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
4428 {
4429 	u32 cfg;
4430 	int i, j, idx;
4431 
4432 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
4433 	if (cfg & F_LADBGEN)
4434 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
4435 
4436 	for (i = 0; i < CIM_MALA_SIZE; i++) {
4437 		for (j = 0; j < 5; j++) {
4438 			idx = 8 * i + j;
4439 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
4440 				     V_PILADBGRDPTR(idx));
4441 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
4442 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
4443 		}
4444 	}
4445 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
4446 }
4447 
4448 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
4449 {
4450 	unsigned int i, j;
4451 
4452 	for (i = 0; i < 8; i++) {
4453 		u32 *p = la_buf + i;
4454 
4455 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
4456 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
4457 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
4458 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
4459 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
4460 	}
4461 }
4462 
/* Port capabilities we may advertise: all speed bits plus autonegotiation. */
#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
		     FW_PORT_CAP_ANEG)
4465 
4466 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4467 static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause)
4468 {
4469 	unsigned int cc_pause = 0;
4470 
4471 	if (fw_pause & FW_PORT_CAP_FC_RX)
4472 		cc_pause |= PAUSE_RX;
4473 	if (fw_pause & FW_PORT_CAP_FC_TX)
4474 		cc_pause |= PAUSE_TX;
4475 
4476 	return cc_pause;
4477 }
4478 
4479 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4480 static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause)
4481 {
4482 	unsigned int fw_pause = 0;
4483 
4484 	if (cc_pause & PAUSE_RX)
4485 		fw_pause |= FW_PORT_CAP_FC_RX;
4486 	if (cc_pause & PAUSE_TX)
4487 		fw_pause |= FW_PORT_CAP_FC_TX;
4488 
4489 	return fw_pause;
4490 }
4491 
4492 /* Translate Firmware Forward Error Correction specification to Common Code */
4493 static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec)
4494 {
4495 	unsigned int cc_fec = 0;
4496 
4497 	if (fw_fec & FW_PORT_CAP_FEC_RS)
4498 		cc_fec |= FEC_RS;
4499 	if (fw_fec & FW_PORT_CAP_FEC_BASER_RS)
4500 		cc_fec |= FEC_BASER_RS;
4501 
4502 	return cc_fec;
4503 }
4504 
4505 /* Translate Common Code Forward Error Correction specification to Firmware */
4506 static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec)
4507 {
4508 	unsigned int fw_fec = 0;
4509 
4510 	if (cc_fec & FEC_RS)
4511 		fw_fec |= FW_PORT_CAP_FEC_RS;
4512 	if (cc_fec & FEC_BASER_RS)
4513 		fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
4514 
4515 	return fw_fec;
4516 }
4517 
4518 /**
4519  *	t4_link_l1cfg - apply link configuration to MAC/PHY
4520  *	@phy: the PHY to setup
4521  *	@mac: the MAC to setup
4522  *	@lc: the requested link configuration
4523  *
4524  *	Set up a port's MAC and PHY according to a desired link configuration.
4525  *	- If the PHY can auto-negotiate first decide what to advertise, then
4526  *	  enable/disable auto-negotiation as desired, and reset.
4527  *	- If the PHY does not auto-negotiate just reset it.
4528  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4529  *	  otherwise do it later based on the outcome of auto-negotiation.
4530  */
4531 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
4532 		  struct link_config *lc)
4533 {
4534 	struct fw_port_cmd c;
4535 	unsigned int fw_mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
4536 	unsigned int fw_fc, cc_fec, fw_fec;
4537 
4538 	lc->link_ok = 0;
4539 
4540 	/*
4541 	 * Convert driver coding of Pause Frame Flow Control settings into the
4542 	 * Firmware's API.
4543 	 */
4544 	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4545 
4546 	/*
4547 	 * Convert Common Code Forward Error Control settings into the
4548 	 * Firmware's API.  If the current Requested FEC has "Automatic"
4549 	 * (IEEE 802.3) specified, then we use whatever the Firmware
4550 	 * sent us as part of it's IEEE 802.3-based interpratation of
4551 	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
4552 	 * use whatever is in the current Requested FEC settings.
4553 	 */
4554 	if (lc->requested_fec & FEC_AUTO)
4555 		cc_fec = lc->auto_fec;
4556 	else
4557 		cc_fec = lc->requested_fec;
4558 	fw_fec = cc_to_fwcap_fec(cc_fec);
4559 
4560 	memset(&c, 0, sizeof(c));
4561 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4562 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4563 				     V_FW_PORT_CMD_PORTID(port));
4564 	c.action_to_len16 =
4565 		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4566 			    FW_LEN16(c));
4567 
4568 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
4569 		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
4570 					     fw_fc | fw_fec);
4571 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4572 		lc->fec = cc_fec;
4573 	} else if (lc->autoneg == AUTONEG_DISABLE) {
4574 		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed |
4575 					     fw_fc | fw_fec | fw_mdi);
4576 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4577 		lc->fec = cc_fec;
4578 	} else
4579 		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising |
4580 					     fw_fc | fw_fec | fw_mdi);
4581 
4582 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4583 }
4584 
4585 /**
4586  *	t4_restart_aneg - restart autonegotiation
4587  *	@adap: the adapter
4588  *	@mbox: mbox to use for the FW command
4589  *	@port: the port id
4590  *
4591  *	Restarts autonegotiation for the selected port.
4592  */
4593 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4594 {
4595 	struct fw_port_cmd c;
4596 
4597 	memset(&c, 0, sizeof(c));
4598 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4599 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4600 				     V_FW_PORT_CMD_PORTID(port));
4601 	c.action_to_len16 =
4602 		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4603 			    FW_LEN16(c));
4604 	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4605 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4606 }
4607 
/* Platform-specific callback invoked when a matching interrupt condition fires. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry in a table describing how to react to bits in an interrupt
 * cause register; arrays of these (zero-mask terminated) drive
 * t4_handle_intr_status().
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4617 
4618 /**
4619  *	t4_handle_intr_status - table driven interrupt handler
4620  *	@adapter: the adapter that generated the interrupt
4621  *	@reg: the interrupt status register to process
4622  *	@acts: table of interrupt actions
4623  *
4624  *	A table driven interrupt handler that applies a set of masks to an
4625  *	interrupt status word and performs the corresponding actions if the
4626  *	interrupts described by the mask have occurred.  The actions include
4627  *	optionally emitting a warning or alert message.  The table is terminated
4628  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4629  *	conditions.
4630  */
4631 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4632 				 const struct intr_info *acts)
4633 {
4634 	int fatal = 0;
4635 	unsigned int mask = 0;
4636 	unsigned int status = t4_read_reg(adapter, reg);
4637 
4638 	for ( ; acts->mask; ++acts) {
4639 		if (!(status & acts->mask))
4640 			continue;
4641 		if (acts->fatal) {
4642 			fatal++;
4643 			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4644 				  status & acts->mask);
4645 		} else if (acts->msg)
4646 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4647 				 status & acts->mask);
4648 		if (acts->int_handler)
4649 			acts->int_handler(adapter);
4650 		mask |= acts->mask;
4651 	}
4652 	status &= mask;
4653 	if (status)	/* clear processed interrupts */
4654 		t4_write_reg(adapter, reg, status);
4655 	return fatal;
4656 }
4657 
4658 /*
4659  * Interrupt handler for the PCIE module.
4660  */
4661 static void pcie_intr_handler(struct adapter *adapter)
4662 {
4663 	static const struct intr_info sysbus_intr_info[] = {
4664 		{ F_RNPP, "RXNP array parity error", -1, 1 },
4665 		{ F_RPCP, "RXPC array parity error", -1, 1 },
4666 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
4667 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
4668 		{ F_RFTP, "RXFT array parity error", -1, 1 },
4669 		{ 0 }
4670 	};
4671 	static const struct intr_info pcie_port_intr_info[] = {
4672 		{ F_TPCP, "TXPC array parity error", -1, 1 },
4673 		{ F_TNPP, "TXNP array parity error", -1, 1 },
4674 		{ F_TFTP, "TXFT array parity error", -1, 1 },
4675 		{ F_TCAP, "TXCA array parity error", -1, 1 },
4676 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
4677 		{ F_RCAP, "RXCA array parity error", -1, 1 },
4678 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
4679 		{ F_RDPE, "Rx data parity error", -1, 1 },
4680 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
4681 		{ 0 }
4682 	};
4683 	static const struct intr_info pcie_intr_info[] = {
4684 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
4685 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
4686 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
4687 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4688 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4689 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4690 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4691 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
4692 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
4693 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4694 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
4695 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4696 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4697 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
4698 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4699 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4700 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
4701 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4702 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4703 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4704 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
4705 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
4706 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
4707 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4708 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
4709 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
4710 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
4711 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
4712 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
4713 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
4714 		  0 },
4715 		{ 0 }
4716 	};
4717 
4718 	static struct intr_info t5_pcie_intr_info[] = {
4719 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
4720 		  -1, 1 },
4721 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
4722 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
4723 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4724 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4725 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4726 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4727 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
4728 		  -1, 1 },
4729 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
4730 		  -1, 1 },
4731 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4732 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
4733 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4734 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4735 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
4736 		  -1, 1 },
4737 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4738 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4739 		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
4740 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4741 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4742 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4743 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
4744 		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
4745 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
4746 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4747 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
4748 		  -1, 1 },
4749 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
4750 		  -1, 1 },
4751 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
4752 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
4753 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4754 		{ F_READRSPERR, "Outbound read error", -1,
4755 		  0 },
4756 		{ 0 }
4757 	};
4758 
4759 	int fat;
4760 
4761 	if (is_t4(adapter->params.chip))
4762 		fat = t4_handle_intr_status(adapter,
4763 				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4764 				sysbus_intr_info) +
4765 			t4_handle_intr_status(adapter,
4766 					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4767 					pcie_port_intr_info) +
4768 			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4769 					      pcie_intr_info);
4770 	else
4771 		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4772 					    t5_pcie_intr_info);
4773 	if (fat)
4774 		t4_fatal_err(adapter);
4775 }
4776 
4777 /*
4778  * TP interrupt handler.
4779  */
4780 static void tp_intr_handler(struct adapter *adapter)
4781 {
4782 	static const struct intr_info tp_intr_info[] = {
4783 		{ 0x3fffffff, "TP parity error", -1, 1 },
4784 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4785 		{ 0 }
4786 	};
4787 
4788 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4789 		t4_fatal_err(adapter);
4790 }
4791 
4792 /*
4793  * SGE interrupt handler.
4794  */
4795 static void sge_intr_handler(struct adapter *adapter)
4796 {
4797 	u64 v;
4798 	u32 err;
4799 
4800 	static const struct intr_info sge_intr_info[] = {
4801 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
4802 		  "SGE received CPL exceeding IQE size", -1, 1 },
4803 		{ F_ERR_INVALID_CIDX_INC,
4804 		  "SGE GTS CIDX increment too large", -1, 0 },
4805 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
4806 		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
4807 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4808 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
4809 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
4810 		  0 },
4811 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
4812 		  0 },
4813 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
4814 		  0 },
4815 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
4816 		  0 },
4817 		{ F_ERR_ING_CTXT_PRIO,
4818 		  "SGE too many priority ingress contexts", -1, 0 },
4819 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
4820 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
4821 		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
4822 		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
4823 		  "SGE PCIe error for a DBP thread", -1, 0 },
4824 		{ 0 }
4825 	};
4826 
4827 	static struct intr_info t4t5_sge_intr_info[] = {
4828 		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
4829 		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
4830 		{ F_ERR_EGR_CTXT_PRIO,
4831 		  "SGE too many priority egress contexts", -1, 0 },
4832 		{ 0 }
4833 	};
4834 
4835 	/*
4836  	* For now, treat below interrupts as fatal so that we disable SGE and
4837  	* get better debug */
4838 	static struct intr_info t6_sge_intr_info[] = {
4839 		{ F_FATAL_WRE_LEN,
4840 		  "SGE Actual WRE packet is less than advertized length",
4841 		  -1, 1 },
4842 		{ 0 }
4843 	};
4844 
4845 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4846 		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4847 	if (v) {
4848 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4849 				(unsigned long long)v);
4850 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4851 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
4852 	}
4853 
4854 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4855 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4856 		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4857 					   t4t5_sge_intr_info);
4858 	else
4859 		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4860 					   t6_sge_intr_info);
4861 
4862 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4863 	if (err & F_ERROR_QID_VALID) {
4864 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4865 		if (err & F_UNCAPTURED_ERROR)
4866 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4867 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4868 			     F_UNCAPTURED_ERROR);
4869 	}
4870 
4871 	if (v != 0)
4872 		t4_fatal_err(adapter);
4873 }
4874 
/* Aggregate masks for the CIM outbound/inbound queue parity-error bits. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 *
 * Handles both the CIM host cause register and the uP accelerator cause
 * register; any fatal condition takes the adapter down.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	/* A firmware-reported error is surfaced before the cause tables. */
	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4938 
4939 /*
4940  * ULP RX interrupt handler.
4941  */
4942 static void ulprx_intr_handler(struct adapter *adapter)
4943 {
4944 	static const struct intr_info ulprx_intr_info[] = {
4945 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4946 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4947 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4948 		{ 0 }
4949 	};
4950 
4951 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4952 		t4_fatal_err(adapter);
4953 }
4954 
4955 /*
4956  * ULP TX interrupt handler.
4957  */
4958 static void ulptx_intr_handler(struct adapter *adapter)
4959 {
4960 	static const struct intr_info ulptx_intr_info[] = {
4961 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4962 		  0 },
4963 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4964 		  0 },
4965 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4966 		  0 },
4967 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4968 		  0 },
4969 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4970 		{ 0 }
4971 	};
4972 
4973 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4974 		t4_fatal_err(adapter);
4975 }
4976 
4977 /*
4978  * PM TX interrupt handler.
4979  */
4980 static void pmtx_intr_handler(struct adapter *adapter)
4981 {
4982 	static const struct intr_info pmtx_intr_info[] = {
4983 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4984 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4985 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4986 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4987 		{ 0xffffff0, "PMTX framing error", -1, 1 },
4988 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4989 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4990 		  1 },
4991 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4992 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4993 		{ 0 }
4994 	};
4995 
4996 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4997 		t4_fatal_err(adapter);
4998 }
4999 
5000 /*
5001  * PM RX interrupt handler.
5002  */
5003 static void pmrx_intr_handler(struct adapter *adapter)
5004 {
5005 	static const struct intr_info pmrx_intr_info[] = {
5006 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
5007 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
5008 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
5009 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
5010 		  1 },
5011 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
5012 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
5013 		{ 0 }
5014 	};
5015 
5016 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
5017 		t4_fatal_err(adapter);
5018 }
5019 
5020 /*
5021  * CPL switch interrupt handler.
5022  */
5023 static void cplsw_intr_handler(struct adapter *adapter)
5024 {
5025 	static const struct intr_info cplsw_intr_info[] = {
5026 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
5027 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
5028 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
5029 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
5030 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
5031 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
5032 		{ 0 }
5033 	};
5034 
5035 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
5036 		t4_fatal_err(adapter);
5037 }
5038 
5039 /*
5040  * LE interrupt handler.
5041  */
5042 static void le_intr_handler(struct adapter *adap)
5043 {
5044 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
5045 	static const struct intr_info le_intr_info[] = {
5046 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
5047 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
5048 		{ F_PARITYERR, "LE parity error", -1, 1 },
5049 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
5050 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
5051 		{ 0 }
5052 	};
5053 
5054 	static struct intr_info t6_le_intr_info[] = {
5055 		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
5056 		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
5057 		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
5058 		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
5059 		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
5060 		{ 0 }
5061 	};
5062 
5063 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
5064 				  (chip_ver <= CHELSIO_T5) ?
5065 				  le_intr_info : t6_le_intr_info))
5066 		t4_fatal_err(adap);
5067 }
5068 
5069 /*
5070  * MPS interrupt handler.
5071  */
5072 static void mps_intr_handler(struct adapter *adapter)
5073 {
5074 	static const struct intr_info mps_rx_intr_info[] = {
5075 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
5076 		{ 0 }
5077 	};
5078 	static const struct intr_info mps_tx_intr_info[] = {
5079 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5080 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5081 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5082 		  -1, 1 },
5083 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5084 		  -1, 1 },
5085 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
5086 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5087 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
5088 		{ 0 }
5089 	};
5090 	static const struct intr_info mps_trc_intr_info[] = {
5091 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
5092 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
5093 		  1 },
5094 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
5095 		{ 0 }
5096 	};
5097 	static const struct intr_info mps_stat_sram_intr_info[] = {
5098 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
5099 		{ 0 }
5100 	};
5101 	static const struct intr_info mps_stat_tx_intr_info[] = {
5102 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
5103 		{ 0 }
5104 	};
5105 	static const struct intr_info mps_stat_rx_intr_info[] = {
5106 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
5107 		{ 0 }
5108 	};
5109 	static const struct intr_info mps_cls_intr_info[] = {
5110 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
5111 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
5112 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
5113 		{ 0 }
5114 	};
5115 
5116 	int fat;
5117 
5118 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
5119 				    mps_rx_intr_info) +
5120 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
5121 				    mps_tx_intr_info) +
5122 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
5123 				    mps_trc_intr_info) +
5124 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5125 				    mps_stat_sram_intr_info) +
5126 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5127 				    mps_stat_tx_intr_info) +
5128 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5129 				    mps_stat_rx_intr_info) +
5130 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
5131 				    mps_cls_intr_info);
5132 
5133 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
5134 	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
5135 	if (fat)
5136 		t4_fatal_err(adapter);
5137 }
5138 
5139 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
5140 		      F_ECC_UE_INT_CAUSE)
5141 
5142 /*
5143  * EDC/MC interrupt handler.
5144  */
5145 static void mem_intr_handler(struct adapter *adapter, int idx)
5146 {
5147 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
5148 
5149 	unsigned int addr, cnt_addr, v;
5150 
5151 	if (idx <= MEM_EDC1) {
5152 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
5153 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
5154 	} else if (idx == MEM_MC) {
5155 		if (is_t4(adapter->params.chip)) {
5156 			addr = A_MC_INT_CAUSE;
5157 			cnt_addr = A_MC_ECC_STATUS;
5158 		} else {
5159 			addr = A_MC_P_INT_CAUSE;
5160 			cnt_addr = A_MC_P_ECC_STATUS;
5161 		}
5162 	} else {
5163 		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
5164 		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
5165 	}
5166 
5167 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
5168 	if (v & F_PERR_INT_CAUSE)
5169 		CH_ALERT(adapter, "%s FIFO parity error\n",
5170 			  name[idx]);
5171 	if (v & F_ECC_CE_INT_CAUSE) {
5172 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
5173 
5174 		if (idx <= MEM_EDC1)
5175 			t4_edc_err_read(adapter, idx);
5176 
5177 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
5178 		CH_WARN_RATELIMIT(adapter,
5179 				  "%u %s correctable ECC data error%s\n",
5180 				  cnt, name[idx], cnt > 1 ? "s" : "");
5181 	}
5182 	if (v & F_ECC_UE_INT_CAUSE)
5183 		CH_ALERT(adapter,
5184 			 "%s uncorrectable ECC data error\n", name[idx]);
5185 
5186 	t4_write_reg(adapter, addr, v);
5187 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
5188 		t4_fatal_err(adapter);
5189 }
5190 
5191 /*
5192  * MA interrupt handler.
5193  */
5194 static void ma_intr_handler(struct adapter *adapter)
5195 {
5196 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
5197 
5198 	if (status & F_MEM_PERR_INT_CAUSE) {
5199 		CH_ALERT(adapter,
5200 			  "MA parity error, parity status %#x\n",
5201 			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
5202 		if (is_t5(adapter->params.chip))
5203 			CH_ALERT(adapter,
5204 				  "MA parity error, parity status %#x\n",
5205 				  t4_read_reg(adapter,
5206 					      A_MA_PARITY_ERROR_STATUS2));
5207 	}
5208 	if (status & F_MEM_WRAP_INT_CAUSE) {
5209 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
5210 		CH_ALERT(adapter, "MA address wrap-around error by "
5211 			  "client %u to address %#x\n",
5212 			  G_MEM_WRAP_CLIENT_NUM(v),
5213 			  G_MEM_WRAP_ADDRESS(v) << 4);
5214 	}
5215 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
5216 	t4_fatal_err(adapter);
5217 }
5218 
5219 /*
5220  * SMB interrupt handler.
5221  */
5222 static void smb_intr_handler(struct adapter *adap)
5223 {
5224 	static const struct intr_info smb_intr_info[] = {
5225 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5226 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5227 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5228 		{ 0 }
5229 	};
5230 
5231 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5232 		t4_fatal_err(adap);
5233 }
5234 
5235 /*
5236  * NC-SI interrupt handler.
5237  */
5238 static void ncsi_intr_handler(struct adapter *adap)
5239 {
5240 	static const struct intr_info ncsi_intr_info[] = {
5241 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
5242 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
5243 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
5244 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
5245 		{ 0 }
5246 	};
5247 
5248 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
5249 		t4_fatal_err(adap);
5250 }
5251 
5252 /*
5253  * XGMAC interrupt handler.
5254  */
5255 static void xgmac_intr_handler(struct adapter *adap, int port)
5256 {
5257 	u32 v, int_cause_reg;
5258 
5259 	if (is_t4(adap->params.chip))
5260 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5261 	else
5262 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5263 
5264 	v = t4_read_reg(adap, int_cause_reg);
5265 
5266 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5267 	if (!v)
5268 		return;
5269 
5270 	if (v & F_TXFIFO_PRTY_ERR)
5271 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5272 			  port);
5273 	if (v & F_RXFIFO_PRTY_ERR)
5274 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5275 			  port);
5276 	t4_write_reg(adap, int_cause_reg, v);
5277 	t4_fatal_err(adap);
5278 }
5279 
5280 /*
5281  * PL interrupt handler.
5282  */
5283 static void pl_intr_handler(struct adapter *adap)
5284 {
5285 	static const struct intr_info pl_intr_info[] = {
5286 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
5287 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5288 		{ 0 }
5289 	};
5290 
5291 	static struct intr_info t5_pl_intr_info[] = {
5292 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
5293 		{ 0 }
5294 	};
5295 
5296 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5297 				  is_t4(adap->params.chip) ?
5298 				  pl_intr_info : t5_pl_intr_info))
5299 		t4_fatal_err(adap);
5300 }
5301 
5302 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5303 
5304 /**
5305  *	t4_slow_intr_handler - control path interrupt handler
5306  *	@adapter: the adapter
5307  *
5308  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
5309  *	The designation 'slow' is because it involves register reads, while
5310  *	data interrupts typically don't involve any MMIOs.
5311  */
5312 int t4_slow_intr_handler(struct adapter *adapter)
5313 {
5314 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
5315 
5316 	if (!(cause & GLBL_INTR_MASK))
5317 		return 0;
5318 	if (cause & F_CIM)
5319 		cim_intr_handler(adapter);
5320 	if (cause & F_MPS)
5321 		mps_intr_handler(adapter);
5322 	if (cause & F_NCSI)
5323 		ncsi_intr_handler(adapter);
5324 	if (cause & F_PL)
5325 		pl_intr_handler(adapter);
5326 	if (cause & F_SMB)
5327 		smb_intr_handler(adapter);
5328 	if (cause & F_MAC0)
5329 		xgmac_intr_handler(adapter, 0);
5330 	if (cause & F_MAC1)
5331 		xgmac_intr_handler(adapter, 1);
5332 	if (cause & F_MAC2)
5333 		xgmac_intr_handler(adapter, 2);
5334 	if (cause & F_MAC3)
5335 		xgmac_intr_handler(adapter, 3);
5336 	if (cause & F_PCIE)
5337 		pcie_intr_handler(adapter);
5338 	if (cause & F_MC0)
5339 		mem_intr_handler(adapter, MEM_MC);
5340 	if (is_t5(adapter->params.chip) && (cause & F_MC1))
5341 		mem_intr_handler(adapter, MEM_MC1);
5342 	if (cause & F_EDC0)
5343 		mem_intr_handler(adapter, MEM_EDC0);
5344 	if (cause & F_EDC1)
5345 		mem_intr_handler(adapter, MEM_EDC1);
5346 	if (cause & F_LE)
5347 		le_intr_handler(adapter);
5348 	if (cause & F_TP)
5349 		tp_intr_handler(adapter);
5350 	if (cause & F_MA)
5351 		ma_intr_handler(adapter);
5352 	if (cause & F_PM_TX)
5353 		pmtx_intr_handler(adapter);
5354 	if (cause & F_PM_RX)
5355 		pmrx_intr_handler(adapter);
5356 	if (cause & F_ULP_RX)
5357 		ulprx_intr_handler(adapter);
5358 	if (cause & F_CPL_SWITCH)
5359 		cplsw_intr_handler(adapter);
5360 	if (cause & F_SGE)
5361 		sge_intr_handler(adapter);
5362 	if (cause & F_ULP_TX)
5363 		ulptx_intr_handler(adapter);
5364 
5365 	/* Clear the interrupts just processed for which we are the master. */
5366 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
5367 	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
5368 	return 1;
5369 }
5370 
5371 /**
5372  *	t4_intr_enable - enable interrupts
5373  *	@adapter: the adapter whose interrupts should be enabled
5374  *
5375  *	Enable PF-specific interrupts for the calling function and the top-level
5376  *	interrupt concentrator for global interrupts.  Interrupts are already
5377  *	enabled at each module,	here we just enable the roots of the interrupt
5378  *	hierarchies.
5379  *
5380  *	Note: this function should be called only when the driver manages
5381  *	non PF-specific interrupts from the various HW modules.  Only one PCI
5382  *	function at a time should be doing this.
5383  */
5384 void t4_intr_enable(struct adapter *adapter)
5385 {
5386 	u32 val = 0;
5387 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5388 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5389 		  ? G_SOURCEPF(whoami)
5390 		  : G_T6_SOURCEPF(whoami));
5391 
5392 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5393 		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5394 	else
5395 		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5396 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
5397 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
5398 		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
5399 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
5400 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5401 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
5402 		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
5403 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5404 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
5405 }
5406 
5407 /**
5408  *	t4_intr_disable - disable interrupts
5409  *	@adapter: the adapter whose interrupts should be disabled
5410  *
5411  *	Disable interrupts.  We only disable the top-level interrupt
5412  *	concentrators.  The caller must be a PCI function managing global
5413  *	interrupts.
5414  */
5415 void t4_intr_disable(struct adapter *adapter)
5416 {
5417 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5418 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5419 		  ? G_SOURCEPF(whoami)
5420 		  : G_T6_SOURCEPF(whoami));
5421 
5422 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5423 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5424 }
5425 
5426 /**
5427  *	t4_config_rss_range - configure a portion of the RSS mapping table
5428  *	@adapter: the adapter
5429  *	@mbox: mbox to use for the FW command
5430  *	@viid: virtual interface whose RSS subtable is to be written
5431  *	@start: start entry in the table to write
5432  *	@n: how many table entries to write
5433  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
5434  *	@nrspq: number of values in @rspq
5435  *
5436  *	Programs the selected part of the VI's RSS mapping table with the
5437  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5438  *	until the full table range is populated.
5439  *
5440  *	The caller must ensure the values in @rspq are in the range allowed for
5441  *	@viid.
5442  */
5443 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5444 			int start, int n, const u16 *rspq, unsigned int nrspq)
5445 {
5446 	int ret;
5447 	const u16 *rsp = rspq;
5448 	const u16 *rsp_end = rspq + nrspq;
5449 	struct fw_rss_ind_tbl_cmd cmd;
5450 
5451 	memset(&cmd, 0, sizeof(cmd));
5452 	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5453 				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5454 				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
5455 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5456 
5457 	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
5458 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
5459 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5460 	 * reserved.
5461 	 */
5462 	while (n > 0) {
5463 		int nq = min(n, 32);
5464 		int nq_packed = 0;
5465 		__be32 *qp = &cmd.iq0_to_iq2;
5466 
5467 		/* Set up the firmware RSS command header to send the next
5468 		 * "nq" Ingress Queue IDs to the firmware.
5469 		 */
5470 		cmd.niqid = cpu_to_be16(nq);
5471 		cmd.startidx = cpu_to_be16(start);
5472 
5473 		/* "nq" more done for the start of the next loop.
5474 		 */
5475 		start += nq;
5476 		n -= nq;
5477 
5478 		/* While there are still Ingress Queue IDs to stuff into the
5479 		 * current firmware RSS command, retrieve them from the
5480 		 * Ingress Queue ID array and insert them into the command.
5481 		 */
5482 		while (nq > 0) {
5483 			/* Grab up to the next 3 Ingress Queue IDs (wrapping
5484 			 * around the Ingress Queue ID array if necessary) and
5485 			 * insert them into the firmware RSS command at the
5486 			 * current 3-tuple position within the commad.
5487 			 */
5488 			u16 qbuf[3];
5489 			u16 *qbp = qbuf;
5490 			int nqbuf = min(3, nq);
5491 
5492 			nq -= nqbuf;
5493 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
5494 			while (nqbuf && nq_packed < 32) {
5495 				nqbuf--;
5496 				nq_packed++;
5497 				*qbp++ = *rsp++;
5498 				if (rsp >= rsp_end)
5499 					rsp = rspq;
5500 			}
5501 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5502 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5503 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5504 		}
5505 
5506 		/* Send this portion of the RRS table update to the firmware;
5507 		 * bail out on any errors.
5508 		 */
5509 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5510 		if (ret)
5511 			return ret;
5512 	}
5513 	return 0;
5514 }
5515 
5516 /**
5517  *	t4_config_glbl_rss - configure the global RSS mode
5518  *	@adapter: the adapter
5519  *	@mbox: mbox to use for the FW command
5520  *	@mode: global RSS mode
5521  *	@flags: mode-specific flags
5522  *
5523  *	Sets the global RSS mode.
5524  */
5525 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5526 		       unsigned int flags)
5527 {
5528 	struct fw_rss_glb_config_cmd c;
5529 
5530 	memset(&c, 0, sizeof(c));
5531 	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5532 				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5533 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5534 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5535 		c.u.manual.mode_pkd =
5536 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5537 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5538 		c.u.basicvirtual.mode_keymode =
5539 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5540 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5541 	} else
5542 		return -EINVAL;
5543 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5544 }
5545 
5546 /**
5547  *	t4_config_vi_rss - configure per VI RSS settings
5548  *	@adapter: the adapter
5549  *	@mbox: mbox to use for the FW command
5550  *	@viid: the VI id
5551  *	@flags: RSS flags
5552  *	@defq: id of the default RSS queue for the VI.
5553  *	@skeyidx: RSS secret key table index for non-global mode
5554  *	@skey: RSS vf_scramble key for VI.
5555  *
5556  *	Configures VI-specific RSS properties.
5557  */
5558 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5559 		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
5560 		     unsigned int skey)
5561 {
5562 	struct fw_rss_vi_config_cmd c;
5563 
5564 	memset(&c, 0, sizeof(c));
5565 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5566 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5567 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5568 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5569 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5570 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5571 	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5572 					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5573 	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5574 
5575 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5576 }
5577 
/*
 * Read an RSS table row.  Requests @row by writing the lookup-table
 * register, then polls F_LKPTBLROWVLD (up to 5 attempts, no delay) and
 * stores the register contents in *@val.  Returns 0 on success or
 * -EAGAIN on timeout (see t4_wait_op_done_val()).
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5585 
5586 /**
5587  *	t4_read_rss - read the contents of the RSS mapping table
5588  *	@adapter: the adapter
5589  *	@map: holds the contents of the RSS mapping table
5590  *
5591  *	Reads the contents of the RSS hash->queue mapping table.
5592  */
5593 int t4_read_rss(struct adapter *adapter, u16 *map)
5594 {
5595 	u32 val;
5596 	int i, ret;
5597 
5598 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5599 		ret = rd_rss_row(adapter, i, &val);
5600 		if (ret)
5601 			return ret;
5602 		*map++ = G_LKPTBLQUEUE0(val);
5603 		*map++ = G_LKPTBLQUEUE1(val);
5604 	}
5605 	return 0;
5606 }
5607 
5608 /**
5609  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5610  * @adap: the adapter
5611  * @cmd: TP fw ldst address space type
5612  * @vals: where the indirect register values are stored/written
5613  * @nregs: how many indirect registers to read/write
5614  * @start_idx: index of first indirect register to read/write
5615  * @rw: Read (1) or Write (0)
5616  * @sleep_ok: if true we may sleep while awaiting command completion
5617  *
5618  * Access TP indirect registers through LDST
5619  **/
5620 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5621 			    unsigned int nregs, unsigned int start_index,
5622 			    unsigned int rw, bool sleep_ok)
5623 {
5624 	int ret = 0;
5625 	unsigned int i;
5626 	struct fw_ldst_cmd c;
5627 
5628 	for (i = 0; i < nregs; i++) {
5629 		memset(&c, 0, sizeof(c));
5630 		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5631 						F_FW_CMD_REQUEST |
5632 						(rw ? F_FW_CMD_READ :
5633 						      F_FW_CMD_WRITE) |
5634 						V_FW_LDST_CMD_ADDRSPACE(cmd));
5635 		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5636 
5637 		c.u.addrval.addr = cpu_to_be32(start_index + i);
5638 		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5639 		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5640 				      sleep_ok);
5641 		if (ret)
5642 			return ret;
5643 
5644 		if (rw)
5645 			vals[i] = be32_to_cpu(c.u.addrval.val);
5646 	}
5647 	return 0;
5648 }
5649 
5650 /**
5651  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5652  * @adap: the adapter
5653  * @reg_addr: Address Register
5654  * @reg_data: Data register
5655  * @buff: where the indirect register values are stored/written
5656  * @nregs: how many indirect registers to read/write
5657  * @start_index: index of first indirect register to read/write
5658  * @rw: READ(1) or WRITE(0)
5659  * @sleep_ok: if true we may sleep while awaiting command completion
5660  *
5661  * Read/Write TP indirect registers through LDST if possible.
5662  * Else, use backdoor access
5663  **/
5664 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5665 			      u32 *buff, u32 nregs, u32 start_index, int rw,
5666 			      bool sleep_ok)
5667 {
5668 	int rc = -EINVAL;
5669 	int cmd;
5670 
5671 	switch (reg_addr) {
5672 	case A_TP_PIO_ADDR:
5673 		cmd = FW_LDST_ADDRSPC_TP_PIO;
5674 		break;
5675 	case A_TP_TM_PIO_ADDR:
5676 		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5677 		break;
5678 	case A_TP_MIB_INDEX:
5679 		cmd = FW_LDST_ADDRSPC_TP_MIB;
5680 		break;
5681 	default:
5682 		goto indirect_access;
5683 	}
5684 
5685 	if (t4_use_ldst(adap))
5686 		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5687 				      sleep_ok);
5688 
5689 indirect_access:
5690 
5691 	if (rc) {
5692 		if (rw)
5693 			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5694 					 start_index);
5695 		else
5696 			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5697 					  start_index);
5698 	}
5699 }
5700 
5701 /**
5702  * t4_tp_pio_read - Read TP PIO registers
5703  * @adap: the adapter
5704  * @buff: where the indirect register values are written
5705  * @nregs: how many indirect registers to read
5706  * @start_index: index of first indirect register to read
5707  * @sleep_ok: if true we may sleep while awaiting command completion
5708  *
5709  * Read TP PIO Registers
5710  **/
5711 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5712 		    u32 start_index, bool sleep_ok)
5713 {
5714 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5715 			  start_index, 1, sleep_ok);
5716 }
5717 
5718 /**
5719  * t4_tp_pio_write - Write TP PIO registers
5720  * @adap: the adapter
5721  * @buff: where the indirect register values are stored
5722  * @nregs: how many indirect registers to write
5723  * @start_index: index of first indirect register to write
5724  * @sleep_ok: if true we may sleep while awaiting command completion
5725  *
5726  * Write TP PIO Registers
5727  **/
5728 void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5729 		     u32 start_index, bool sleep_ok)
5730 {
5731 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5732 			  start_index, 0, sleep_ok);
5733 }
5734 
5735 /**
5736  * t4_tp_tm_pio_read - Read TP TM PIO registers
5737  * @adap: the adapter
5738  * @buff: where the indirect register values are written
5739  * @nregs: how many indirect registers to read
5740  * @start_index: index of first indirect register to read
5741  * @sleep_ok: if true we may sleep while awaiting command completion
5742  *
5743  * Read TP TM PIO Registers
5744  **/
5745 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5746 		       u32 start_index, bool sleep_ok)
5747 {
5748 	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5749 			  nregs, start_index, 1, sleep_ok);
5750 }
5751 
5752 /**
5753  * t4_tp_mib_read - Read TP MIB registers
5754  * @adap: the adapter
5755  * @buff: where the indirect register values are written
5756  * @nregs: how many indirect registers to read
5757  * @start_index: index of first indirect register to read
5758  * @sleep_ok: if true we may sleep while awaiting command completion
5759  *
5760  * Read TP MIB Registers
5761  **/
5762 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5763 		    bool sleep_ok)
5764 {
5765 	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5766 			  start_index, 1, sleep_ok);
5767 }
5768 
5769 /**
5770  *	t4_read_rss_key - read the global RSS key
5771  *	@adap: the adapter
5772  *	@key: 10-entry array holding the 320-bit RSS key
5773  * 	@sleep_ok: if true we may sleep while awaiting command completion
5774  *
5775  *	Reads the global 320-bit RSS key.
5776  */
5777 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5778 {
5779 	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5780 }
5781 
5782 /**
5783  *	t4_write_rss_key - program one of the RSS keys
5784  *	@adap: the adapter
5785  *	@key: 10-entry array holding the 320-bit RSS key
5786  *	@idx: which RSS key to write
5787  * 	@sleep_ok: if true we may sleep while awaiting command completion
5788  *
5789  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5790  *	0..15 the corresponding entry in the RSS key table is written,
5791  *	otherwise the global RSS key is written.
5792  */
5793 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5794 		      bool sleep_ok)
5795 {
5796 	u8 rss_key_addr_cnt = 16;
5797 	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5798 
5799 	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5800 	 * allows access to key addresses 16-63 by using KeyWrAddrX
5801 	 * as index[5:4](upper 2) into key table
5802 	 */
5803 	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5804 	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5805 		rss_key_addr_cnt = 32;
5806 
5807 	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5808 
5809 	if (idx >= 0 && idx < rss_key_addr_cnt) {
5810 		if (rss_key_addr_cnt > 16)
5811 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5812 				     vrt | V_KEYWRADDRX(idx >> 4) |
5813 				     V_T6_VFWRADDR(idx) | F_KEYWREN);
5814 		else
5815 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5816 				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5817 	}
5818 }
5819 
5820 /**
5821  *	t4_read_rss_pf_config - read PF RSS Configuration Table
5822  *	@adapter: the adapter
5823  *	@index: the entry in the PF RSS table to read
5824  *	@valp: where to store the returned value
5825  * 	@sleep_ok: if true we may sleep while awaiting command completion
5826  *
5827  *	Reads the PF RSS Configuration Table at the specified index and returns
5828  *	the value found there.
5829  */
5830 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5831 			   u32 *valp, bool sleep_ok)
5832 {
5833 	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5834 }
5835 
5836 /**
5837  *	t4_write_rss_pf_config - write PF RSS Configuration Table
5838  *	@adapter: the adapter
5839  *	@index: the entry in the VF RSS table to read
5840  *	@val: the value to store
5841  * 	@sleep_ok: if true we may sleep while awaiting command completion
5842  *
5843  *	Writes the PF RSS Configuration Table at the specified index with the
5844  *	specified value.
5845  */
5846 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5847 			    u32 val, bool sleep_ok)
5848 {
5849 	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5850 			sleep_ok);
5851 }
5852 
5853 /**
5854  *	t4_read_rss_vf_config - read VF RSS Configuration Table
5855  *	@adapter: the adapter
5856  *	@index: the entry in the VF RSS table to read
5857  *	@vfl: where to store the returned VFL
5858  *	@vfh: where to store the returned VFH
5859  * 	@sleep_ok: if true we may sleep while awaiting command completion
5860  *
5861  *	Reads the VF RSS Configuration Table at the specified index and returns
5862  *	the (VFL, VFH) values found there.
5863  */
5864 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5865 			   u32 *vfl, u32 *vfh, bool sleep_ok)
5866 {
5867 	u32 vrt, mask, data;
5868 
5869 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5870 		mask = V_VFWRADDR(M_VFWRADDR);
5871 		data = V_VFWRADDR(index);
5872 	} else {
5873 		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5874 		 data = V_T6_VFWRADDR(index);
5875 	}
5876 	/*
5877 	 * Request that the index'th VF Table values be read into VFL/VFH.
5878 	 */
5879 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5880 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5881 	vrt |= data | F_VFRDEN;
5882 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5883 
5884 	/*
5885 	 * Grab the VFL/VFH values ...
5886 	 */
5887 	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5888 	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5889 }
5890 
5891 /**
5892  *	t4_read_rss_pf_map - read PF RSS Map
5893  *	@adapter: the adapter
5894  * 	@sleep_ok: if true we may sleep while awaiting command completion
5895  *
5896  *	Reads the PF RSS Map register and returns its value.
5897  */
5898 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5899 {
5900 	u32 pfmap;
5901 
5902 	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5903 
5904 	return pfmap;
5905 }
5906 
5907 /**
5908  *	t4_read_rss_pf_mask - read PF RSS Mask
5909  *	@adapter: the adapter
5910  * 	@sleep_ok: if true we may sleep while awaiting command completion
5911  *
5912  *	Reads the PF RSS Mask register and returns its value.
5913  */
5914 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5915 {
5916 	u32 pfmask;
5917 
5918 	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5919 
5920 	return pfmask;
5921 }
5922 
5923 /**
5924  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5925  *	@adap: the adapter
5926  *	@v4: holds the TCP/IP counter values
5927  *	@v6: holds the TCP/IPv6 counter values
5928  * 	@sleep_ok: if true we may sleep while awaiting command completion
5929  *
5930  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5931  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5932  */
5933 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5934 			 struct tp_tcp_stats *v6, bool sleep_ok)
5935 {
5936 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5937 
5938 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5939 #define STAT(x)     val[STAT_IDX(x)]
5940 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5941 
5942 	if (v4) {
5943 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5944 			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
5945 		v4->tcp_out_rsts = STAT(OUT_RST);
5946 		v4->tcp_in_segs  = STAT64(IN_SEG);
5947 		v4->tcp_out_segs = STAT64(OUT_SEG);
5948 		v4->tcp_retrans_segs = STAT64(RXT_SEG);
5949 	}
5950 	if (v6) {
5951 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5952 			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5953 		v6->tcp_out_rsts = STAT(OUT_RST);
5954 		v6->tcp_in_segs  = STAT64(IN_SEG);
5955 		v6->tcp_out_segs = STAT64(OUT_SEG);
5956 		v6->tcp_retrans_segs = STAT64(RXT_SEG);
5957 	}
5958 #undef STAT64
5959 #undef STAT
5960 #undef STAT_IDX
5961 }
5962 
5963 /**
5964  *	t4_tp_get_err_stats - read TP's error MIB counters
5965  *	@adap: the adapter
5966  *	@st: holds the counter values
5967  * 	@sleep_ok: if true we may sleep while awaiting command completion
5968  *
5969  *	Returns the values of TP's error counters.
5970  */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* One TP MIB counter word per channel for each error class. */
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/*
	 * Two adjacent words starting at OFD_ARP_DROP; the second word
	 * fills the struct field that immediately follows ofld_no_neigh
	 * (NOTE(review): relies on tp_err_stats layout -- confirm).
	 */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
6003 
6004 /**
6005  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
6006  *	@adap: the adapter
6007  *	@st: holds the counter values
6008  * 	@sleep_ok: if true we may sleep while awaiting command completion
6009  *
6010  *	Returns the values of TP's CPL counters.
6011  */
6012 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6013 			 bool sleep_ok)
6014 {
6015 	int nchan = adap->params.arch.nchan;
6016 
6017 	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6018 
6019 	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6020 }
6021 
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/*
	 * Two adjacent MIB words starting at RQE_DFR_PKT; the second word
	 * fills the struct field following rqe_dfr_pkt (NOTE(review):
	 * relies on tp_rdma_stats layout -- confirm).
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}
6035 
/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	/* Per-port counters: port idx's word lives at base + idx. */
	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/*
	 * The byte counter is 64 bits, split into consecutive HI/LO words;
	 * each port owns one HI/LO pair, hence the 2 * idx offset.
	 */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
6061 
6062 /**
6063  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6064  *	@adap: the adapter
6065  *	@st: holds the counter values
6066  * 	@sleep_ok: if true we may sleep while awaiting command completion
6067  *
6068  *	Returns the values of TP's counters for non-TCP directly-placed packets.
6069  */
6070 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6071 		      bool sleep_ok)
6072 {
6073 	u32 val[4];
6074 
6075 	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6076 
6077 	st->frames = val[0];
6078 	st->drops = val[1];
6079 	st->octets = ((u64)val[2] << 32) | val[3];
6080 }
6081 
/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * Select entry i for readback: index 0xff with the entry
		 * number in the value field (NOTE(review): register
		 * encoding per hardware spec; compare the write-side
		 * encoding in t4_load_mtus()).
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}
6104 
/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/*
			 * Select the (mtu, window) entry for readback;
			 * the additive increment occupies the low 13 bits
			 * of the value read back.
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}
6125 
6126 /**
6127  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6128  *	@adap: the adapter
6129  *	@addr: the indirect TP register address
6130  *	@mask: specifies the field within the register to modify
6131  *	@val: new value for the field
6132  *
6133  *	Sets a field of an indirect TP register to the given value.
6134  */
6135 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6136 			    unsigned int mask, unsigned int val)
6137 {
6138 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6139 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6140 	t4_write_reg(adap, A_TP_PIO_DATA, val);
6141 }
6142 
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Fills the 32-entry alpha and beta tables with the default
 *	congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short dflt_alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short dflt_beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = dflt_alpha[i];
		b[i] = dflt_beta[i];
	}
}
6186 
6187 /* The minimum additive increment value for the congestion control table */
6188 #define CC_MIN_INCR 2U
6189 
/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Expected average packet count for each congestion window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/*
		 * Round log2(mtu) to the nearest integer: keep fls()'s
		 * result only when the bit two below the MSB is set,
		 * i.e. when mtu falls in the upper part of its octave.
		 */
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment: alpha scaled by payload per
			 * average packet (mtu - 40, presumably TCP/IP
			 * header bytes), clamped below at CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
6233 
6234 /*
6235  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6236  * clocks.  The formula is
6237  *
6238  * bytes/s = bytes256 * 256 * ClkFreq / 4096
6239  *
6240  * which is equivalent to
6241  *
6242  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6243  */
6244 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6245 {
6246 	u64 v = bytes256 * adap->params.vpd.cclk;
6247 
6248 	return v * 62 + v / 2;
6249 }
6250 
/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.  On parts with fewer than NCHAN channels only
 *	entries 0 and 1 are written; entries 2 and 3 are left untouched.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	/* NIC (tunnel) traffic rates, one bit-field per channel. */
	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	/* Offloaded traffic rates, same layout in a separate register. */
	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}
6280 
/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @enable is
 *	%0 @tp is not examined and may be %NULL. The user is responsible to
 *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
 *	docs/readme.txt for a complete description of how to setup tracing on
 *	T4.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	/* Disabling: clear the match control register and we're done. */
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (the 10KB FIFO divided among 4
		 * channels) minus 2 flits for the CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate every field against its register bit-field width. */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Per-filter match/don't-care register banks are strided. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* HW stores "don't care" bits, so the mask is written inverted. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Writing CTL_A last (re-)enables the tracer atomically. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
		     V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
6362 
/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* Enable/port/invert fields moved between T4 and T5+. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port =  G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	/* Per-filter match/don't-care register banks are strided. */
	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/*
	 * HW stores "don't care" bits, so invert on readback; mask the
	 * data so only cared-about bits are reported.
	 */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
6405 
6406 /**
6407  *	t4_read_tcb - read a hardware TCP Control Block structure
6408  *	@adap: the adapter
6409  *	@win: PCI-E Memory Window to use
6410  *	@tid: the TCB ID
6411  *	@tcb: the buffer to return the TCB in
6412  *
6413  *	Reads the indicated hardware TCP Control Block and returns it in
6414  *	the supplied buffer.  Returns 0 on success.
6415  */
6416 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
6417 {
6418 	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
6419 	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
6420 	__be32 raw_tcb[TCB_SIZE/4];
6421 	int ret, word;
6422 
6423 	ret = t4_memory_rw_addr(adap, win,
6424 				tcb_addr, sizeof raw_tcb, raw_tcb,
6425 				T4_MEMORY_READ);
6426 	if (ret)
6427 		return ret;
6428 
6429 	for (word = 0; word < 32; word++)
6430 		tcb[word] = be32_to_cpu(raw_tcb[word]);
6431 	return 0;
6432 }
6433 
/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its count. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			/* T4 exposes the cycle count as a direct 64-bit read. */
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		} else {
			/* T5+ exposes it as two words via the debug port. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
6460 
/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its count. */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			/* T4 exposes the cycle count as a direct 64-bit read. */
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+ exposes it as two words via the debug port. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
6487 
/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given Port.  Bit i is set if buffer group i is used by the
 *	Port.  Returns 0 on any error (bad port index or no known mapping).
 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	u32 param, val;
	int ret;

	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* FW version >= 1.16.34.0 can determine buffergroup map using
	 * FW_PARAMS_PARAM_DEV_MPSBGMAP API. We will initially try to
	 * use this API. If it fails, revert back to old hardcoded way.
	 * The value obtained from FW is encoded in below format
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		/* Non-sleeping query: this may run in contexts that
		 * cannot block on the mailbox. */
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret)
			return (val >> (8 * pidx)) & 0xff;
	}

	/* FW_PARAMS_PARAM_DEV_MPSBGMAP API has failed. Falling back to driver
	 * to determine bgmap.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 2: return 1 << (2 * pidx);
		}
		break;
	}

	CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
	       chip_version, nports);
	return 0;
}
6551 
/**
 *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
 *      @adapter: the adapter
 *      @pidx: the port index
 *
 *      Queries the firmware for the port's E2C (Ethernet-to-Core) channel
 *      map and returns the byte for @pidx, or 0 if the port index is out
 *      of range or the firmware query fails.
 */
unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
{
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	u32 param, val = 0;
	int ret;

	if (pidx >= nports) {
		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* FW version >= 1.16.44.0 can determine E2C channel map using
	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.  The value packs one byte
	 * per port, port 0 in the least significant byte.
	 */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
	if (!ret)
		return (val >> (8*pidx)) & 0xff;

	return 0;
}
6580 
/**
 *	t4_get_tp_ch_map - return TP ingress channels associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which TP Ingress Channels are associated with
 *	a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
 *	Returns 0 on any error (bad port index or no known mapping).
 */
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));

	if (pidx >= nports) {
		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		/*
		 * Note that this happens to be the same values as the MPS
		 * Buffer Group Map for these Chips.  But we replicate the code
		 * here because they're really separate concepts.
		 */
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 2: return 1 << pidx;
		}
		break;
	}

	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
	       chip_version, nports);
	return 0;
}
6625 
6626 /**
6627  *      t4_get_port_type_description - return Port Type string description
6628  *      @port_type: firmware Port Type enumeration
6629  */
6630 const char *t4_get_port_type_description(enum fw_port_type port_type)
6631 {
6632 	static const char *const port_type_description[] = {
6633 		"Fiber_XFI",
6634 		"Fiber_XAUI",
6635 		"BT_SGMII",
6636 		"BT_XFI",
6637 		"BT_XAUI",
6638 		"KX4",
6639 		"CX4",
6640 		"KX",
6641 		"KR",
6642 		"SFP",
6643 		"BP_AP",
6644 		"BP4_AP",
6645 		"QSFP_10G",
6646 		"QSA",
6647 		"QSFP",
6648 		"BP40_BA",
6649 		"KR4_100G",
6650 		"CR4_QSFP",
6651 		"CR_QSFP",
6652 		"CR2_QSFP",
6653 		"SFP28",
6654 		"KR_SFP28",
6655 	};
6656 
6657 	if (port_type < ARRAY_SIZE(port_type_description))
6658 		return port_type_description[port_type];
6659 	return "UNKNOWN";
6660 }
6661 
6662 /**
6663  *      t4_get_port_stats_offset - collect port stats relative to a previous
6664  *				   snapshot
6665  *      @adap: The adapter
6666  *      @idx: The port
6667  *      @stats: Current stats to fill
6668  *      @offset: Previous stats snapshot
6669  */
6670 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6671 		struct port_stats *stats,
6672 		struct port_stats *offset)
6673 {
6674 	u64 *s, *o;
6675 	int i;
6676 
6677 	t4_get_port_stats(adap, idx, stats);
6678 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6679 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
6680 			i++, s++, o++)
6681 		*s -= *o;
6682 }
6683 
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

	/*
	 * Per-port MPS statistics registers live at different offsets on
	 * T4 vs. T5+; GET_STAT hides that difference.  Each counter is
	 * read as a 64-bit value at its _L (low word) address.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+, MPS may be configured to include pause frames in the TX
	 * totals; back them out so the counts reflect data frames only
	 * (pause frames are counted as 64 octets each).
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters only for buffer groups this port uses. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6785 
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

	/*
	 * Loopback-port MPS statistics registers live at different
	 * offsets on T4 vs. T5+; GET_STAT hides that difference.  Each
	 * counter is read as a 64-bit value at its _L (low word) address.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters only for buffer groups this port uses. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6833 
/**
 *	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid
 *	is negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY is set when the caller passes a negative qid. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
6857 
/*
 * Initialize the common header of a firmware command: opcode, the REQUEST
 * flag, the READ/WRITE direction flag, and the command length in 16-byte
 * units derived from the command structure's size.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6864 
/**
 *	t4_fwaddrspace_write - write a value into the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command through the given mailbox to write @val at
 *	@addr in the FW_LDST_ADDRSPC_FIRMWARE address space.  Returns the
 *	mailbox command status.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			  u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6883 
/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* MDIO register values are 16 bits wide. */
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}
6917 
6918 /**
6919  *	t4_mdio_wr - write a PHY register through MDIO
6920  *	@adap: the adapter
6921  *	@mbox: mailbox to use for the FW command
6922  *	@phy_addr: the PHY address
6923  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6924  *	@reg: the register to write
6925  *	@valp: value to write
6926  *
6927  *	Issues a FW command through the given mailbox to write a PHY register.
6928  */
6929 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6930 	       unsigned int mmd, unsigned int reg, unsigned int val)
6931 {
6932 	u32 ldst_addrspace;
6933 	struct fw_ldst_cmd c;
6934 
6935 	memset(&c, 0, sizeof(c));
6936 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6937 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6938 					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6939 					ldst_addrspace);
6940 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6941 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6942 					 V_FW_LDST_CMD_MMD(mmd));
6943 	c.u.mdio.raddr = cpu_to_be16(reg);
6944 	c.u.mdio.rval = cpu_to_be16(val);
6945 
6946 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6947 }
6948 
6949 /**
6950  *
6951  *	t4_sge_decode_idma_state - decode the idma state
6952  *	@adap: the adapter
6953  *	@state: the state idma is stuck in
6954  */
6955 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6956 {
6957 	static const char * const t4_decode[] = {
6958 		"IDMA_IDLE",
6959 		"IDMA_PUSH_MORE_CPL_FIFO",
6960 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6961 		"Not used",
6962 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6963 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6964 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6965 		"IDMA_SEND_FIFO_TO_IMSG",
6966 		"IDMA_FL_REQ_DATA_FL_PREP",
6967 		"IDMA_FL_REQ_DATA_FL",
6968 		"IDMA_FL_DROP",
6969 		"IDMA_FL_H_REQ_HEADER_FL",
6970 		"IDMA_FL_H_SEND_PCIEHDR",
6971 		"IDMA_FL_H_PUSH_CPL_FIFO",
6972 		"IDMA_FL_H_SEND_CPL",
6973 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6974 		"IDMA_FL_H_SEND_IP_HDR",
6975 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6976 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6977 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6978 		"IDMA_FL_D_SEND_PCIEHDR",
6979 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6980 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6981 		"IDMA_FL_SEND_PCIEHDR",
6982 		"IDMA_FL_PUSH_CPL_FIFO",
6983 		"IDMA_FL_SEND_CPL",
6984 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6985 		"IDMA_FL_SEND_PAYLOAD",
6986 		"IDMA_FL_REQ_NEXT_DATA_FL",
6987 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6988 		"IDMA_FL_SEND_PADDING",
6989 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6990 		"IDMA_FL_SEND_FIFO_TO_IMSG",
6991 		"IDMA_FL_REQ_DATAFL_DONE",
6992 		"IDMA_FL_REQ_HEADERFL_DONE",
6993 	};
6994 	static const char * const t5_decode[] = {
6995 		"IDMA_IDLE",
6996 		"IDMA_ALMOST_IDLE",
6997 		"IDMA_PUSH_MORE_CPL_FIFO",
6998 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6999 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7000 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7001 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7002 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7003 		"IDMA_SEND_FIFO_TO_IMSG",
7004 		"IDMA_FL_REQ_DATA_FL",
7005 		"IDMA_FL_DROP",
7006 		"IDMA_FL_DROP_SEND_INC",
7007 		"IDMA_FL_H_REQ_HEADER_FL",
7008 		"IDMA_FL_H_SEND_PCIEHDR",
7009 		"IDMA_FL_H_PUSH_CPL_FIFO",
7010 		"IDMA_FL_H_SEND_CPL",
7011 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7012 		"IDMA_FL_H_SEND_IP_HDR",
7013 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7014 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7015 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7016 		"IDMA_FL_D_SEND_PCIEHDR",
7017 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7018 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7019 		"IDMA_FL_SEND_PCIEHDR",
7020 		"IDMA_FL_PUSH_CPL_FIFO",
7021 		"IDMA_FL_SEND_CPL",
7022 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7023 		"IDMA_FL_SEND_PAYLOAD",
7024 		"IDMA_FL_REQ_NEXT_DATA_FL",
7025 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7026 		"IDMA_FL_SEND_PADDING",
7027 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7028 	};
7029 	static const char * const t6_decode[] = {
7030 		"IDMA_IDLE",
7031 		"IDMA_PUSH_MORE_CPL_FIFO",
7032 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7033 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7034 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7035 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7036 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7037 		"IDMA_FL_REQ_DATA_FL",
7038 		"IDMA_FL_DROP",
7039 		"IDMA_FL_DROP_SEND_INC",
7040 		"IDMA_FL_H_REQ_HEADER_FL",
7041 		"IDMA_FL_H_SEND_PCIEHDR",
7042 		"IDMA_FL_H_PUSH_CPL_FIFO",
7043 		"IDMA_FL_H_SEND_CPL",
7044 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7045 		"IDMA_FL_H_SEND_IP_HDR",
7046 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7047 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7048 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7049 		"IDMA_FL_D_SEND_PCIEHDR",
7050 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7051 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7052 		"IDMA_FL_SEND_PCIEHDR",
7053 		"IDMA_FL_PUSH_CPL_FIFO",
7054 		"IDMA_FL_SEND_CPL",
7055 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7056 		"IDMA_FL_SEND_PAYLOAD",
7057 		"IDMA_FL_REQ_NEXT_DATA_FL",
7058 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7059 		"IDMA_FL_SEND_PADDING",
7060 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7061 	};
7062 	static const u32 sge_regs[] = {
7063 		A_SGE_DEBUG_DATA_LOW_INDEX_2,
7064 		A_SGE_DEBUG_DATA_LOW_INDEX_3,
7065 		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7066 	};
7067 	const char **sge_idma_decode;
7068 	int sge_idma_decode_nstates;
7069 	int i;
7070 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
7071 
7072 	/* Select the right set of decode strings to dump depending on the
7073 	 * adapter chip type.
7074 	 */
7075 	switch (chip_version) {
7076 	case CHELSIO_T4:
7077 		sge_idma_decode = (const char **)t4_decode;
7078 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7079 		break;
7080 
7081 	case CHELSIO_T5:
7082 		sge_idma_decode = (const char **)t5_decode;
7083 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7084 		break;
7085 
7086 	case CHELSIO_T6:
7087 		sge_idma_decode = (const char **)t6_decode;
7088 		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7089 		break;
7090 
7091 	default:
7092 		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
7093 		return;
7094 	}
7095 
7096 	if (state < sge_idma_decode_nstates)
7097 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7098 	else
7099 		CH_WARN(adapter, "idma state %d unknown\n", state);
7100 
7101 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7102 		CH_WARN(adapter, "SGE register %#x value %#x\n",
7103 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7104 }
7105 
7106 /**
7107  *      t4_sge_ctxt_flush - flush the SGE context cache
7108  *      @adap: the adapter
7109  *      @mbox: mailbox to use for the FW command
7110  *
7111  *      Issues a FW command through the given mailbox to flush the
7112  *      SGE context cache.
7113  */
7114 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7115 {
7116 	int ret;
7117 	u32 ldst_addrspace;
7118 	struct fw_ldst_cmd c;
7119 
7120 	memset(&c, 0, sizeof(c));
7121 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
7122 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7123 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7124 					ldst_addrspace);
7125 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7126 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7127 
7128 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7129 	return ret;
7130 }
7131 
7132 /**
7133  *      t4_fw_hello - establish communication with FW
7134  *      @adap: the adapter
7135  *      @mbox: mailbox to use for the FW command
7136  *      @evt_mbox: mailbox to receive async FW events
7137  *      @master: specifies the caller's willingness to be the device master
7138  *	@state: returns the current device state (if non-NULL)
7139  *
7140  *	Issues a command to establish communication with FW.  Returns either
7141  *	an error (negative integer) or the mailbox of the Master PF.
7142  */
7143 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7144 		enum dev_master master, enum dev_state *state)
7145 {
7146 	int ret;
7147 	struct fw_hello_cmd c;
7148 	u32 v;
7149 	unsigned int master_mbox;
7150 	int retries = FW_CMD_HELLO_RETRIES;
7151 
7152 retry:
7153 	memset(&c, 0, sizeof(c));
7154 	INIT_CMD(c, HELLO, WRITE);
7155 	c.err_to_clearinit = cpu_to_be32(
7156 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7157 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7158 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7159 					mbox : M_FW_HELLO_CMD_MBMASTER) |
7160 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7161 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7162 		F_FW_HELLO_CMD_CLEARINIT);
7163 
7164 	/*
7165 	 * Issue the HELLO command to the firmware.  If it's not successful
7166 	 * but indicates that we got a "busy" or "timeout" condition, retry
7167 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
7168 	 * retry limit, check to see if the firmware left us any error
7169 	 * information and report that if so ...
7170 	 */
7171 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7172 	if (ret != FW_SUCCESS) {
7173 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7174 			goto retry;
7175 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7176 			t4_report_fw_error(adap);
7177 		return ret;
7178 	}
7179 
7180 	v = be32_to_cpu(c.err_to_clearinit);
7181 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7182 	if (state) {
7183 		if (v & F_FW_HELLO_CMD_ERR)
7184 			*state = DEV_STATE_ERR;
7185 		else if (v & F_FW_HELLO_CMD_INIT)
7186 			*state = DEV_STATE_INIT;
7187 		else
7188 			*state = DEV_STATE_UNINIT;
7189 	}
7190 
7191 	/*
7192 	 * If we're not the Master PF then we need to wait around for the
7193 	 * Master PF Driver to finish setting up the adapter.
7194 	 *
7195 	 * Note that we also do this wait if we're a non-Master-capable PF and
7196 	 * there is no current Master PF; a Master PF may show up momentarily
7197 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
7198 	 * OS loads lots of different drivers rapidly at the same time).  In
7199 	 * this case, the Master PF returned by the firmware will be
7200 	 * M_PCIE_FW_MASTER so the test below will work ...
7201 	 */
7202 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7203 	    master_mbox != mbox) {
7204 		int waiting = FW_CMD_HELLO_TIMEOUT;
7205 
7206 		/*
7207 		 * Wait for the firmware to either indicate an error or
7208 		 * initialized state.  If we see either of these we bail out
7209 		 * and report the issue to the caller.  If we exhaust the
7210 		 * "hello timeout" and we haven't exhausted our retries, try
7211 		 * again.  Otherwise bail with a timeout error.
7212 		 */
7213 		for (;;) {
7214 			u32 pcie_fw;
7215 
7216 			msleep(50);
7217 			waiting -= 50;
7218 
7219 			/*
7220 			 * If neither Error nor Initialialized are indicated
7221 			 * by the firmware keep waiting till we exaust our
7222 			 * timeout ... and then retry if we haven't exhausted
7223 			 * our retries ...
7224 			 */
7225 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7226 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7227 				if (waiting <= 0) {
7228 					if (retries-- > 0)
7229 						goto retry;
7230 
7231 					return -ETIMEDOUT;
7232 				}
7233 				continue;
7234 			}
7235 
7236 			/*
7237 			 * We either have an Error or Initialized condition
7238 			 * report errors preferentially.
7239 			 */
7240 			if (state) {
7241 				if (pcie_fw & F_PCIE_FW_ERR)
7242 					*state = DEV_STATE_ERR;
7243 				else if (pcie_fw & F_PCIE_FW_INIT)
7244 					*state = DEV_STATE_INIT;
7245 			}
7246 
7247 			/*
7248 			 * If we arrived before a Master PF was selected and
7249 			 * there's not a valid Master PF, grab its identity
7250 			 * for our caller.
7251 			 */
7252 			if (master_mbox == M_PCIE_FW_MASTER &&
7253 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
7254 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7255 			break;
7256 		}
7257 	}
7258 
7259 	return master_mbox;
7260 }
7261 
7262 /**
7263  *	t4_fw_bye - end communication with FW
7264  *	@adap: the adapter
7265  *	@mbox: mailbox to use for the FW command
7266  *
7267  *	Issues a command to terminate communication with FW.
7268  */
7269 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7270 {
7271 	struct fw_bye_cmd c;
7272 
7273 	memset(&c, 0, sizeof(c));
7274 	INIT_CMD(c, BYE, WRITE);
7275 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7276 }
7277 
7278 /**
7279  *	t4_fw_reset - issue a reset to FW
7280  *	@adap: the adapter
7281  *	@mbox: mailbox to use for the FW command
7282  *	@reset: specifies the type of reset to perform
7283  *
7284  *	Issues a reset command of the specified type to FW.
7285  */
7286 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7287 {
7288 	struct fw_reset_cmd c;
7289 
7290 	memset(&c, 0, sizeof(c));
7291 	INIT_CMD(c, RESET, WRITE);
7292 	c.val = cpu_to_be32(reset);
7293 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7294 }
7295 
7296 /**
7297  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7298  *	@adap: the adapter
7299  *	@mbox: mailbox to use for the FW RESET command (if desired)
7300  *	@force: force uP into RESET even if FW RESET command fails
7301  *
7302  *	Issues a RESET command to firmware (if desired) with a HALT indication
7303  *	and then puts the microprocessor into RESET state.  The RESET command
7304  *	will only be issued if a legitimate mailbox is provided (mbox <=
7305  *	M_PCIE_FW_MASTER).
7306  *
7307  *	This is generally used in order for the host to safely manipulate the
7308  *	adapter without fear of conflicting with whatever the firmware might
7309  *	be doing.  The only way out of this state is to RESTART the firmware
7310  *	...
7311  */
7312 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7313 {
7314 	int ret = 0;
7315 
7316 	/*
7317 	 * If a legitimate mailbox is provided, issue a RESET command
7318 	 * with a HALT indication.
7319 	 */
7320 	if (mbox <= M_PCIE_FW_MASTER) {
7321 		struct fw_reset_cmd c;
7322 
7323 		memset(&c, 0, sizeof(c));
7324 		INIT_CMD(c, RESET, WRITE);
7325 		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7326 		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7327 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7328 	}
7329 
7330 	/*
7331 	 * Normally we won't complete the operation if the firmware RESET
7332 	 * command fails but if our caller insists we'll go ahead and put the
7333 	 * uP into RESET.  This can be useful if the firmware is hung or even
7334 	 * missing ...  We'll have to take the risk of putting the uP into
7335 	 * RESET without the cooperation of firmware in that case.
7336 	 *
7337 	 * We also force the firmware's HALT flag to be on in case we bypassed
7338 	 * the firmware RESET command above or we're dealing with old firmware
7339 	 * which doesn't have the HALT capability.  This will serve as a flag
7340 	 * for the incoming firmware to know that it's coming out of a HALT
7341 	 * rather than a RESET ... if it's new enough to understand that ...
7342 	 */
7343 	if (ret == 0 || force) {
7344 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7345 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7346 				 F_PCIE_FW_HALT);
7347 	}
7348 
7349 	/*
7350 	 * And we always return the result of the firmware RESET command
7351 	 * even when we force the uP into RESET ...
7352 	 */
7353 	return ret;
7354 }
7355 
7356 /**
7357  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7358  *	@adap: the adapter
7359  *	@reset: if we want to do a RESET to restart things
7360  *
7361  *	Restart firmware previously halted by t4_fw_halt().  On successful
7362  *	return the previous PF Master remains as the new PF Master and there
7363  *	is no need to issue a new HELLO command, etc.
7364  *
7365  *	We do this in two ways:
7366  *
7367  *	 1. If we're dealing with newer firmware we'll simply want to take
7368  *	    the chip's microprocessor out of RESET.  This will cause the
7369  *	    firmware to start up from its start vector.  And then we'll loop
7370  *	    until the firmware indicates it's started again (PCIE_FW.HALT
7371  *	    reset to 0) or we timeout.
7372  *
7373  *	 2. If we're dealing with older firmware then we'll need to RESET
7374  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
7375  *	    flag and automatically RESET itself on startup.
7376  */
7377 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7378 {
7379 	if (reset) {
7380 		/*
7381 		 * Since we're directing the RESET instead of the firmware
7382 		 * doing it automatically, we need to clear the PCIE_FW.HALT
7383 		 * bit.
7384 		 */
7385 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
7386 
7387 		/*
7388 		 * If we've been given a valid mailbox, first try to get the
7389 		 * firmware to do the RESET.  If that works, great and we can
7390 		 * return success.  Otherwise, if we haven't been given a
7391 		 * valid mailbox or the RESET command failed, fall back to
7392 		 * hitting the chip with a hammer.
7393 		 */
7394 		if (mbox <= M_PCIE_FW_MASTER) {
7395 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7396 			msleep(100);
7397 			if (t4_fw_reset(adap, mbox,
7398 					F_PIORST | F_PIORSTMODE) == 0)
7399 				return 0;
7400 		}
7401 
7402 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7403 		msleep(2000);
7404 	} else {
7405 		int ms;
7406 
7407 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7408 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7409 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7410 				return FW_SUCCESS;
7411 			msleep(100);
7412 			ms += 100;
7413 		}
7414 		return -ETIMEDOUT;
7415 	}
7416 	return 0;
7417 }
7418 
7419 /**
7420  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7421  *	@adap: the adapter
7422  *	@mbox: mailbox to use for the FW RESET command (if desired)
7423  *	@fw_data: the firmware image to write
7424  *	@size: image size
7425  *	@force: force upgrade even if firmware doesn't cooperate
7426  *
7427  *	Perform all of the steps necessary for upgrading an adapter's
7428  *	firmware image.  Normally this requires the cooperation of the
7429  *	existing firmware in order to halt all existing activities
7430  *	but if an invalid mailbox token is passed in we skip that step
7431  *	(though we'll still put the adapter microprocessor into RESET in
7432  *	that case).
7433  *
7434  *	On successful return the new firmware will have been loaded and
7435  *	the adapter will have been fully RESET losing all previous setup
7436  *	state.  On unsuccessful return the adapter may be completely hosed ...
7437  *	positive errno indicates that the adapter is ~probably~ intact, a
7438  *	negative errno indicates that things are looking bad ...
7439  */
7440 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7441 		  const u8 *fw_data, unsigned int size, int force)
7442 {
7443 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7444 	unsigned int bootstrap =
7445 	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7446 	int reset, ret;
7447 
7448 	if (!t4_fw_matches_chip(adap, fw_hdr))
7449 		return -EINVAL;
7450 
7451 	/* Disable FW_OK flags so that mbox commands with FW_OK flags check
7452 	 * wont be send when we are flashing FW.
7453 	 */
7454 	adap->flags &= ~FW_OK;
7455 
7456 	if (!bootstrap) {
7457 		ret = t4_fw_halt(adap, mbox, force);
7458 		if (ret < 0 && !force)
7459 			goto out;
7460 	}
7461 
7462 	ret = t4_load_fw(adap, fw_data, size, bootstrap);
7463 	if (ret < 0 || bootstrap)
7464 		goto out;
7465 
7466 	/*
7467 	 * Older versions of the firmware don't understand the new
7468 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7469 	 * restart.  So for newly loaded older firmware we'll have to do the
7470 	 * RESET for it so it starts up on a clean slate.  We can tell if
7471 	 * the newly loaded firmware will handle this right by checking
7472 	 * its header flags to see if it advertises the capability.
7473 	 */
7474 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7475 	ret = t4_fw_restart(adap, mbox, reset);
7476 out:
7477 	adap->flags |= FW_OK;
7478 	return ret;
7479 }
7480 
7481 /**
7482  *	t4_fl_pkt_align - return the fl packet alignment
7483  *	@adap: the adapter
7484  *	is_packed: True when the driver uses packed FLM mode
7485  *
7486  *	T4 has a single field to specify the packing and padding boundary.
7487  *	T5 onwards has separate fields for this and hence the alignment for
7488  *	next packet offset is maximum of these two.
7489  *
7490  */
7491 int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
7492 {
7493 	u32 sge_control, sge_control2;
7494 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7495 
7496 	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
7497 
7498 	/* T4 uses a single control field to specify both the PCIe Padding and
7499 	 * Packing Boundary.  T5 introduced the ability to specify these
7500 	 * separately.  The actual Ingress Packet Data alignment boundary
7501 	 * within Packed Buffer Mode is the maximum of these two
7502 	 * specifications.  (Note that it makes no real practical sense to
7503 	 * have the Pading Boudary be larger than the Packing Boundary but you
7504 	 * could set the chip up that way and, in fact, legacy T4 code would
7505 	 * end doing this because it would initialize the Padding Boundary and
7506 	 * leave the Packing Boundary initialized to 0 (16 bytes).)
7507 	 * Padding Boundary values in T6 starts from 8B,
7508 	 * where as it is 32B for T4 and T5.
7509 	 */
7510 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7511 		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
7512 	else
7513 		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
7514 
7515 	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
7516 
7517 	fl_align = ingpadboundary;
7518 	if (!is_t4(adap->params.chip) && is_packed) {
7519 		/* T5 has a weird interpretation of one of the PCIe Packing
7520 		 * Boundary values.  No idea why ...
7521 		 */
7522 		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
7523 		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
7524 		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
7525 			ingpackboundary = 16;
7526 		else
7527 			ingpackboundary = 1 << (ingpackboundary +
7528 						X_INGPACKBOUNDARY_SHIFT);
7529 
7530 		fl_align = max(ingpadboundary, ingpackboundary);
7531 	}
7532 	return fl_align;
7533 }
7534 
7535 /**
7536  *	t4_fixup_host_params_compat - fix up host-dependent parameters
7537  *	@adap: the adapter
7538  *	@page_size: the host's Base Page Size
7539  *	@cache_line_size: the host's Cache Line Size
7540  *	@chip_compat: maintain compatibility with designated chip
7541  *
7542  *	Various registers in the chip contain values which are dependent on the
7543  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7544  *	those registers with the appropriate values as passed in ...
7545  *
7546  *	@chip_compat is used to limit the set of changes that are made
7547  *	to be compatible with the indicated chip release.  This is used by
7548  *	drivers to maintain compatibility with chip register settings when
7549  *	the drivers haven't [yet] been updated with new chip support.
7550  */
7551 int t4_fixup_host_params_compat(struct adapter *adap,
7552 				unsigned int page_size,
7553 				unsigned int cache_line_size,
7554 				enum chip_type chip_compat)
7555 {
7556 	unsigned int page_shift = fls(page_size) - 1;
7557 	unsigned int sge_hps = page_shift - 10;
7558 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7559 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7560 	unsigned int fl_align_log = fls(fl_align) - 1;
7561 
7562 	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
7563 		     V_HOSTPAGESIZEPF0(sge_hps) |
7564 		     V_HOSTPAGESIZEPF1(sge_hps) |
7565 		     V_HOSTPAGESIZEPF2(sge_hps) |
7566 		     V_HOSTPAGESIZEPF3(sge_hps) |
7567 		     V_HOSTPAGESIZEPF4(sge_hps) |
7568 		     V_HOSTPAGESIZEPF5(sge_hps) |
7569 		     V_HOSTPAGESIZEPF6(sge_hps) |
7570 		     V_HOSTPAGESIZEPF7(sge_hps));
7571 
7572 	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
7573 		t4_set_reg_field(adap, A_SGE_CONTROL,
7574 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7575 				 F_EGRSTATUSPAGESIZE,
7576 				 V_INGPADBOUNDARY(fl_align_log -
7577 						  X_INGPADBOUNDARY_SHIFT) |
7578 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7579 	} else {
7580 		unsigned int pack_align;
7581 		unsigned int ingpad, ingpack;
7582 		unsigned int pcie_cap;
7583 
7584 		/* T5 introduced the separation of the Free List Padding and
7585 		 * Packing Boundaries.  Thus, we can select a smaller Padding
7586 		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7587 		 * Bandwidth, and use a Packing Boundary which is large enough
7588 		 * to avoid false sharing between CPUs, etc.
7589 		 *
7590 		 * For the PCI Link, the smaller the Padding Boundary the
7591 		 * better.  For the Memory Controller, a smaller Padding
7592 		 * Boundary is better until we cross under the Memory Line
7593 		 * Size (the minimum unit of transfer to/from Memory).  If we
7594 		 * have a Padding Boundary which is smaller than the Memory
7595 		 * Line Size, that'll involve a Read-Modify-Write cycle on the
7596 		 * Memory Controller which is never good.
7597 		 */
7598 
7599 		/* We want the Packing Boundary to be based on the Cache Line
7600 		 * Size in order to help avoid False Sharing performance
7601 		 * issues between CPUs, etc.  We also want the Packing
7602 		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
7603 		 * get best performance when the Packing Boundary is a
7604 		 * multiple of the Maximum Payload Size.
7605 		 */
7606 		pack_align = fl_align;
7607 		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
7608 		if (pcie_cap) {
7609 			unsigned int mps, mps_log;
7610 			u16 devctl;
7611 
7612 			/*
7613 			 * The PCIe Device Control Maximum Payload Size field
7614 			 * [bits 7:5] encodes sizes as powers of 2 starting at
7615 			 * 128 bytes.
7616 			 */
7617 			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
7618 					    &devctl);
7619 			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7620 			mps = 1 << mps_log;
7621 			if (mps > pack_align)
7622 				pack_align = mps;
7623 		}
7624 
7625 		/* N.B. T5/T6 have a crazy special interpretation of the "0"
7626 		 * value for the Packing Boundary.  This corresponds to 16
7627 		 * bytes instead of the expected 32 bytes.  So if we want 32
7628 		 * bytes, the best we can really do is 64 bytes ...
7629 		 */
7630 		if (pack_align <= 16) {
7631 			ingpack = X_INGPACKBOUNDARY_16B;
7632 			fl_align = 16;
7633 		} else if (pack_align == 32) {
7634 			ingpack = X_INGPACKBOUNDARY_64B;
7635 			fl_align = 64;
7636 		} else {
7637 			unsigned int pack_align_log = fls(pack_align) - 1;
7638 			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
7639 			fl_align = pack_align;
7640 		}
7641 
7642 		/* Use the smallest Ingress Padding which isn't smaller than
7643 		 * the Memory Controller Read/Write Size.  We'll take that as
7644 		 * being 8 bytes since we don't know of any system with a
7645 		 * wider Memory Controller Bus Width.
7646 		 */
7647 		if (is_t5(adap->params.chip))
7648 			ingpad = X_INGPADBOUNDARY_32B;
7649 		else
7650 			ingpad = X_T6_INGPADBOUNDARY_8B;
7651 
7652 		t4_set_reg_field(adap, A_SGE_CONTROL,
7653 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7654 				 F_EGRSTATUSPAGESIZE,
7655 				 V_INGPADBOUNDARY(ingpad) |
7656 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7657 		t4_set_reg_field(adap, A_SGE_CONTROL2,
7658 				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
7659 				 V_INGPACKBOUNDARY(ingpack));
7660 	}
7661 	/*
7662 	 * Adjust various SGE Free List Host Buffer Sizes.
7663 	 *
7664 	 * This is something of a crock since we're using fixed indices into
7665 	 * the array which are also known by the sge.c code and the T4
7666 	 * Firmware Configuration File.  We need to come up with a much better
7667 	 * approach to managing this array.  For now, the first four entries
7668 	 * are:
7669 	 *
7670 	 *   0: Host Page Size
7671 	 *   1: 64KB
7672 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7673 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7674 	 *
7675 	 * For the single-MTU buffers in unpacked mode we need to include
7676 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7677 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7678 	 * Padding boundary.  All of these are accommodated in the Factory
7679 	 * Default Firmware Configuration File but we need to adjust it for
7680 	 * this host's cache line size.
7681 	 */
7682 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
7683 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
7684 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
7685 		     & ~(fl_align-1));
7686 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
7687 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
7688 		     & ~(fl_align-1));
7689 
7690 	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
7691 
7692 	return 0;
7693 }
7694 
7695 /**
7696  *	t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7697  *	@adap: the adapter
7698  *	@page_size: the host's Base Page Size
7699  *	@cache_line_size: the host's Cache Line Size
7700  *
7701  *	Various registers in T4 contain values which are dependent on the
7702  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7703  *	those registers with the appropriate values as passed in ...
7704  *
7705  *	This routine makes changes which are compatible with T4 chips.
7706  */
7707 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7708 			 unsigned int cache_line_size)
7709 {
7710 	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7711 					   T4_LAST_REV);
7712 }
7713 
7714 /**
7715  *	t4_fw_initialize - ask FW to initialize the device
7716  *	@adap: the adapter
7717  *	@mbox: mailbox to use for the FW command
7718  *
7719  *	Issues a command to FW to partially initialize the device.  This
7720  *	performs initialization that generally doesn't depend on user input.
7721  */
7722 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7723 {
7724 	struct fw_initialize_cmd c;
7725 
7726 	memset(&c, 0, sizeof(c));
7727 	INIT_CMD(c, INITIALIZE, WRITE);
7728 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7729 }
7730 
7731 /**
7732  *	t4_query_params_rw - query FW or device parameters
7733  *	@adap: the adapter
7734  *	@mbox: mailbox to use for the FW command
7735  *	@pf: the PF
7736  *	@vf: the VF
7737  *	@nparams: the number of parameters
7738  *	@params: the parameter names
7739  *	@val: the parameter values
7740  *	@rw: Write and read flag
7741  *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
7742  *
7743  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7744  *	queried at once.
7745  */
7746 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7747 		       unsigned int vf, unsigned int nparams, const u32 *params,
7748 		       u32 *val, int rw, bool sleep_ok)
7749 {
7750 	int i, ret;
7751 	struct fw_params_cmd c;
7752 	__be32 *p = &c.param[0].mnem;
7753 
7754 	if (nparams > 7)
7755 		return -EINVAL;
7756 
7757 	memset(&c, 0, sizeof(c));
7758 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7759 				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
7760 				  V_FW_PARAMS_CMD_PFN(pf) |
7761 				  V_FW_PARAMS_CMD_VFN(vf));
7762 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7763 
7764 	for (i = 0; i < nparams; i++) {
7765 		*p++ = cpu_to_be32(*params++);
7766 		if (rw)
7767 			*p = cpu_to_be32(*(val + i));
7768 		p++;
7769 	}
7770 
7771 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7772 	if (ret == 0)
7773 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7774 			*val++ = be32_to_cpu(*p);
7775 	return ret;
7776 }
7777 
7778 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7779 		    unsigned int vf, unsigned int nparams, const u32 *params,
7780 		    u32 *val)
7781 {
7782 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7783 				  true);
7784 }
7785 
7786 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7787 		    unsigned int vf, unsigned int nparams, const u32 *params,
7788 		    u32 *val)
7789 {
7790 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7791 				  false);
7792 }
7793 
7794 /**
7795  *      t4_set_params_timeout - sets FW or device parameters
7796  *      @adap: the adapter
7797  *      @mbox: mailbox to use for the FW command
7798  *      @pf: the PF
7799  *      @vf: the VF
7800  *      @nparams: the number of parameters
7801  *      @params: the parameter names
7802  *      @val: the parameter values
7803  *      @timeout: the timeout time
7804  *
7805  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7806  *      specified at once.
7807  */
7808 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7809 			  unsigned int pf, unsigned int vf,
7810 			  unsigned int nparams, const u32 *params,
7811 			  const u32 *val, int timeout)
7812 {
7813 	struct fw_params_cmd c;
7814 	__be32 *p = &c.param[0].mnem;
7815 
7816 	if (nparams > 7)
7817 		return -EINVAL;
7818 
7819 	memset(&c, 0, sizeof(c));
7820 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7821 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7822 				  V_FW_PARAMS_CMD_PFN(pf) |
7823 				  V_FW_PARAMS_CMD_VFN(vf));
7824 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7825 
7826 	while (nparams--) {
7827 		*p++ = cpu_to_be32(*params++);
7828 		*p++ = cpu_to_be32(*val++);
7829 	}
7830 
7831 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7832 }
7833 
7834 /**
7835  *	t4_set_params - sets FW or device parameters
7836  *	@adap: the adapter
7837  *	@mbox: mailbox to use for the FW command
7838  *	@pf: the PF
7839  *	@vf: the VF
7840  *	@nparams: the number of parameters
7841  *	@params: the parameter names
7842  *	@val: the parameter values
7843  *
7844  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7845  *	specified at once.
7846  */
7847 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7848 		  unsigned int vf, unsigned int nparams, const u32 *params,
7849 		  const u32 *val)
7850 {
7851 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7852 				     FW_CMD_MAX_TIMEOUT);
7853 }
7854 
7855 /**
7856  *	t4_cfg_pfvf - configure PF/VF resource limits
7857  *	@adap: the adapter
7858  *	@mbox: mailbox to use for the FW command
7859  *	@pf: the PF being configured
7860  *	@vf: the VF being configured
7861  *	@txq: the max number of egress queues
7862  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7863  *	@rxqi: the max number of interrupt-capable ingress queues
7864  *	@rxq: the max number of interruptless ingress queues
7865  *	@tc: the PCI traffic class
7866  *	@vi: the max number of virtual interfaces
7867  *	@cmask: the channel access rights mask for the PF/VF
7868  *	@pmask: the port access rights mask for the PF/VF
7869  *	@nexact: the maximum number of exact MPS filters
7870  *	@rcaps: read capabilities
7871  *	@wxcaps: write/execute capabilities
7872  *
7873  *	Configures resource limits and capabilities for a physical or virtual
7874  *	function.
7875  */
7876 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7877 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7878 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7879 		unsigned int vi, unsigned int cmask, unsigned int pmask,
7880 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7881 {
7882 	struct fw_pfvf_cmd c;
7883 
7884 	memset(&c, 0, sizeof(c));
7885 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7886 				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7887 				  V_FW_PFVF_CMD_VFN(vf));
7888 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7889 	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7890 				     V_FW_PFVF_CMD_NIQ(rxq));
7891 	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7892 				    V_FW_PFVF_CMD_PMASK(pmask) |
7893 				    V_FW_PFVF_CMD_NEQ(txq));
7894 	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7895 				      V_FW_PFVF_CMD_NVI(vi) |
7896 				      V_FW_PFVF_CMD_NEXACTF(nexact));
7897 	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7898 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7899 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7900 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7901 }
7902 
7903 /**
7904  *	t4_alloc_vi_func - allocate a virtual interface
7905  *	@adap: the adapter
7906  *	@mbox: mailbox to use for the FW command
7907  *	@port: physical port associated with the VI
7908  *	@pf: the PF owning the VI
7909  *	@vf: the VF owning the VI
7910  *	@nmac: number of MAC addresses needed (1 to 5)
7911  *	@mac: the MAC addresses of the VI
7912  *	@rss_size: size of RSS table slice associated with this VI
7913  *	@portfunc: which Port Application Function MAC Address is desired
7914  *	@idstype: Intrusion Detection Type
7915  *
7916  *	Allocates a virtual interface for the given physical port.  If @mac is
7917  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7918  *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7919  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7920  *	stored consecutively so the space needed is @nmac * 6 bytes.
7921  *	Returns a negative error number or the non-negative VI id.
7922  */
7923 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7924 		     unsigned int port, unsigned int pf, unsigned int vf,
7925 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
7926 		     unsigned int portfunc, unsigned int idstype)
7927 {
7928 	int ret;
7929 	struct fw_vi_cmd c;
7930 
7931 	memset(&c, 0, sizeof(c));
7932 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7933 				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7934 				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7935 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7936 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7937 				     V_FW_VI_CMD_FUNC(portfunc));
7938 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7939 	c.nmac = nmac - 1;
7940 	if(!rss_size)
7941 		c.norss_rsssize = F_FW_VI_CMD_NORSS;
7942 
7943 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7944 	if (ret)
7945 		return ret;
7946 
7947 	if (mac) {
7948 		memcpy(mac, c.mac, sizeof(c.mac));
7949 		switch (nmac) {
7950 		case 5:
7951 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7952 			/* FALLTHRU */
7953 		case 4:
7954 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7955 			/* FALLTHRU */
7956 		case 3:
7957 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7958 			/* FALLTHRU */
7959 		case 2:
7960 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
7961 		}
7962 	}
7963 	if (rss_size)
7964 		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7965 	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7966 }
7967 
7968 /**
7969  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7970  *      @adap: the adapter
7971  *      @mbox: mailbox to use for the FW command
7972  *      @port: physical port associated with the VI
7973  *      @pf: the PF owning the VI
7974  *      @vf: the VF owning the VI
7975  *      @nmac: number of MAC addresses needed (1 to 5)
7976  *      @mac: the MAC addresses of the VI
7977  *      @rss_size: size of RSS table slice associated with this VI
7978  *
7979  *	backwards compatible and convieniance routine to allocate a Virtual
7980  *	Interface with a Ethernet Port Application Function and Intrustion
7981  *	Detection System disabled.
7982  */
7983 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7984 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7985 		unsigned int *rss_size)
7986 {
7987 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7988 				FW_VI_FUNC_ETH, 0);
7989 }
7990 
7991 
7992 /**
7993  * 	t4_free_vi - free a virtual interface
7994  * 	@adap: the adapter
7995  * 	@mbox: mailbox to use for the FW command
7996  * 	@pf: the PF owning the VI
7997  * 	@vf: the VF owning the VI
7998  * 	@viid: virtual interface identifiler
7999  *
8000  * 	Free a previously allocated virtual interface.
8001  */
8002 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8003 	       unsigned int vf, unsigned int viid)
8004 {
8005 	struct fw_vi_cmd c;
8006 
8007 	memset(&c, 0, sizeof(c));
8008 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8009 				  F_FW_CMD_REQUEST |
8010 				  F_FW_CMD_EXEC |
8011 				  V_FW_VI_CMD_PFN(pf) |
8012 				  V_FW_VI_CMD_VFN(vf));
8013 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8014 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8015 
8016 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8017 }
8018 
8019 /**
8020  *	t4_set_rxmode - set Rx properties of a virtual interface
8021  *	@adap: the adapter
8022  *	@mbox: mailbox to use for the FW command
8023  *	@viid: the VI id
8024  *	@mtu: the new MTU or -1
8025  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8026  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8027  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8028  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8029  *	@sleep_ok: if true we may sleep while awaiting command completion
8030  *
8031  *	Sets Rx properties of a virtual interface.
8032  */
8033 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8034 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
8035 		  bool sleep_ok)
8036 {
8037 	struct fw_vi_rxmode_cmd c;
8038 
8039 	/* convert to FW values */
8040 	if (mtu < 0)
8041 		mtu = M_FW_VI_RXMODE_CMD_MTU;
8042 	if (promisc < 0)
8043 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8044 	if (all_multi < 0)
8045 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8046 	if (bcast < 0)
8047 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8048 	if (vlanex < 0)
8049 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8050 
8051 	memset(&c, 0, sizeof(c));
8052 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8053 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8054 				   V_FW_VI_RXMODE_CMD_VIID(viid));
8055 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8056 	c.mtu_to_vlanexen =
8057 		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8058 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8059 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8060 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8061 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8062 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8063 }
8064 
8065 /**
8066  *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
8067  *	@adap: the adapter
8068  *	@viid: the VI id
8069  *	@mac: the MAC address
8070  *	@mask: the mask
8071  *	@idx: index at which to add this entry
8072  *	@lookup_type: MAC address for inner (1) or outer (0) header
8073  *	@sleep_ok: call is allowed to sleep
8074  *
8075  *	Adds the mac entry at the specified index using raw mac interface.
8076  *
8077  *	Returns a negative error number or the allocated index for this mac.
8078  */
8079 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
8080 			  const u8 *addr, const u8 *mask, unsigned int idx,
8081 			  u8 lookup_type, bool sleep_ok)
8082 {
8083 	int ret = 0;
8084 	struct fw_vi_mac_cmd c;
8085 	struct fw_vi_mac_raw *p = &c.u.raw;
8086 	u32 val;
8087 
8088 	memset(&c, 0, sizeof(c));
8089 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8090 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8091 				   V_FW_VI_MAC_CMD_VIID(viid));
8092 	val = V_FW_CMD_LEN16(1) |
8093 	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8094 	c.freemacs_to_len16 = cpu_to_be32(val);
8095 
8096 	/* Specify that this is an inner mac address */
8097 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
8098 
8099 	/* Lookup Type. Outer header: 0, Inner header: 1 */
8100 	p->data0_pkd = cpu_to_be32(lookup_type << 10);
8101 	p->data0m_pkd = cpu_to_be64(3 << 10); /* Lookup mask */
8102 
8103 	/* Copy the address and the mask */
8104 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8105 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8106 
8107 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8108 	if (ret == 0) {
8109 		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
8110 		if (ret != idx)
8111 			ret = -ENOMEM;
8112 	}
8113 
8114 	return ret;
8115 }
8116 
8117 /**
8118  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
8119  *	@adap: the adapter
8120  *	@mbox: mailbox to use for the FW command
8121  *	@viid: the VI id
8122  *	@free: if true any existing filters for this VI id are first removed
8123  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8124  *	@addr: the MAC address(es)
8125  *	@idx: where to store the index of each allocated filter
8126  *	@hash: pointer to hash address filter bitmap
8127  *	@sleep_ok: call is allowed to sleep
8128  *
8129  *	Allocates an exact-match filter for each of the supplied addresses and
8130  *	sets it to the corresponding address.  If @idx is not %NULL it should
8131  *	have at least @naddr entries, each of which will be set to the index of
8132  *	the filter allocated for the corresponding MAC address.  If a filter
8133  *	could not be allocated for an address its index is set to 0xffff.
8134  *	If @hash is not %NULL addresses that fail to allocate an exact filter
8135  *	are hashed and update the hash filter bitmap pointed at by @hash.
8136  *
8137  *	Returns a negative error number or the number of filters allocated.
8138  */
8139 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8140 		      unsigned int viid, bool free, unsigned int naddr,
8141 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8142 {
8143 	int offset, ret = 0;
8144 	struct fw_vi_mac_cmd c;
8145 	unsigned int nfilters = 0;
8146 	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
8147 	unsigned int rem = naddr;
8148 
8149 	if (naddr > max_naddr)
8150 		return -EINVAL;
8151 
8152 	for (offset = 0; offset < naddr ; /**/) {
8153 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8154 					 ? rem
8155 					 : ARRAY_SIZE(c.u.exact));
8156 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8157 						     u.exact[fw_naddr]), 16);
8158 		struct fw_vi_mac_exact *p;
8159 		int i;
8160 
8161 		memset(&c, 0, sizeof(c));
8162 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8163 					   F_FW_CMD_REQUEST |
8164 					   F_FW_CMD_WRITE |
8165 					   V_FW_CMD_EXEC(free) |
8166 					   V_FW_VI_MAC_CMD_VIID(viid));
8167 		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
8168 						  V_FW_CMD_LEN16(len16));
8169 
8170 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8171 			p->valid_to_idx =
8172 				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8173 					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8174 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8175 		}
8176 
8177 		/*
8178 		 * It's okay if we run out of space in our MAC address arena.
8179 		 * Some of the addresses we submit may get stored so we need
8180 		 * to run through the reply to see what the results were ...
8181 		 */
8182 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8183 		if (ret && ret != -FW_ENOMEM)
8184 			break;
8185 
8186 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8187 			u16 index = G_FW_VI_MAC_CMD_IDX(
8188 						be16_to_cpu(p->valid_to_idx));
8189 
8190 			if (idx)
8191 				idx[offset+i] = (index >=  max_naddr
8192 						 ? 0xffff
8193 						 : index);
8194 			if (index < max_naddr)
8195 				nfilters++;
8196 			else if (hash)
8197 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
8198 		}
8199 
8200 		free = false;
8201 		offset += fw_naddr;
8202 		rem -= fw_naddr;
8203 	}
8204 
8205 	if (ret == 0 || ret == -FW_ENOMEM)
8206 		ret = nfilters;
8207 	return ret;
8208 }
8209 
8210 /**
8211  *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
8212  *	@adap: the adapter
8213  *	@mbox: mailbox to use for the FW command
8214  *	@viid: the VI id
8215  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8216  *	@addr: the MAC address(es)
8217  *	@sleep_ok: call is allowed to sleep
8218  *
8219  *	Frees the exact-match filter for each of the supplied addresses
8220  *
8221  *	Returns a negative error number or the number of filters freed.
8222  */
8223 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8224 		      unsigned int viid, unsigned int naddr,
8225 		      const u8 **addr, bool sleep_ok)
8226 {
8227 	int offset, ret = 0;
8228 	struct fw_vi_mac_cmd c;
8229 	unsigned int nfilters = 0;
8230 	unsigned int max_naddr = is_t4(adap->params.chip) ?
8231 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
8232 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8233 	unsigned int rem = naddr;
8234 
8235 	if (naddr > max_naddr)
8236 		return -EINVAL;
8237 
8238 	for (offset = 0; offset < (int)naddr ; /**/) {
8239 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8240 					 ? rem
8241 					 : ARRAY_SIZE(c.u.exact));
8242 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8243 						     u.exact[fw_naddr]), 16);
8244 		struct fw_vi_mac_exact *p;
8245 		int i;
8246 
8247 		memset(&c, 0, sizeof(c));
8248 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8249 				     F_FW_CMD_REQUEST |
8250 				     F_FW_CMD_WRITE |
8251 				     V_FW_CMD_EXEC(0) |
8252 				     V_FW_VI_MAC_CMD_VIID(viid));
8253 		c.freemacs_to_len16 =
8254 				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8255 					    V_FW_CMD_LEN16(len16));
8256 
8257 		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8258 			p->valid_to_idx = cpu_to_be16(
8259 				F_FW_VI_MAC_CMD_VALID |
8260 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8261 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8262 		}
8263 
8264 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8265 		if (ret)
8266 			break;
8267 
8268 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8269 			u16 index = G_FW_VI_MAC_CMD_IDX(
8270 						be16_to_cpu(p->valid_to_idx));
8271 
8272 			if (index < max_naddr)
8273 				nfilters++;
8274 		}
8275 
8276 		offset += fw_naddr;
8277 		rem -= fw_naddr;
8278 	}
8279 
8280 	if (ret == 0)
8281 		ret = nfilters;
8282 	return ret;
8283 }
8284 
8285 /**
8286  *	t4_change_mac - modifies the exact-match filter for a MAC address
8287  *	@adap: the adapter
8288  *	@mbox: mailbox to use for the FW command
8289  *	@viid: the VI id
8290  *	@idx: index of existing filter for old value of MAC address, or -1
8291  *	@addr: the new MAC address value
8292  *	@persist: whether a new MAC allocation should be persistent
8293  *	@add_smt: if true also add the address to the HW SMT
8294  *
8295  *	Modifies an exact-match filter and sets it to the new MAC address if
8296  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
8297  *	latter case the address is added persistently if @persist is %true.
8298  *
8299  *	Note that in general it is not possible to modify the value of a given
8300  *	filter so the generic way to modify an address filter is to free the one
8301  *	being used by the old address value and allocate a new filter for the
8302  *	new address value.
8303  *
8304  *	Returns a negative error number or the index of the filter with the new
8305  *	MAC value.  Note that this index may differ from @idx.
8306  */
8307 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8308 		  int idx, const u8 *addr, bool persist, bool add_smt)
8309 {
8310 	int ret, mode;
8311 	struct fw_vi_mac_cmd c;
8312 	struct fw_vi_mac_exact *p = c.u.exact;
8313 	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
8314 
8315 	if (idx < 0)		/* new allocation */
8316 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8317 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8318 
8319 	memset(&c, 0, sizeof(c));
8320 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8321 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8322 				   V_FW_VI_MAC_CMD_VIID(viid));
8323 	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
8324 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8325 				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
8326 				      V_FW_VI_MAC_CMD_IDX(idx));
8327 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
8328 
8329 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8330 	if (ret == 0) {
8331 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8332 		if (ret >= max_mac_addr)
8333 			ret = -ENOMEM;
8334 	}
8335 	return ret;
8336 }
8337 
8338 /**
8339  *	t4_set_addr_hash - program the MAC inexact-match hash filter
8340  *	@adap: the adapter
8341  *	@mbox: mailbox to use for the FW command
8342  *	@viid: the VI id
8343  *	@ucast: whether the hash filter should also match unicast addresses
8344  *	@vec: the value to be written to the hash filter
8345  *	@sleep_ok: call is allowed to sleep
8346  *
8347  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
8348  */
8349 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8350 		     bool ucast, u64 vec, bool sleep_ok)
8351 {
8352 	struct fw_vi_mac_cmd c;
8353 	u32 val;
8354 
8355 	memset(&c, 0, sizeof(c));
8356 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8357 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8358 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8359 	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8360 	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8361 	c.freemacs_to_len16 = cpu_to_be32(val);
8362 	c.u.hash.hashvec = cpu_to_be64(vec);
8363 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8364 }
8365 
8366 /**
8367  *      t4_enable_vi_params - enable/disable a virtual interface
8368  *      @adap: the adapter
8369  *      @mbox: mailbox to use for the FW command
8370  *      @viid: the VI id
8371  *      @rx_en: 1=enable Rx, 0=disable Rx
8372  *      @tx_en: 1=enable Tx, 0=disable Tx
8373  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8374  *
8375  *      Enables/disables a virtual interface.  Note that setting DCB Enable
8376  *      only makes sense when enabling a Virtual Interface ...
8377  */
8378 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8379 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8380 {
8381 	struct fw_vi_enable_cmd c;
8382 
8383 	memset(&c, 0, sizeof(c));
8384 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8385 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8386 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8387 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8388 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8389 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8390 				     FW_LEN16(c));
8391 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8392 }
8393 
8394 /**
8395  *	t4_enable_vi - enable/disable a virtual interface
8396  *	@adap: the adapter
8397  *	@mbox: mailbox to use for the FW command
8398  *	@viid: the VI id
8399  *	@rx_en: 1=enable Rx, 0=disable Rx
8400  *	@tx_en: 1=enable Tx, 0=disable Tx
8401  *
8402  *	Enables/disables a virtual interface.  Note that setting DCB Enable
8403  *	only makes sense when enabling a Virtual Interface ...
8404  */
8405 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8406 		 bool rx_en, bool tx_en)
8407 {
8408 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8409 }
8410 
8411 /**
8412  *	t4_identify_port - identify a VI's port by blinking its LED
8413  *	@adap: the adapter
8414  *	@mbox: mailbox to use for the FW command
8415  *	@viid: the VI id
8416  *	@nblinks: how many times to blink LED at 2.5 Hz
8417  *
8418  *	Identifies a VI's port by blinking its LED.
8419  */
8420 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8421 		     unsigned int nblinks)
8422 {
8423 	struct fw_vi_enable_cmd c;
8424 
8425 	memset(&c, 0, sizeof(c));
8426 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8427 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8428 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8429 	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8430 	c.blinkdur = cpu_to_be16(nblinks);
8431 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8432 }
8433 
8434 /**
8435  *	t4_iq_stop - stop an ingress queue and its FLs
8436  *	@adap: the adapter
8437  *	@mbox: mailbox to use for the FW command
8438  *	@pf: the PF owning the queues
8439  *	@vf: the VF owning the queues
8440  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8441  *	@iqid: ingress queue id
8442  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8443  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8444  *
8445  *	Stops an ingress queue and its associated FLs, if any.  This causes
8446  *	any current or future data/messages destined for these queues to be
8447  *	tossed.
8448  */
8449 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8450 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8451 	       unsigned int fl0id, unsigned int fl1id)
8452 {
8453 	struct fw_iq_cmd c;
8454 
8455 	memset(&c, 0, sizeof(c));
8456 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8457 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8458 				  V_FW_IQ_CMD_VFN(vf));
8459 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8460 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8461 	c.iqid = cpu_to_be16(iqid);
8462 	c.fl0id = cpu_to_be16(fl0id);
8463 	c.fl1id = cpu_to_be16(fl1id);
8464 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8465 }
8466 
8467 /**
8468  *	t4_iq_free - free an ingress queue and its FLs
8469  *	@adap: the adapter
8470  *	@mbox: mailbox to use for the FW command
8471  *	@pf: the PF owning the queues
8472  *	@vf: the VF owning the queues
8473  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8474  *	@iqid: ingress queue id
8475  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8476  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8477  *
8478  *	Frees an ingress queue and its associated FLs, if any.
8479  */
8480 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8481 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8482 	       unsigned int fl0id, unsigned int fl1id)
8483 {
8484 	struct fw_iq_cmd c;
8485 
8486 	memset(&c, 0, sizeof(c));
8487 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8488 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8489 				  V_FW_IQ_CMD_VFN(vf));
8490 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8491 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8492 	c.iqid = cpu_to_be16(iqid);
8493 	c.fl0id = cpu_to_be16(fl0id);
8494 	c.fl1id = cpu_to_be16(fl1id);
8495 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8496 }
8497 
8498 /**
8499  *	t4_eth_eq_free - free an Ethernet egress queue
8500  *	@adap: the adapter
8501  *	@mbox: mailbox to use for the FW command
8502  *	@pf: the PF owning the queue
8503  *	@vf: the VF owning the queue
8504  *	@eqid: egress queue id
8505  *
8506  *	Frees an Ethernet egress queue.
8507  */
8508 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8509 		   unsigned int vf, unsigned int eqid)
8510 {
8511 	struct fw_eq_eth_cmd c;
8512 
8513 	memset(&c, 0, sizeof(c));
8514 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8515 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8516 				  V_FW_EQ_ETH_CMD_PFN(pf) |
8517 				  V_FW_EQ_ETH_CMD_VFN(vf));
8518 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8519 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8520 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8521 }
8522 
8523 /**
8524  *	t4_ctrl_eq_free - free a control egress queue
8525  *	@adap: the adapter
8526  *	@mbox: mailbox to use for the FW command
8527  *	@pf: the PF owning the queue
8528  *	@vf: the VF owning the queue
8529  *	@eqid: egress queue id
8530  *
8531  *	Frees a control egress queue.
8532  */
8533 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8534 		    unsigned int vf, unsigned int eqid)
8535 {
8536 	struct fw_eq_ctrl_cmd c;
8537 
8538 	memset(&c, 0, sizeof(c));
8539 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8540 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8541 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
8542 				  V_FW_EQ_CTRL_CMD_VFN(vf));
8543 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8544 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8545 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8546 }
8547 
8548 /**
8549  *	t4_ofld_eq_free - free an offload egress queue
8550  *	@adap: the adapter
8551  *	@mbox: mailbox to use for the FW command
8552  *	@pf: the PF owning the queue
8553  *	@vf: the VF owning the queue
8554  *	@eqid: egress queue id
8555  *
8556  *	Frees a control egress queue.
8557  */
8558 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8559 		    unsigned int vf, unsigned int eqid)
8560 {
8561 	struct fw_eq_ofld_cmd c;
8562 
8563 	memset(&c, 0, sizeof(c));
8564 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
8565 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8566 				  V_FW_EQ_OFLD_CMD_PFN(pf) |
8567 				  V_FW_EQ_OFLD_CMD_VFN(vf));
8568 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
8569 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
8570 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8571 }
8572 
8573 /**
8574  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
8575  *	@link_down_rc: Link Down Reason Code
8576  *
8577  *	Returns a string representation of the Link Down Reason Code.
8578  */
8579 const char *t4_link_down_rc_str(unsigned char link_down_rc)
8580 {
8581 	static const char * const reason[] = {
8582 		"Link Down",
8583 		"Remote Fault",
8584 		"Auto-negotiation Failure",
8585 		"Reserved",
8586 		"Insufficient Airflow",
8587 		"Unable To Determine Reason",
8588 		"No RX Signal Detected",
8589 		"Reserved",
8590 	};
8591 
8592 	if (link_down_rc >= ARRAY_SIZE(reason))
8593 		return "Bad Reason Code";
8594 
8595 	return reason[link_down_rc];
8596 }
8597 
8598 /**
8599  * Get the highest speed for the port from the advertised port capabilities.
8600  * It will be either the highest speed from the list of speeds or
8601  * whatever user has set using ethtool.
8602  */
8603 static inline unsigned int fwcap_to_fw_speed(unsigned int acaps)
8604 {
8605 	if (acaps & FW_PORT_CAP_SPEED_100G)
8606 		return FW_PORT_CAP_SPEED_100G;
8607 	if (acaps & FW_PORT_CAP_SPEED_40G)
8608 		return FW_PORT_CAP_SPEED_40G;
8609 	if (acaps & FW_PORT_CAP_SPEED_25G)
8610 		return FW_PORT_CAP_SPEED_25G;
8611 	if (acaps & FW_PORT_CAP_SPEED_10G)
8612 		return FW_PORT_CAP_SPEED_10G;
8613 	if (acaps & FW_PORT_CAP_SPEED_1G)
8614 		return FW_PORT_CAP_SPEED_1G;
8615 	if (acaps & FW_PORT_CAP_SPEED_100M)
8616 		return FW_PORT_CAP_SPEED_100M;
8617 	return 0;
8618 }
8619 
8620 /**
8621  *	t4_handle_get_port_info - process a FW reply message
8622  *	@pi: the port info
8623  *	@rpl: start of the FW message
8624  *
8625  *	Processes a GET_PORT_INFO FW reply message.
8626  */
8627 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8628 {
8629 	const struct fw_port_cmd *p = (const void *)rpl;
8630 	unsigned int acaps = be16_to_cpu(p->u.info.acap);
8631 	struct adapter *adap = pi->adapter;
8632 
8633 	/* link/module state change message */
8634 	int speed = 0;
8635 	unsigned int fc, fec;
8636 	struct link_config *lc;
8637 	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
8638 	int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
8639 	u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
8640 
8641 	/*
8642 	 * Unfortunately the format of the Link Status returned by the
8643 	 * Firmware isn't the same as the Firmware Port Capabilities bitfield
8644 	 * used everywhere else ...
8645 	 */
8646 	fc = 0;
8647 	if (stat & F_FW_PORT_CMD_RXPAUSE)
8648 		fc |= PAUSE_RX;
8649 	if (stat & F_FW_PORT_CMD_TXPAUSE)
8650 		fc |= PAUSE_TX;
8651 
8652 	fec = fwcap_to_cc_fec(acaps);
8653 
8654 	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
8655 		speed = 100;
8656 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
8657 		speed = 1000;
8658 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
8659 		speed = 10000;
8660 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
8661 		speed = 25000;
8662 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
8663 		speed = 40000;
8664 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
8665 		speed = 100000;
8666 
8667 	lc = &pi->link_cfg;
8668 
8669 	if (mod != pi->mod_type) {
8670 		/*
8671 		 * When a new Transceiver Module is inserted, the Firmware
8672 		 * will examine any Forward Error Correction parameters
8673 		 * present in the Transceiver Module i2c EPROM and determine
8674 		 * the supported and recommended FEC settings from those
8675 		 * based on IEEE 802.3 standards.  We always record the
8676 		 * IEEE 802.3 recommended "automatic" settings.
8677 		 */
8678 		lc->auto_fec = fec;
8679 
8680 		pi->mod_type = mod;
8681 		t4_os_portmod_changed(adap, pi->port_id);
8682 	}
8683 
8684 	if (link_ok != lc->link_ok || speed != lc->speed ||
8685 	    fc != lc->fc || fec != lc->fec) {	/* something changed */
8686 		if (!link_ok && lc->link_ok) {
8687 			unsigned char rc = G_FW_PORT_CMD_LINKDNRC(stat);
8688 
8689 			lc->link_down_rc = rc;
8690 			CH_WARN_RATELIMIT(adap,
8691 				"Port %d link down, reason: %s\n",
8692 				pi->tx_chan, t4_link_down_rc_str(rc));
8693 		}
8694 		lc->link_ok = link_ok;
8695 		lc->speed = speed;
8696 		lc->fc = fc;
8697 		lc->fec = fec;
8698 
8699 		lc->supported = be16_to_cpu(p->u.info.pcap);
8700 		lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
8701 		lc->advertising = be16_to_cpu(p->u.info.acap) & ADVERT_MASK;
8702 
8703 		if (lc->advertising & FW_PORT_CAP_ANEG) {
8704 			lc->autoneg = AUTONEG_ENABLE;
8705 		} else {
8706 			/* When Autoneg is disabled, user needs to set
8707 			 * single speed.
8708 			 * Similar to cxgb4_ethtool.c: set_link_ksettings
8709 			 */
8710 			lc->advertising = 0;
8711 			lc->requested_speed = fwcap_to_fw_speed(acaps);
8712 			lc->autoneg = AUTONEG_DISABLE;
8713 		}
8714 
8715 		t4_os_link_changed(adap, pi->port_id, link_ok);
8716 	}
8717 }
8718 
8719 /**
8720  *	t4_update_port_info - retrieve and update port information if changed
8721  *	@pi: the port_info
8722  *
8723  *	We issue a Get Port Information Command to the Firmware and, if
8724  *	successful, we check to see if anything is different from what we
8725  *	last recorded and update things accordingly.
8726  */
8727  int t4_update_port_info(struct port_info *pi)
8728  {
8729 	struct fw_port_cmd port_cmd;
8730 	int ret;
8731 
8732 	memset(&port_cmd, 0, sizeof port_cmd);
8733 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8734 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
8735 					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
8736 	port_cmd.action_to_len16 = cpu_to_be32(
8737 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8738 		FW_LEN16(port_cmd));
8739 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8740 			 &port_cmd, sizeof(port_cmd), &port_cmd);
8741 	if (ret)
8742 		return ret;
8743 
8744 	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8745 	return 0;
8746 }
8747 
8748 /**
8749  *      t4_handle_fw_rpl - process a FW reply message
8750  *      @adap: the adapter
8751  *      @rpl: start of the FW message
8752  *
8753  *      Processes a FW message, such as link state change messages.
8754  */
8755 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8756 {
8757 	u8 opcode = *(const u8 *)rpl;
8758 
8759 	/*
8760 	 * This might be a port command ... this simplifies the following
8761 	 * conditionals ...  We can get away with pre-dereferencing
8762 	 * action_to_len16 because it's in the first 16 bytes and all messages
8763 	 * will be at least that long.
8764 	 */
8765 	const struct fw_port_cmd *p = (const void *)rpl;
8766 	unsigned int action =
8767 		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8768 
8769 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
8770 		int i;
8771 		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8772 		struct port_info *pi = NULL;
8773 
8774 		for_each_port(adap, i) {
8775 			pi = adap2pinfo(adap, i);
8776 			if (pi->tx_chan == chan)
8777 				break;
8778 		}
8779 
8780 		t4_handle_get_port_info(pi, rpl);
8781 	} else {
8782 		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8783 		return -EINVAL;
8784 	}
8785 	return 0;
8786 }
8787 
8788 /**
8789  *	get_pci_mode - determine a card's PCI mode
8790  *	@adapter: the adapter
8791  *	@p: where to store the PCI settings
8792  *
8793  *	Determines a card's PCI mode and associated parameters, such as speed
8794  *	and width.
8795  */
8796 static void get_pci_mode(struct adapter *adapter,
8797 				   struct pci_params *p)
8798 {
8799 	u16 val;
8800 	u32 pcie_cap;
8801 
8802 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8803 	if (pcie_cap) {
8804 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8805 		p->speed = val & PCI_EXP_LNKSTA_CLS;
8806 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8807 	}
8808 }
8809 
8810 /**
8811  *	init_link_config - initialize a link's SW state
8812  *	@lc: pointer to structure holding the link state
8813  *	@pcaps: link Port Capabilities
8814  *	@acaps: link current Advertised Port Capabilities
8815  *
8816  *	Initializes the SW state maintained for each link, including the link's
8817  *	capabilities and default speed/flow-control/autonegotiation settings.
8818  */
8819 static void init_link_config(struct link_config *lc, unsigned int pcaps,
8820 			     unsigned int acaps)
8821 {
8822 	lc->supported = pcaps;
8823 	lc->lp_advertising = 0;
8824 	lc->requested_speed = 0;
8825 	lc->speed = 0;
8826 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8827 
8828 	/*
8829 	 * For Forward Error Control, we default to whatever the Firmware
8830 	 * tells us the Link is currently advertising.
8831 	 */
8832 	lc->auto_fec = fwcap_to_cc_fec(acaps);
8833 	lc->requested_fec = FEC_AUTO;
8834 	lc->fec = lc->auto_fec;
8835 
8836 	if (lc->supported & FW_PORT_CAP_ANEG) {
8837 		lc->advertising = lc->supported & ADVERT_MASK;
8838 		lc->autoneg = AUTONEG_ENABLE;
8839 		lc->requested_fc |= PAUSE_AUTONEG;
8840 	} else {
8841 		lc->advertising = 0;
8842 		lc->autoneg = AUTONEG_DISABLE;
8843 	}
8844 }
8845 
8846 /**
8847  *	t4_wait_dev_ready - wait till to reads of registers work
8848  *
8849  *	Right after the device is RESET is can take a small amount of time
8850  *	for it to respond to register reads.  Until then, all reads will
8851  *	return either 0xff...ff or 0xee...ee.  Return an error if reads
8852  *	don't work within a reasonable time frame.
8853  */
8854 int t4_wait_dev_ready(struct adapter *adapter)
8855 {
8856 	u32 whoami;
8857 
8858 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8859 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8860 		return 0;
8861 
8862 	msleep(500);
8863 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8864 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8865 		return 0;
8866 
8867 	CH_ERR(adapter, "Device didn't become ready for access, "
8868 	       "whoami = %#x\n", whoami);
8869 	return -EIO;
8870 }
8871 
/*
 * Describes a non-standard supported Flash part: the vendor/model ID
 * returned by the Flash Read ID command and the part's total size.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* result of the SF_RD_ID command */
	u32 size_mb;	/* total size in bytes (e.g. 4 << 20), despite name */
};
8876 
/*
 * Identify the adapter's serial Flash part via the Read ID command and
 * record its size (params.sf_size) and sector count (params.sf_nsec).
 * Returns 0 on success or a negative error from the SF access primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	case 0xef: { /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
9019 
9020 static void set_pcie_completion_timeout(struct adapter *adapter,
9021 						  u8 range)
9022 {
9023 	u16 val;
9024 	u32 pcie_cap;
9025 
9026 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9027 	if (pcie_cap) {
9028 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
9029 		val &= 0xfff0;
9030 		val |= range ;
9031 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
9032 	}
9033 }
9034 
9035 /**
9036  *	t4_get_chip_type - Determine chip type from device ID
9037  *	@adap: the adapter
9038  *	@ver: adapter version
9039  */
9040 enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
9041 {
9042 	enum chip_type chip = 0;
9043 	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
9044 
9045 	/* Retrieve adapter's device ID */
9046 	switch (ver) {
9047 		case CHELSIO_T4_FPGA:
9048 			chip |= CHELSIO_CHIP_FPGA;
9049 			/*FALLTHROUGH*/
9050 		case CHELSIO_T4:
9051 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9052 			break;
9053 		case CHELSIO_T5_FPGA:
9054 			chip |= CHELSIO_CHIP_FPGA;
9055 			/*FALLTHROUGH*/
9056 		case CHELSIO_T5:
9057 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9058 			break;
9059 		case CHELSIO_T6_FPGA:
9060 			chip |= CHELSIO_CHIP_FPGA;
9061 			/*FALLTHROUGH*/
9062 		case CHELSIO_T6:
9063 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9064 			break;
9065 		default:
9066 			CH_ERR(adap, "Device %d is not supported\n",
9067 			       adap->params.pci.device_id);
9068 			return -EINVAL;
9069 	}
9070 
9071 	/* T4A1 chip is no longer supported */
9072 	if (chip == T4_A1) {
9073 		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
9074 		return -EINVAL;
9075 	}
9076 	return chip;
9077 }
9078 
9079 /**
9080  *	t4_prep_pf - prepare SW and HW for PF operation
9081  *	@adapter: the adapter
9082  *
9083  *	Initialize adapter SW state for the various HW modules, set initial
9084  *	values for some adapter tunables on each PF.
9085  */
9086 int t4_prep_pf(struct adapter *adapter)
9087 {
9088 	int ret, ver;
9089 
9090 	ret = t4_wait_dev_ready(adapter);
9091 	if (ret < 0)
9092 		return ret;
9093 
9094 	get_pci_mode(adapter, &adapter->params.pci);
9095 
9096 
9097 	/* Retrieve adapter's device ID
9098 	 */
9099 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
9100 	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);
9101 
9102 	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
9103 	adapter->params.chip = t4_get_chip_type(adapter, ver);
9104 	if (is_t4(adapter->params.chip)) {
9105 		adapter->params.arch.sge_fl_db = F_DBPRIO;
9106 		adapter->params.arch.mps_tcam_size =
9107 				 NUM_MPS_CLS_SRAM_L_INSTANCES;
9108 		adapter->params.arch.mps_rplc_size = 128;
9109 		adapter->params.arch.nchan = NCHAN;
9110 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9111 		adapter->params.arch.vfcount = 128;
9112 		/* Congestion map is for 4 channels so that
9113 		 * MPS can have 4 priority per port.
9114 		 */
9115 		adapter->params.arch.cng_ch_bits_log = 2;
9116 	} else if (is_t5(adapter->params.chip)) {
9117 		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
9118 		adapter->params.arch.mps_tcam_size =
9119 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9120 		adapter->params.arch.mps_rplc_size = 128;
9121 		adapter->params.arch.nchan = NCHAN;
9122 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9123 		adapter->params.arch.vfcount = 128;
9124 		adapter->params.arch.cng_ch_bits_log = 2;
9125 	} else if (is_t6(adapter->params.chip)) {
9126 		adapter->params.arch.sge_fl_db = 0;
9127 		adapter->params.arch.mps_tcam_size =
9128 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9129 		adapter->params.arch.mps_rplc_size = 256;
9130 		adapter->params.arch.nchan = 2;
9131 		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9132 		adapter->params.arch.vfcount = 256;
9133 		/* Congestion map will be for 2 channels so that
9134 		 * MPS can have 8 priority per port.
9135 		 */
9136 		adapter->params.arch.cng_ch_bits_log = 3;
9137 	} else {
9138 		CH_ERR(adapter, "Device %d is not supported\n",
9139 			adapter->params.pci.device_id);
9140 		return -EINVAL;
9141 	}
9142 
9143 	adapter->params.pci.vpd_cap_addr =
9144 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
9145 
9146 	if (is_fpga(adapter->params.chip)) {
9147 		/* FPGA */
9148 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
9149 	} else {
9150 		/* ASIC */
9151 		adapter->params.cim_la_size = CIMLA_SIZE;
9152 	}
9153 
9154 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9155 
9156 	/*
9157 	 * Default port and clock for debugging in case we can't reach FW.
9158 	 */
9159 	adapter->params.nports = 1;
9160 	adapter->params.portvec = 1;
9161 	adapter->params.vpd.cclk = 50000;
9162 
9163 	/* Set pci completion timeout value to 4 seconds. */
9164 	set_pcie_completion_timeout(adapter, 0xd);
9165 	return 0;
9166 }
9167 
9168 /**
9169  *      t4_prep_master_pf - prepare SW for master PF operations
9170  *      @adapter: the adapter
9171  *
9172  */
9173 int t4_prep_master_pf(struct adapter *adapter)
9174 {
9175 	int ret;
9176 
9177 	ret = t4_prep_pf(adapter);
9178 	if (ret < 0)
9179 		return ret;
9180 
9181 	ret = t4_get_flash_params(adapter);
9182 	if (ret < 0) {
9183 		CH_ERR(adapter,
9184 		       "Unable to retrieve Flash parameters ret = %d\n", -ret);
9185 		return ret;
9186 	}
9187 
9188 	return 0;
9189 }
9190 
9191 /**
9192  *      t4_prep_adapter - prepare SW and HW for operation
9193  *      @adapter: the adapter
9194  *      @reset: if true perform a HW reset
9195  *
9196  *      Initialize adapter SW state for the various HW modules, set initial
9197  *      values for some adapter tunables.
9198  */
9199 int t4_prep_adapter(struct adapter *adapter, bool reset)
9200 {
9201 	return t4_prep_master_pf(adapter);
9202 }
9203 
9204 /**
9205  *	t4_shutdown_adapter - shut down adapter, host & wire
9206  *	@adapter: the adapter
9207  *
9208  *	Perform an emergency shutdown of the adapter and stop it from
9209  *	continuing any further communication on the ports or DMA to the
9210  *	host.  This is typically used when the adapter and/or firmware
9211  *	have crashed and we want to prevent any further accidental
9212  *	communication with the rest of the world.  This will also force
9213  *	the port Link Status to go down -- if register writes work --
9214  *	which should help our peers figure out that we're down.
9215  */
9216 int t4_shutdown_adapter(struct adapter *adapter)
9217 {
9218 	int port;
9219 
9220 	t4_intr_disable(adapter);
9221 	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
9222 	for_each_port(adapter, port) {
9223 		u32 a_port_cfg = is_t4(adapter->params.chip) ?
9224 				 PORT_REG(port, A_XGMAC_PORT_CFG) :
9225 				 T5_PORT_REG(port, A_MAC_PORT_CFG);
9226 
9227 		t4_write_reg(adapter, a_port_cfg,
9228 			     t4_read_reg(adapter, a_port_cfg)
9229 			     & ~V_SIGNAL_DET(1));
9230 	}
9231 	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
9232 
9233 	return 0;
9234 }
9235 
9236 /**
9237  *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9238  *	@adapter: the adapter
9239  *	@qid: the Queue ID
9240  *	@qtype: the Ingress or Egress type for @qid
9241  *	@user: true if this request is for a user mode queue
9242  *	@pbar2_qoffset: BAR2 Queue Offset
9243  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9244  *
9245  *	Returns the BAR2 SGE Queue Registers information associated with the
9246  *	indicated Absolute Queue ID.  These are passed back in return value
9247  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9248  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9249  *
9250  *	This may return an error which indicates that BAR2 SGE Queue
9251  *	registers aren't available.  If an error is not returned, then the
9252  *	following values are returned:
9253  *
9254  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9255  *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9256  *
9257  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9258  *	require the "Inferred Queue ID" ability may be used.  E.g. the
9259  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9260  *	then these "Inferred Queue ID" register may not be used.
9261  */
9262 int t4_bar2_sge_qregs(struct adapter *adapter,
9263 		      unsigned int qid,
9264 		      enum t4_bar2_qtype qtype,
9265 		      int user,
9266 		      u64 *pbar2_qoffset,
9267 		      unsigned int *pbar2_qid)
9268 {
9269 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9270 	u64 bar2_page_offset, bar2_qoffset;
9271 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9272 
9273 	/* T4 doesn't support BAR2 SGE Queue registers for kernel
9274 	 * mode queues.
9275 	 */
9276 	if (!user && is_t4(adapter->params.chip))
9277 		return -EINVAL;
9278 
9279 	/* Get our SGE Page Size parameters.
9280 	 */
9281 	page_shift = adapter->params.sge.hps + 10;
9282 	page_size = 1 << page_shift;
9283 
9284 	/* Get the right Queues per Page parameters for our Queue.
9285 	 */
9286 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9287 		     ? adapter->params.sge.eq_qpp
9288 		     : adapter->params.sge.iq_qpp);
9289 	qpp_mask = (1 << qpp_shift) - 1;
9290 
9291 	/* Calculate the basics of the BAR2 SGE Queue register area:
9292 	 *  o The BAR2 page the Queue registers will be in.
9293 	 *  o The BAR2 Queue ID.
9294 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
9295 	 */
9296 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9297 	bar2_qid = qid & qpp_mask;
9298 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9299 
9300 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
9301 	 * hardware will infer the Absolute Queue ID simply from the writes to
9302 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9303 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
9304 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9305 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9306 	 * from the BAR2 Page and BAR2 Queue ID.
9307 	 *
9308 	 * One important censequence of this is that some BAR2 SGE registers
9309 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9310 	 * there.  But other registers synthesize the SGE Queue ID purely
9311 	 * from the writes to the registers -- the Write Combined Doorbell
9312 	 * Buffer is a good example.  These BAR2 SGE Registers are only
9313 	 * available for those BAR2 SGE Register areas where the SGE Absolute
9314 	 * Queue ID can be inferred from simple writes.
9315 	 */
9316 	bar2_qoffset = bar2_page_offset;
9317 	bar2_qinferred = (bar2_qid_offset < page_size);
9318 	if (bar2_qinferred) {
9319 		bar2_qoffset += bar2_qid_offset;
9320 		bar2_qid = 0;
9321 	}
9322 
9323 	*pbar2_qoffset = bar2_qoffset;
9324 	*pbar2_qid = bar2_qid;
9325 	return 0;
9326 }
9327 
9328 /**
9329  *	t4_init_devlog_params - initialize adapter->params.devlog
9330  *	@adap: the adapter
9331  *	@fw_attach: whether we can talk to the firmware
9332  *
9333  *	Initialize various fields of the adapter's Firmware Device Log
9334  *	Parameters structure.
9335  */
9336 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
9337 {
9338 	struct devlog_params *dparams = &adap->params.devlog;
9339 	u32 pf_dparams;
9340 	unsigned int devlog_meminfo;
9341 	struct fw_devlog_cmd devlog_cmd;
9342 	int ret;
9343 
9344 	/* If we're dealing with newer firmware, the Device Log Paramerters
9345 	 * are stored in a designated register which allows us to access the
9346 	 * Device Log even if we can't talk to the firmware.
9347 	 */
9348 	pf_dparams =
9349 		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
9350 	if (pf_dparams) {
9351 		unsigned int nentries, nentries128;
9352 
9353 		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
9354 		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
9355 
9356 		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
9357 		nentries = (nentries128 + 1) * 128;
9358 		dparams->size = nentries * sizeof(struct fw_devlog_e);
9359 
9360 		return 0;
9361 	}
9362 
9363 	/*
9364 	 * For any failing returns ...
9365 	 */
9366 	memset(dparams, 0, sizeof *dparams);
9367 
9368 	/*
9369 	 * If we can't talk to the firmware, there's really nothing we can do
9370 	 * at this point.
9371 	 */
9372 	if (!fw_attach)
9373 		return -ENXIO;
9374 
9375 	/* Otherwise, ask the firmware for it's Device Log Parameters.
9376 	 */
9377 	memset(&devlog_cmd, 0, sizeof devlog_cmd);
9378 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9379 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
9380 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9381 	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9382 			 &devlog_cmd);
9383 	if (ret)
9384 		return ret;
9385 
9386 	devlog_meminfo =
9387 		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9388 	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
9389 	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
9390 	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9391 
9392 	return 0;
9393 }
9394 
9395 /**
9396  *	t4_init_sge_params - initialize adap->params.sge
9397  *	@adapter: the adapter
9398  *
9399  *	Initialize various fields of the adapter's SGE Parameters structure.
9400  */
9401 int t4_init_sge_params(struct adapter *adapter)
9402 {
9403 	struct sge_params *sge_params = &adapter->params.sge;
9404 	u32 hps, qpp;
9405 	unsigned int s_hps, s_qpp;
9406 
9407 	/* Extract the SGE Page Size for our PF.
9408 	 */
9409 	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
9410 	s_hps = (S_HOSTPAGESIZEPF0 +
9411 		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
9412 	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
9413 
9414 	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9415 	 */
9416 	s_qpp = (S_QUEUESPERPAGEPF0 +
9417 		(S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
9418 	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
9419 	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9420 	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
9421 	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9422 
9423 	return 0;
9424 }
9425 
9426 /**
9427  *      t4_init_tp_params - initialize adap->params.tp
9428  *      @adap: the adapter
9429  * 	@sleep_ok: if true we may sleep while awaiting command completion
9430  *
9431  *      Initialize various fields of the adapter's TP Parameters structure.
9432  */
9433 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9434 {
9435 	int chan;
9436 	u32 v;
9437 
9438 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
9439 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
9440 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
9441 
9442 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9443 	for (chan = 0; chan < NCHAN; chan++)
9444 		adap->params.tp.tx_modq[chan] = chan;
9445 
9446 	/* Cache the adapter's Compressed Filter Mode and global Incress
9447 	 * Configuration.
9448 	 */
9449 	t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9450 		       A_TP_VLAN_PRI_MAP, sleep_ok);
9451 	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9452 		       A_TP_INGRESS_CONFIG, sleep_ok);
9453 
9454 	/* For T6, cache the adapter's compressed error vector
9455 	 * and passing outer header info for encapsulated packets.
9456 	 */
9457 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9458 		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
9459 		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
9460 	}
9461 
9462 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9463 	 * shift positions of several elements of the Compressed Filter Tuple
9464 	 * for this adapter which we need frequently ...
9465 	 */
9466 	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
9467 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
9468 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
9469 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
9470 	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
9471 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
9472 	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9473 								F_ETHERTYPE);
9474 	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9475 								F_MACMATCH);
9476 	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9477 								F_MPSHITTYPE);
9478 	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9479 							   F_FRAGMENTATION);
9480 
9481 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9482 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
9483 	 */
9484 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
9485 		adap->params.tp.vnic_shift = -1;
9486 
9487 	return 0;
9488 }
9489 
9490 /**
9491  *      t4_filter_field_shift - calculate filter field shift
9492  *      @adap: the adapter
9493  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9494  *
9495  *      Return the shift position of a filter field within the Compressed
9496  *      Filter Tuple.  The filter field is specified via its selection bit
9497  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
9498  */
9499 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9500 {
9501 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9502 	unsigned int sel;
9503 	int field_shift;
9504 
9505 	if ((filter_mode & filter_sel) == 0)
9506 		return -1;
9507 
9508 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9509 		switch (filter_mode & sel) {
9510 		case F_FCOE:
9511 			field_shift += W_FT_FCOE;
9512 			break;
9513 		case F_PORT:
9514 			field_shift += W_FT_PORT;
9515 			break;
9516 		case F_VNIC_ID:
9517 			field_shift += W_FT_VNIC_ID;
9518 			break;
9519 		case F_VLAN:
9520 			field_shift += W_FT_VLAN;
9521 			break;
9522 		case F_TOS:
9523 			field_shift += W_FT_TOS;
9524 			break;
9525 		case F_PROTOCOL:
9526 			field_shift += W_FT_PROTOCOL;
9527 			break;
9528 		case F_ETHERTYPE:
9529 			field_shift += W_FT_ETHERTYPE;
9530 			break;
9531 		case F_MACMATCH:
9532 			field_shift += W_FT_MACMATCH;
9533 			break;
9534 		case F_MPSHITTYPE:
9535 			field_shift += W_FT_MPSHITTYPE;
9536 			break;
9537 		case F_FRAGMENTATION:
9538 			field_shift += W_FT_FRAGMENTATION;
9539 			break;
9540 		}
9541 	}
9542 	return field_shift;
9543 }
9544 
9545 /**
9546  *	t4_create_filter_info - return Compressed Filter Value/Mask tuple
9547  *	@adapter: the adapter
9548  *	@filter_value: Filter Value return value pointer
9549  *	@filter_mask: Filter Mask return value pointer
9550  *	@fcoe: FCoE filter selection
9551  *	@port: physical port filter selection
9552  *	@vnic: Virtual NIC ID filter selection
9553  *	@vlan: VLAN ID filter selection
9554  *	@vlan_pcp: VLAN Priority Code Point
9555  *	@vlan_dei: VLAN Drop Eligibility Indicator
 *	@tos: Type Of Service filter selection
9557  *	@protocol: IP Protocol filter selection
9558  *	@ethertype: Ethernet Type filter selection
9559  *	@macmatch: MPS MAC Index filter selection
9560  *	@matchtype: MPS Hit Type filter selection
9561  *	@frag: IP Fragmentation filter selection
9562  *
9563  *	Construct a Compressed Filter Value/Mask tuple based on a set of
9564  *	"filter selection" values.  For each passed filter selection value
9565  *	which is greater than or equal to 0, we put that value into the
9566  *	constructed Filter Value and the appropriate mask into the Filter
9567  *	Mask.  If a filter selections is specified which is not currently
9568  *	configured into the hardware, an error will be returned.  Otherwise
 *	the constructed Filter Value/Mask tuple will be returned via the
9570  *	specified return value pointers and success will be returned.
9571  *
9572  *	All filter selection values and the returned Filter Value/Mask values
9573  *	are in Host-Endian format.
9574  */
9575 int t4_create_filter_info(const struct adapter *adapter,
9576 			  u64 *filter_value, u64 *filter_mask,
9577 			  int fcoe, int port, int vnic,
9578 			  int vlan, int vlan_pcp, int vlan_dei,
9579 			  int tos, int protocol, int ethertype,
9580 			  int macmatch, int matchtype, int frag)
9581 {
9582 	const struct tp_params *tp = &adapter->params.tp;
9583 	u64 v, m;
9584 
9585 	/*
9586 	 * If any selected filter field isn't enabled, return an error.
9587 	 */
9588 	#define BAD_FILTER(__field) \
9589 		((__field) >= 0 && tp->__field##_shift < 0)
9590 	if (BAD_FILTER(fcoe)       ||
9591 	    BAD_FILTER(port)       ||
9592 	    BAD_FILTER(vnic)       ||
9593 	    BAD_FILTER(vlan)       ||
9594 	    BAD_FILTER(tos)        ||
9595 	    BAD_FILTER(protocol)   ||
9596 	    BAD_FILTER(ethertype)  ||
9597 	    BAD_FILTER(macmatch)   ||
9598 	    BAD_FILTER(matchtype) ||
9599 	    BAD_FILTER(frag))
9600 		return -EINVAL;
9601 	#undef BAD_FILTER
9602 
9603 	/*
9604 	 * We have to have VLAN ID selected if we want to also select on
9605 	 * either the Priority Code Point or Drop Eligibility Indicator
9606 	 * fields.
9607 	 */
9608 	if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
9609 		return -EINVAL;
9610 
9611 	/*
9612 	 * Construct Filter Value and Mask.
9613 	 */
9614 	v = m = 0;
9615 	#define SET_FILTER_FIELD(__field, __width) \
9616 	do { \
9617 		if ((__field) >= 0) { \
9618 			const int shift = tp->__field##_shift; \
9619 			\
9620 			v |= (__field) << shift; \
9621 			m |= ((1ULL << (__width)) - 1) << shift; \
9622 		} \
9623 	} while (0)
9624 	SET_FILTER_FIELD(fcoe,      W_FT_FCOE);
9625 	SET_FILTER_FIELD(port,      W_FT_PORT);
9626 	SET_FILTER_FIELD(tos,       W_FT_TOS);
9627 	SET_FILTER_FIELD(protocol,  W_FT_PROTOCOL);
9628 	SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
9629 	SET_FILTER_FIELD(macmatch,  W_FT_MACMATCH);
9630 	SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
9631 	SET_FILTER_FIELD(frag,      W_FT_FRAGMENTATION);
9632 	#undef SET_FILTER_FIELD
9633 
9634 	/*
9635 	 * We handle VNIC ID and VLANs separately because they're slightly
9636 	 * different than the rest of the fields.  Both require that a
9637 	 * corresponding "valid" bit be set in the Filter Value and Mask.
9638 	 * These bits are in the top bit of the field.  Additionally, we can
9639 	 * select the Priority Code Point and Drop Eligibility Indicator
9640 	 * fields for VLANs as an option.  Remember that the format of a VLAN
9641 	 * Tag is:
9642 	 *
9643 	 * bits: 3  1      12
9644 	 *     +---+-+------------+
9645 	 *     |PCP|D|   VLAN ID  |
9646 	 *     +---+-+------------+
9647 	 */
9648 	if (vnic >= 0) {
9649 		v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
9650 		m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
9651 	}
9652 	if (vlan >= 0) {
9653 		v |= ((1ULL << (W_FT_VLAN-1)) | vlan)  << tp->vlan_shift;
9654 		m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
9655 
9656 		if (vlan_dei >= 0) {
9657 			v |= vlan_dei << (tp->vlan_shift + 12);
9658 			m |= 0x7      << (tp->vlan_shift + 12);
9659 		}
9660 		if (vlan_pcp >= 0) {
9661 			v |= vlan_pcp << (tp->vlan_shift + 13);
9662 			m |= 0x7      << (tp->vlan_shift + 13);
9663 		}
9664 	}
9665 
9666 	/*
9667 	 * Pass back computed Filter Value and Mask; return success.
9668 	 */
9669 	*filter_value = v;
9670 	*filter_mask = m;
9671 	return 0;
9672 }
9673 
9674 int t4_init_rss_mode(struct adapter *adap, int mbox)
9675 {
9676 	int i, ret;
9677 	struct fw_rss_vi_config_cmd rvc;
9678 
9679 	memset(&rvc, 0, sizeof(rvc));
9680 
9681 	for_each_port(adap, i) {
9682 		struct port_info *p = adap2pinfo(adap, i);
9683 		rvc.op_to_viid =
9684 			cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
9685 				    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9686 				    V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
9687 		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9688 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9689 		if (ret)
9690 			return ret;
9691 		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9692 	}
9693 	return 0;
9694 }
9695 
9696 static int t4_init_portmirror(struct port_info *pi, int mbox,
9697 		       int port, int pf, int vf)
9698 {
9699 	struct adapter *adapter = pi->adapter;
9700 	int ret;
9701 
9702 	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL);
9703 	if (ret < 0)
9704 		return ret;
9705 
9706 	CH_INFO(adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
9707 		port, G_FW_VIID_PFN(ret), G_FW_VIID_VIN(ret));
9708 
9709 	pi->viid_mirror = ret;
9710 	return 0;
9711 }
9712 
9713 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf)
9714 {
9715 	int ret, i, j = 0;
9716 
9717 	for_each_port(adap, i) {
9718 		struct port_info *pi = adap2pinfo(adap, i);
9719 
9720 		while ((adap->params.portvec & (1 << j)) == 0)
9721 			j++;
9722 
9723 		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
9724 		if (ret)
9725 			return ret;
9726 		j++;
9727 	}
9728 	return 0;
9729 }
9730 
9731 /**
9732  *	t4_init_portinfo - allocate a virtual interface and initialize port_info
9733  *	@pi: the port_info
9734  *	@mbox: mailbox to use for the FW command
9735  *	@port: physical port associated with the VI
9736  *	@pf: the PF owning the VI
9737  *	@vf: the VF owning the VI
9738  *	@mac: the MAC address of the VI
9739  *
9740  *	Allocates a virtual interface for the given physical port.  If @mac is
9741  *	not %NULL it contains the MAC address of the VI as assigned by FW.
9742  *	@mac should be large enough to hold an Ethernet address.
9743  *	Returns < 0 on error.
9744  */
9745 int t4_init_portinfo(struct port_info *pi, int mbox,
9746 		     int port, int pf, int vf, u8 mac[])
9747 {
9748 	int ret;
9749 	struct fw_port_cmd c;
9750 	unsigned int rss_size;
9751 
9752 	memset(&c, 0, sizeof(c));
9753 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9754 				     F_FW_CMD_REQUEST | F_FW_CMD_READ |
9755 				     V_FW_PORT_CMD_PORTID(port));
9756 	c.action_to_len16 = cpu_to_be32(
9757 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
9758 		FW_LEN16(c));
9759 	ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
9760 	if (ret)
9761 		return ret;
9762 
9763 	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9764 	if (ret < 0)
9765 		return ret;
9766 
9767 	pi->viid = ret;
9768 	pi->tx_chan = port;
9769 	pi->lport = port;
9770 	pi->rss_size = rss_size;
9771 	pi->rx_chan = t4_get_tp_e2c_map(pi->adapter, port);
9772 
9773 	ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
9774 	pi->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
9775 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
9776 	pi->port_type = G_FW_PORT_CMD_PTYPE(ret);
9777 	pi->mod_type = FW_PORT_MOD_TYPE_NA;
9778 
9779 	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
9780 			 be16_to_cpu(c.u.info.acap));
9781 	return 0;
9782 }
9783 
9784 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9785 {
9786 	u8 addr[6];
9787 	int ret, i, j = 0;
9788 
9789 	for_each_port(adap, i) {
9790 		struct port_info *pi = adap2pinfo(adap, i);
9791 
9792 		while ((adap->params.portvec & (1 << j)) == 0)
9793 			j++;
9794 
9795 		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9796 		if (ret)
9797 			return ret;
9798 
9799 		t4_os_set_hw_addr(adap, i, addr);
9800 		j++;
9801 	}
9802 	return 0;
9803 }
9804 
9805 /**
9806  *	t4_read_cimq_cfg - read CIM queue configuration
9807  *	@adap: the adapter
9808  *	@base: holds the queue base addresses in bytes
9809  *	@size: holds the queue sizes in bytes
9810  *	@thres: holds the queue full thresholds in bytes
9811  *
9812  *	Returns the current configuration of the CIM queues, starting with
9813  *	the IBQs, then the OBQs.
9814  */
9815 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9816 {
9817 	unsigned int i, v;
9818 	int cim_num_obq = is_t4(adap->params.chip) ?
9819 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9820 
9821 	for (i = 0; i < CIM_NUM_IBQ; i++) {
9822 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9823 			     V_QUENUMSELECT(i));
9824 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9825 		/* value is in 256-byte units */
9826 		*base++ = G_CIMQBASE(v) * 256;
9827 		*size++ = G_CIMQSIZE(v) * 256;
9828 		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
9829 	}
9830 	for (i = 0; i < cim_num_obq; i++) {
9831 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9832 			     V_QUENUMSELECT(i));
9833 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9834 		/* value is in 256-byte units */
9835 		*base++ = G_CIMQBASE(v) * 256;
9836 		*size++ = G_CIMQSIZE(v) * 256;
9837 	}
9838 }
9839 
9840 /**
9841  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
9842  *	@adap: the adapter
9843  *	@qid: the queue index
9844  *	@data: where to store the queue contents
9845  *	@n: capacity of @data in 32-bit words
9846  *
9847  *	Reads the contents of the selected CIM queue starting at address 0 up
9848  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9849  *	error and the number of 32-bit words actually read on success.
9850  */
9851 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9852 {
9853 	int i, err, attempts;
9854 	unsigned int addr;
9855 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
9856 
9857 	if (qid > 5 || (n & 3))
9858 		return -EINVAL;
9859 
9860 	addr = qid * nwords;
9861 	if (n > nwords)
9862 		n = nwords;
9863 
9864 	/* It might take 3-10ms before the IBQ debug read access is allowed.
9865 	 * Wait for 1 Sec with a delay of 1 usec.
9866 	 */
9867 	attempts = 1000000;
9868 
9869 	for (i = 0; i < n; i++, addr++) {
9870 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9871 			     F_IBQDBGEN);
9872 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9873 				      attempts, 1);
9874 		if (err)
9875 			return err;
9876 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9877 	}
9878 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9879 	return i;
9880 }
9881 
9882 /**
9883  *	t4_read_cim_obq - read the contents of a CIM outbound queue
9884  *	@adap: the adapter
9885  *	@qid: the queue index
9886  *	@data: where to store the queue contents
9887  *	@n: capacity of @data in 32-bit words
9888  *
9889  *	Reads the contents of the selected CIM queue starting at address 0 up
9890  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9891  *	error and the number of 32-bit words actually read on success.
9892  */
9893 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9894 {
9895 	int i, err;
9896 	unsigned int addr, v, nwords;
9897 	int cim_num_obq = is_t4(adap->params.chip) ?
9898 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9899 
9900 	if ((qid > (cim_num_obq - 1)) || (n & 3))
9901 		return -EINVAL;
9902 
9903 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9904 		     V_QUENUMSELECT(qid));
9905 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9906 
9907 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
9908 	nwords = G_CIMQSIZE(v) * 64;  /* same */
9909 	if (n > nwords)
9910 		n = nwords;
9911 
9912 	for (i = 0; i < n; i++, addr++) {
9913 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
9914 			     F_OBQDBGEN);
9915 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9916 				      2, 1);
9917 		if (err)
9918 			return err;
9919 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
9920 	}
9921 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
9922 	return i;
9923 }
9924 
9925 /**
9926  *	t4_cim_read - read a block from CIM internal address space
9927  *	@adap: the adapter
9928  *	@addr: the start address within the CIM address space
9929  *	@n: number of words to read
9930  *	@valp: where to store the result
9931  *
9932  *	Reads a block of 4-byte words from the CIM intenal address space.
9933  */
9934 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9935 		unsigned int *valp)
9936 {
9937 	int ret = 0;
9938 
9939 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9940 		return -EBUSY;
9941 
9942 	for ( ; !ret && n--; addr += 4) {
9943 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9944 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9945 				      0, 5, 2);
9946 		if (!ret)
9947 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9948 	}
9949 	return ret;
9950 }
9951 
9952 /**
9953  *	t4_cim_write - write a block into CIM internal address space
9954  *	@adap: the adapter
9955  *	@addr: the start address within the CIM address space
9956  *	@n: number of words to write
9957  *	@valp: set of values to write
9958  *
9959  *	Writes a block of 4-byte words into the CIM intenal address space.
9960  */
9961 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9962 		 const unsigned int *valp)
9963 {
9964 	int ret = 0;
9965 
9966 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9967 		return -EBUSY;
9968 
9969 	for ( ; !ret && n--; addr += 4) {
9970 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9971 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9972 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9973 				      0, 5, 2);
9974 	}
9975 	return ret;
9976 }
9977 
/* Convenience wrapper: write a single word into CIM internal space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int v = val;

	return t4_cim_write(adap, addr, 1, &v);
}
9983 
9984 /**
9985  *	t4_cim_read_la - read CIM LA capture buffer
9986  *	@adap: the adapter
9987  *	@la_buf: where to store the LA data
9988  *	@wrptr: the HW write pointer within the capture buffer
9989  *
9990  *	Reads the contents of the CIM LA buffer with the most recent entry at
9991  *	the end	of the returned data and with the entry at @wrptr first.
9992  *	We try to leave the LA in the running state we find it in.
9993  */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA configuration so we can restore it on exit. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	/* Re-read the config to pick up the frozen write pointer. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	/* Step the read pointer through the whole capture buffer, pulling
	 * one 32-bit word per iteration; each read-enable must self-clear
	 * before the data register is valid.
	 */
	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			/* hardware never cleared the read-enable bit */
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 *
		 * NOTE(review): this wrap uses '%' where the step above
		 * uses '&'; since M_UPDBGLARDPTR is a mask (not a count),
		 * '% M_UPDBGLARDPTR' differs from '& M_UPDBGLARDPTR' at
		 * the wrap point — confirm against the T6 reference driver.
		 */
		if (is_t6(adap->params.chip))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* Restore the LA to its original running state, preserving any
	 * earlier error as the function's return value.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
10052 
10053 /**
10054  *	t4_tp_read_la - read TP LA capture buffer
10055  *	@adap: the adapter
10056  *	@la_buf: where to store the LA data
10057  *	@wrptr: the HW write pointer within the capture buffer
10058  *
10059  *	Reads the contents of the TP LA buffer with the most recent entry at
10060  *	the end	of the returned data and with the entry at @wrptr first.
10061  *	We leave the LA in the running state we find it in.
10062  */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the LA config; if capture is running, freeze it by
	 * flipping F_DBGLAENABLE off while keeping the configured mask.
	 */
	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	/* In capture modes >= 2 an entry spans two halves; if the second
	 * half was not written (F_DBGLAWHLF clear) the newest entry is
	 * incomplete and the write pointer is advanced past it.
	 */
	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the mode/mask bits but clear the read-pointer field so it
	 * can be set per-iteration below.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	/* Walk the whole buffer, one 64-bit entry per read-pointer step. */
	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
10099 
10100 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10101  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
10102  * state for more than the Warning Threshold then we'll issue a warning about
10103  * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
10104  * appears to be hung every Warning Repeat second till the situation clears.
10105  * If the situation clears, we'll note that as well.
10106  */
10107 #define SGE_IDMA_WARN_THRESH 1
10108 #define SGE_IDMA_WARN_REPEAT 300
10109 
10110 /**
10111  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10112  *	@adapter: the adapter
10113  *	@idma: the adapter IDMA Monitor state
10114  *
10115  *	Initialize the state of an SGE Ingress DMA Monitor.
10116  */
10117 void t4_idma_monitor_init(struct adapter *adapter,
10118 			  struct sge_idma_monitor_state *idma)
10119 {
10120 	/* Initialize the state variables for detecting an SGE Ingress DMA
10121 	 * hang.  The SGE has internal counters which count up on each clock
10122 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
10123 	 * same state they were on the previous clock tick.  The clock used is
10124 	 * the Core Clock so we have a limit on the maximum "time" they can
10125 	 * record; typically a very small number of seconds.  For instance,
10126 	 * with a 600MHz Core Clock, we can only count up to a bit more than
10127 	 * 7s.  So we'll synthesize a larger counter in order to not run the
10128 	 * risk of having the "timers" overflow and give us the flexibility to
10129 	 * maintain a Hung SGE State Machine of our own which operates across
10130 	 * a longer time frame.
10131 	 */
10132 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10133 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
10134 }
10135 
10136 /**
10137  *	t4_idma_monitor - monitor SGE Ingress DMA state
10138  *	@adapter: the adapter
10139  *	@idma: the adapter IDMA Monitor state
10140  *	@hz: number of ticks/second
10141  *	@ticks: number of ticks since the last IDMA Monitor call
10142  */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* Check each of the two Ingress DMA channels independently. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		/* Dump a human-readable decode of the stuck state. */
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
10225 
10226 /**
 *     t4_set_vf_mac_acl - Set MAC address for the specified VF
10228  *     @adapter: The adapter
10229  *     @vf: one of the VFs instantiated by the specified PF
10230  *     @naddr: the number of MAC addresses
10231  *     @addr: the MAC address(es) to be set to the specified VF
10232  */
10233 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10234 		      unsigned int naddr, u8 *addr)
10235 {
10236 	struct fw_acl_mac_cmd cmd;
10237 
10238 	memset(&cmd, 0, sizeof(cmd));
10239 	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
10240 				    F_FW_CMD_REQUEST |
10241 				    F_FW_CMD_WRITE |
10242 				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
10243 				    V_FW_ACL_MAC_CMD_VFN(vf));
10244 
10245 	/* Note: Do not enable the ACL */
10246 	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10247 	cmd.nmac = naddr;
10248 
10249 	switch (adapter->pf) {
10250 	case 3:
10251 		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10252 		break;
10253 	case 2:
10254 		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10255 		break;
10256 	case 1:
10257 		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10258 		break;
10259 	case 0:
10260 		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10261 		break;
10262 	}
10263 
10264 	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10265 }
10266 
10267 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
10268  * functions
10269  */
10270 
10271 /**
10272  *	t4_read_pace_tbl - read the pace table
10273  *	@adap: the adapter
10274  *	@pace_vals: holds the returned values
10275  *
10276  *	Returns the values of TP's pace table in microseconds.
10277  */
10278 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10279 {
10280 	unsigned int i, v;
10281 
10282 	for (i = 0; i < NTX_SCHED; i++) {
10283 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
10284 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
10285 		pace_vals[i] = dack_ticks_to_usec(adap, v);
10286 	}
10287 }
10288 
10289 /**
10290  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10291  *	@adap: the adapter
10292  *	@sched: the scheduler index
10293  *	@kbps: the byte rate in Kbps
10294  *	@ipg: the interpacket delay in tenths of nanoseconds
10295  * 	@sleep_ok: if true we may sleep while awaiting command completion
10296  *
10297  *	Return the current configuration of a HW Tx scheduler.
10298  */
10299 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
10300 		     unsigned int *ipg, bool sleep_ok)
10301 {
10302 	unsigned int v, addr, bpt, cpt;
10303 
10304 	if (kbps) {
10305 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
10306 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10307 		if (sched & 1)
10308 			v >>= 16;
10309 		bpt = (v >> 8) & 0xff;
10310 		cpt = v & 0xff;
10311 		if (!cpt)
10312 			*kbps = 0;	/* scheduler disabled */
10313 		else {
10314 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10315 			*kbps = (v * bpt) / 125;
10316 		}
10317 	}
10318 	if (ipg) {
10319 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
10320 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10321 		if (sched & 1)
10322 			v >>= 16;
10323 		v &= 0xffff;
10324 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
10325 	}
10326 }
10327 
10328 /**
10329  *	t4_load_cfg - download config file
10330  *	@adap: the adapter
10331  *	@cfg_data: the cfg text file to write
10332  *	@size: text file size
10333  *
10334  *	Write the supplied config text file to the card's serial flash.
10335  */
10336 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10337 {
10338 	int ret, i, n, cfg_addr;
10339 	unsigned int addr;
10340 	unsigned int flash_cfg_start_sec;
10341 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10342 
10343 	cfg_addr = t4_flash_cfg_addr(adap);
10344 	if (cfg_addr < 0)
10345 		return cfg_addr;
10346 
10347 	addr = cfg_addr;
10348 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10349 
10350 	if (size > FLASH_CFG_MAX_SIZE) {
10351 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
10352 		       FLASH_CFG_MAX_SIZE);
10353 		return -EFBIG;
10354 	}
10355 
10356 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
10357 			 sf_sec_size);
10358 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10359 				     flash_cfg_start_sec + i - 1);
10360 	/*
10361 	 * If size == 0 then we're simply erasing the FLASH sectors associated
10362 	 * with the on-adapter Firmware Configuration File.
10363 	 */
10364 	if (ret || size == 0)
10365 		goto out;
10366 
10367 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10368 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10369 		if ( (size - i) <  SF_PAGE_SIZE)
10370 			n = size - i;
10371 		else
10372 			n = SF_PAGE_SIZE;
10373 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
10374 		if (ret)
10375 			goto out;
10376 
10377 		addr += SF_PAGE_SIZE;
10378 		cfg_data += SF_PAGE_SIZE;
10379 	}
10380 
10381 out:
10382 	if (ret)
10383 		CH_ERR(adap, "config file %s failed %d\n",
10384 		       (size == 0 ? "clear" : "download"), ret);
10385 	return ret;
10386 }
10387 
10388 /**
10389  *	t5_fw_init_extern_mem - initialize the external memory
10390  *	@adap: the adapter
10391  *
10392  *	Initializes the external memory on T5.
10393  */
10394 int t5_fw_init_extern_mem(struct adapter *adap)
10395 {
10396 	u32 params[1], val[1];
10397 	int ret;
10398 
10399 	if (!is_t5(adap->params.chip))
10400 		return 0;
10401 
10402 	val[0] = 0xff; /* Initialize all MCs */
10403 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10404 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10405 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10406 			FW_CMD_MAX_TIMEOUT);
10407 
10408 	return ret;
10409 }
10410 
10411 /* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */
10470 
10471 /* BOOT constants */
10472 enum {
10473 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
10474 	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
10475 	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
10476 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
10477 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment  */
10478 	VENDOR_ID = 0x1425, /* Vendor ID */
10479 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
10480 };
10481 
/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write
 *	@boot_data: the boot image to modify
 *
 *	Walks the chain of expansion ROM images in @boot_data, patching the
 *	PCI device ID in each image's PCI Data Structure and, for legacy
 *	(code type 0x00) images, recomputing the image checksum.  The caller
 *	must guarantee that @boot_data holds a complete, well-formed image
 *	chain; no bounds checking is performed here.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;	/* byte offset of the current image */

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): the value is stored in host byte
			 * order while the on-flash field is little-endian
			 * (it is read back with le16_to_cpu() in
			 * t4_load_boot), so this likely needs cpu_to_le16
			 * on big-endian hosts — confirm.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum over the whole
			 * image (size512 is in units of 512 bytes).
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Writing new checksum value directly to the boot
			 * data; offset 7 is the cksum field of the legacy
			 * header (2 signature + 1 size512 + 4 entry point).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): same host-order store as above;
			 * EFI images carry no legacy checksum byte, so
			 * only the ID is patched here.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
10560 
10561 #ifdef CHELSIO_T4_DIAGS
10562 /*
10563  *	t4_earse_sf - Erase entire serial Flash region
10564  *	@adapter: the adapter
10565  *
10566  *	Clears the entire serial flash region.
10567  */
10568 int t4_erase_sf(struct adapter *adap)
10569 {
10570 	unsigned int nsectors;
10571 	int ret;
10572 
10573 	nsectors = FLASH_END_SEC;
10574 	if (nsectors > adap->params.sf_nsec)
10575 		nsectors = adap->params.sf_nsec;
10576 
10577 	// Erase all sectors of flash before and including the FW.
10578 	// Flash layout is in t4_hw.h.
10579 	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
10580 	if (ret)
10581 		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
10582 	return ret;
10583 }
10584 #endif
10585 
10586 /*
10587  *	t4_load_boot - download boot flash
10588  *	@adapter: the adapter
10589  *	@boot_data: the boot image to write
10590  *	@boot_addr: offset in flash to write boot_data
10591  *	@size: image size
10592  *
10593  *	Write the supplied boot image to the card's serial flash.
10594  *	The boot image has the following sections: a 28-byte header and the
10595  *	boot image.
10596  */
10597 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10598 		 unsigned int boot_addr, unsigned int size)
10599 {
10600 	pci_exp_rom_header_t *header;
10601 	int pcir_offset ;
10602 	pcir_data_t *pcir_header;
10603 	int ret, addr;
10604 	uint16_t device_id;
10605 	unsigned int i;
10606 	unsigned int boot_sector = (boot_addr * 1024 );
10607 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10608 
10609 	/*
10610 	 * Make sure the boot image does not encroach on the firmware region
10611 	 */
10612 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10613 		CH_ERR(adap, "boot image encroaching on firmware region\n");
10614 		return -EFBIG;
10615 	}
10616 
10617 	/*
10618 	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10619 	 * and Boot configuration data sections. These 3 boot sections span
10620 	 * sectors 0 to 7 in flash and live right before the FW image location.
10621 	 */
10622 	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
10623 			sf_sec_size);
10624 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10625 				     (boot_sector >> 16) + i - 1);
10626 
10627 	/*
10628 	 * If size == 0 then we're simply erasing the FLASH sectors associated
10629 	 * with the on-adapter option ROM file
10630 	 */
10631 	if (ret || (size == 0))
10632 		goto out;
10633 
10634 	/* Get boot header */
10635 	header = (pci_exp_rom_header_t *)boot_data;
10636 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
10637 	/* PCIR Data Structure */
10638 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
10639 
10640 	/*
10641 	 * Perform some primitive sanity testing to avoid accidentally
10642 	 * writing garbage over the boot sectors.  We ought to check for
10643 	 * more but it's not worth it for now ...
10644 	 */
10645 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10646 		CH_ERR(adap, "boot image too small/large\n");
10647 		return -EFBIG;
10648 	}
10649 
10650 #ifndef CHELSIO_T4_DIAGS
10651 	/*
10652 	 * Check BOOT ROM header signature
10653 	 */
10654 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
10655 		CH_ERR(adap, "Boot image missing signature\n");
10656 		return -EINVAL;
10657 	}
10658 
10659 	/*
10660 	 * Check PCI header signature
10661 	 */
10662 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
10663 		CH_ERR(adap, "PCI header missing signature\n");
10664 		return -EINVAL;
10665 	}
10666 
10667 	/*
10668 	 * Check Vendor ID matches Chelsio ID
10669 	 */
10670 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
10671 		CH_ERR(adap, "Vendor ID missing signature\n");
10672 		return -EINVAL;
10673 	}
10674 #endif
10675 
10676 	/*
10677 	 * Retrieve adapter's device ID
10678 	 */
10679 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10680 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
10681 	device_id = device_id & 0xf0ff;
10682 
10683 	/*
10684 	 * Check PCIE Device ID
10685 	 */
10686 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
10687 		/*
10688 		 * Change the device ID in the Boot BIOS image to match
10689 		 * the Device ID of the current adapter.
10690 		 */
10691 		modify_device_id(device_id, boot_data);
10692 	}
10693 
10694 	/*
10695 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10696 	 * we finish copying the rest of the boot image. This will ensure
10697 	 * that the BIOS boot header will only be written if the boot image
10698 	 * was written in full.
10699 	 */
10700 	addr = boot_sector;
10701 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10702 		addr += SF_PAGE_SIZE;
10703 		boot_data += SF_PAGE_SIZE;
10704 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
10705 		if (ret)
10706 			goto out;
10707 	}
10708 
10709 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10710 			     (const u8 *)header, 0);
10711 
10712 out:
10713 	if (ret)
10714 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
10715 	return ret;
10716 }
10717 
10718 /*
10719  *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10720  *	@adapter: the adapter
10721  *
10722  *	Return the address within the flash where the OptionROM Configuration
10723  *	is stored, or an error if the device FLASH is too small to contain
10724  *	a OptionROM Configuration.
10725  */
10726 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10727 {
10728 	/*
10729 	 * If the device FLASH isn't large enough to hold a Firmware
10730 	 * Configuration File, return an error.
10731 	 */
10732 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10733 		return -ENOSPC;
10734 
10735 	return FLASH_BOOTCFG_START;
10736 }
10737 
10738 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10739 {
10740 	int ret, i, n, cfg_addr;
10741 	unsigned int addr;
10742 	unsigned int flash_cfg_start_sec;
10743 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10744 
10745 	cfg_addr = t4_flash_bootcfg_addr(adap);
10746 	if (cfg_addr < 0)
10747 		return cfg_addr;
10748 
10749 	addr = cfg_addr;
10750 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10751 
10752 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
10753 		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10754 			FLASH_BOOTCFG_MAX_SIZE);
10755 		return -EFBIG;
10756 	}
10757 
10758 	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10759 			 sf_sec_size);
10760 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10761 					flash_cfg_start_sec + i - 1);
10762 
10763 	/*
10764 	 * If size == 0 then we're simply erasing the FLASH sectors associated
10765 	 * with the on-adapter OptionROM Configuration File.
10766 	 */
10767 	if (ret || size == 0)
10768 		goto out;
10769 
10770 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10771 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10772 		if ( (size - i) <  SF_PAGE_SIZE)
10773 			n = size - i;
10774 		else
10775 			n = SF_PAGE_SIZE;
10776 		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10777 		if (ret)
10778 			goto out;
10779 
10780 		addr += SF_PAGE_SIZE;
10781 		cfg_data += SF_PAGE_SIZE;
10782 	}
10783 
10784 out:
10785 	if (ret)
10786 		CH_ERR(adap, "boot config data %s failed %d\n",
10787 				(size == 0 ? "clear" : "download"), ret);
10788 	return ret;
10789 }
10790 
10791 /**
10792  *	t4_set_filter_mode - configure the optional components of filter tuples
10793  *	@adap: the adapter
10794  *	@mode_map: a bitmap selcting which optional filter components to enable
10795  * 	@sleep_ok: if true we may sleep while awaiting command completion
10796  *
10797  *	Sets the filter mode by selecting the optional components to enable
10798  *	in filter tuples.  Returns 0 on success and a negative error if the
10799  *	requested mode needs more bits than are available for optional
10800  *	components.
10801  */
10802 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10803 		       bool sleep_ok)
10804 {
10805 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
10806 
10807 	int i, nbits = 0;
10808 
10809 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10810 		if (mode_map & (1 << i))
10811 			nbits += width[i];
10812 	if (nbits > FILTER_OPT_LEN)
10813 		return -EINVAL;
10814 
10815 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10816 
10817 	return 0;
10818 }
10819 
10820 /**
10821  *	t4_clr_port_stats - clear port statistics
10822  *	@adap: the adapter
10823  *	@idx: the port index
10824  *
10825  *	Clear HW statistics for the given port.
10826  */
10827 void t4_clr_port_stats(struct adapter *adap, int idx)
10828 {
10829 	unsigned int i;
10830 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
10831 	u32 port_base_addr;
10832 
10833 	if (is_t4(adap->params.chip))
10834 		port_base_addr = PORT_BASE(idx);
10835 	else
10836 		port_base_addr = T5_PORT_BASE(idx);
10837 
10838 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10839 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10840 		t4_write_reg(adap, port_base_addr + i, 0);
10841 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10842 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10843 		t4_write_reg(adap, port_base_addr + i, 0);
10844 	for (i = 0; i < 4; i++)
10845 		if (bgmap & (1 << i)) {
10846 			t4_write_reg(adap,
10847 			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10848 			t4_write_reg(adap,
10849 			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10850 		}
10851 }
10852 
10853 /**
10854  *	t4_i2c_rd - read I2C data from adapter
10855  *	@adap: the adapter
10856  *	@port: Port number if per-port device; <0 if not
10857  *	@devid: per-port device ID or absolute device ID
10858  *	@offset: byte offset into device I2C space
10859  *	@len: byte length of I2C space data
10860  *	@buf: buffer in which to return I2C data
10861  *
10862  *	Reads the I2C data from the indicated device and location.
10863  */
10864 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10865 	      int port, unsigned int devid,
10866 	      unsigned int offset, unsigned int len,
10867 	      u8 *buf)
10868 {
10869 	u32 ldst_addrspace;
10870 	struct fw_ldst_cmd ldst;
10871 	int ret;
10872 
10873 	if (port >= 4 ||
10874 	    devid >= 256 ||
10875 	    offset >= 256 ||
10876 	    len > sizeof ldst.u.i2c.data)
10877 		return -EINVAL;
10878 
10879 	memset(&ldst, 0, sizeof ldst);
10880 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10881 	ldst.op_to_addrspace =
10882 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10883 			    F_FW_CMD_REQUEST |
10884 			    F_FW_CMD_READ |
10885 			    ldst_addrspace);
10886 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10887 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10888 	ldst.u.i2c.did = devid;
10889 	ldst.u.i2c.boffset = offset;
10890 	ldst.u.i2c.blen = len;
10891 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10892 	if (!ret)
10893 		memcpy(buf, ldst.u.i2c.data, len);
10894 	return ret;
10895 }
10896 
10897 /**
10898  *	t4_i2c_wr - write I2C data to adapter
10899  *	@adap: the adapter
10900  *	@port: Port number if per-port device; <0 if not
10901  *	@devid: per-port device ID or absolute device ID
10902  *	@offset: byte offset into device I2C space
10903  *	@len: byte length of I2C space data
10904  *	@buf: buffer containing new I2C data
10905  *
10906  *	Write the I2C data to the indicated device and location.
10907  */
10908 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10909 	      int port, unsigned int devid,
10910 	      unsigned int offset, unsigned int len,
10911 	      u8 *buf)
10912 {
10913 	u32 ldst_addrspace;
10914 	struct fw_ldst_cmd ldst;
10915 
10916 	if (port >= 4 ||
10917 	    devid >= 256 ||
10918 	    offset >= 256 ||
10919 	    len > sizeof ldst.u.i2c.data)
10920 		return -EINVAL;
10921 
10922 	memset(&ldst, 0, sizeof ldst);
10923 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10924 	ldst.op_to_addrspace =
10925 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10926 			    F_FW_CMD_REQUEST |
10927 			    F_FW_CMD_WRITE |
10928 			    ldst_addrspace);
10929 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10930 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10931 	ldst.u.i2c.did = devid;
10932 	ldst.u.i2c.boffset = offset;
10933 	ldst.u.i2c.blen = len;
10934 	memcpy(ldst.u.i2c.data, buf, len);
10935 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10936 }
10937 
10938 /**
10939  * 	t4_sge_ctxt_rd - read an SGE context through FW
10940  * 	@adap: the adapter
10941  * 	@mbox: mailbox to use for the FW command
10942  * 	@cid: the context id
10943  * 	@ctype: the context type
10944  * 	@data: where to store the context data
10945  *
10946  * 	Issues a FW command through the given mailbox to read an SGE context.
10947  */
10948 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10949 		   enum ctxt_type ctype, u32 *data)
10950 {
10951 	int ret;
10952 	struct fw_ldst_cmd c;
10953 
10954 	if (ctype == CTXT_EGRESS)
10955 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
10956 	else if (ctype == CTXT_INGRESS)
10957 		ret = FW_LDST_ADDRSPC_SGE_INGC;
10958 	else if (ctype == CTXT_FLM)
10959 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
10960 	else
10961 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
10962 
10963 	memset(&c, 0, sizeof(c));
10964 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10965 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
10966 					V_FW_LDST_CMD_ADDRSPACE(ret));
10967 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10968 	c.u.idctxt.physid = cpu_to_be32(cid);
10969 
10970 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10971 	if (ret == 0) {
10972 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10973 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10974 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10975 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10976 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10977 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10978 	}
10979 	return ret;
10980 }
10981 
10982 /**
10983  * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10984  * 	@adap: the adapter
10985  * 	@cid: the context id
10986  * 	@ctype: the context type
10987  * 	@data: where to store the context data
10988  *
10989  * 	Reads an SGE context directly, bypassing FW.  This is only for
10990  * 	debugging when FW is unavailable.
10991  */
10992 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10993 		      u32 *data)
10994 {
10995 	int i, ret;
10996 
10997 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10998 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
10999 	if (!ret)
11000 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
11001 			*data++ = t4_read_reg(adap, i);
11002 	return ret;
11003 }
11004 
11005 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
11006 {
11007 	struct fw_sched_cmd cmd;
11008 
11009 	memset(&cmd, 0, sizeof(cmd));
11010 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11011 				      F_FW_CMD_REQUEST |
11012 				      F_FW_CMD_WRITE);
11013 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11014 
11015 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
11016 	cmd.u.config.type = type;
11017 	cmd.u.config.minmaxen = minmaxen;
11018 
11019 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11020 			       NULL, 1);
11021 }
11022 
11023 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
11024 		    int rateunit, int ratemode, int channel, int class,
11025 		    int minrate, int maxrate, int weight, int pktsize)
11026 {
11027 	struct fw_sched_cmd cmd;
11028 
11029 	memset(&cmd, 0, sizeof(cmd));
11030 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11031 				      F_FW_CMD_REQUEST |
11032 				      F_FW_CMD_WRITE);
11033 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11034 
11035 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11036 	cmd.u.params.type = type;
11037 	cmd.u.params.level = level;
11038 	cmd.u.params.mode = mode;
11039 	cmd.u.params.ch = channel;
11040 	cmd.u.params.cl = class;
11041 	cmd.u.params.unit = rateunit;
11042 	cmd.u.params.rate = ratemode;
11043 	cmd.u.params.min = cpu_to_be32(minrate);
11044 	cmd.u.params.max = cpu_to_be32(maxrate);
11045 	cmd.u.params.weight = cpu_to_be16(weight);
11046 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
11047 
11048 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11049 			       NULL, 1);
11050 }
11051 
11052 /*
11053  *	t4_config_watchdog - configure (enable/disable) a watchdog timer
11054  *	@adapter: the adapter
11055  * 	@mbox: mailbox to use for the FW command
11056  * 	@pf: the PF owning the queue
11057  * 	@vf: the VF owning the queue
11058  *	@timeout: watchdog timeout in ms
11059  *	@action: watchdog timer / action
11060  *
11061  *	There are separate watchdog timers for each possible watchdog
11062  *	action.  Configure one of the watchdog timers by setting a non-zero
11063  *	timeout.  Disable a watchdog timer by using a timeout of zero.
11064  */
11065 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
11066 		       unsigned int pf, unsigned int vf,
11067 		       unsigned int timeout, unsigned int action)
11068 {
11069 	struct fw_watchdog_cmd wdog;
11070 	unsigned int ticks;
11071 
11072 	/*
11073 	 * The watchdog command expects a timeout in units of 10ms so we need
11074 	 * to convert it here (via rounding) and force a minimum of one 10ms
11075 	 * "tick" if the timeout is non-zero but the convertion results in 0
11076 	 * ticks.
11077 	 */
11078 	ticks = (timeout + 5)/10;
11079 	if (timeout && !ticks)
11080 		ticks = 1;
11081 
11082 	memset(&wdog, 0, sizeof wdog);
11083 	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
11084 				     F_FW_CMD_REQUEST |
11085 				     F_FW_CMD_WRITE |
11086 				     V_FW_PARAMS_CMD_PFN(pf) |
11087 				     V_FW_PARAMS_CMD_VFN(vf));
11088 	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
11089 	wdog.timeout = cpu_to_be32(ticks);
11090 	wdog.action = cpu_to_be32(action);
11091 
11092 	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
11093 }
11094 
11095 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
11096 {
11097 	struct fw_devlog_cmd devlog_cmd;
11098 	int ret;
11099 
11100 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11101 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11102 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
11103 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11104 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11105 			 sizeof(devlog_cmd), &devlog_cmd);
11106 	if (ret)
11107 		return ret;
11108 
11109 	*level = devlog_cmd.level;
11110 	return 0;
11111 }
11112 
11113 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
11114 {
11115 	struct fw_devlog_cmd devlog_cmd;
11116 
11117 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11118 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11119 					     F_FW_CMD_REQUEST |
11120 					     F_FW_CMD_WRITE);
11121 	devlog_cmd.level = level;
11122 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11123 	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11124 			  sizeof(devlog_cmd), &devlog_cmd);
11125 }
11126 
11127