xref: /illumos-gate/usr/src/uts/common/io/cxgbe/common/t4_hw.c (revision 3cdba02932a80ce23359d83defb057a1d5ddf6ba)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14  *
15  * Copyright (C) 2003-2019 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 /*
24  * Copyright 2020 RackTop Systems, Inc.
25  */
26 
27 #include "common.h"
28 #include "t4_regs.h"
29 #include "t4_regs_values.h"
30 #include "t4fw_interface.h"
31 
32 static inline void
t4_os_lock(t4_os_lock_t * lock)33 t4_os_lock(t4_os_lock_t *lock)
34 {
35 	mutex_enter(lock);
36 }
37 
38 static inline void
t4_os_unlock(t4_os_lock_t * lock)39 t4_os_unlock(t4_os_lock_t *lock)
40 {
41 	mutex_exit(lock);
42 }
43 
44 static inline void
t4_os_pci_read_cfg1(struct adapter * sc,int reg,uint8_t * val)45 t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
46 {
47 	*val = pci_config_get8(sc->pci_regh, reg);
48 }
49 
50 static inline void
t4_os_pci_write_cfg1(struct adapter * sc,int reg,uint8_t val)51 t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
52 {
53 	pci_config_put8(sc->pci_regh, reg, val);
54 }
55 
56 static inline void
t4_os_pci_read_cfg2(struct adapter * sc,int reg,uint16_t * val)57 t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
58 {
59 	*val = pci_config_get16(sc->pci_regh, reg);
60 }
61 
62 static inline void
t4_os_pci_write_cfg2(struct adapter * sc,int reg,uint16_t val)63 t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
64 {
65 	pci_config_put16(sc->pci_regh, reg, val);
66 }
67 
68 static inline void
t4_os_pci_read_cfg4(struct adapter * sc,int reg,uint32_t * val)69 t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
70 {
71 	*val = pci_config_get32(sc->pci_regh, reg);
72 }
73 
74 static inline void
t4_os_pci_write_cfg4(struct adapter * sc,int reg,uint32_t val)75 t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
76 {
77 	pci_config_put32(sc->pci_regh, reg, val);
78 }
79 
80 static inline void *
t4_os_alloc(size_t size)81 t4_os_alloc(size_t size)
82 {
83 	return (kmem_alloc(size, KM_SLEEP));
84 }
85 
/*
 * Forward declarations of the SEEPROM accessors used by the
 * t4_os_pci_{read,write}_seeprom() wrappers below; presumably defined
 * elsewhere in this driver -- confirm against the full file.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
88 
89 /*
90  * t4_os_pci_read_seeprom - read four bytes of SEEPROM/VPD contents
91  * @adapter: the adapter
92  * @addr: SEEPROM/VPD Address to read
93  * @valp: where to store the value read
94  *
95  * Read a 32-bit value from the given address in the SEEPROM/VPD.  The address
96  * must be four-byte aligned.  Returns 0 on success, a negative error number
97  * on failure.
98  */
t4_os_pci_read_seeprom(adapter_t * adapter,int addr,u32 * valp)99 static inline int t4_os_pci_read_seeprom(adapter_t *adapter, int addr,
100     u32 *valp)
101 {
102 	const int ret = t4_seeprom_read(adapter, addr, valp);
103 	return (ret >= 0 ? 0 : ret);
104 }
105 
106 /*
107  * t4_os_pci_write_seeprom - write four bytes of SEEPROM/VPD contents
108  * @adapter: the adapter
109  * @addr: SEEPROM/VPD Address to write
110  * @val: the value write
111  *
112  * Write a 32-bit value to the given address in the SEEPROM/VPD.  The address
113  * must be four-byte aligned.  Returns 0 on success, a negative error number
114  * on failure.
115  */
t4_os_pci_write_seeprom(adapter_t * adapter,int addr,u32 val)116 static inline int t4_os_pci_write_seeprom(adapter_t *adapter, int addr, u32 val)
117 {
118 	const int ret = t4_seeprom_write(adapter, addr, val);
119 	return (ret >= 0 ? 0 : ret);
120 }
121 
122 
t4_os_pci_set_vpd_size(struct adapter * adapter,size_t len)123 static inline int t4_os_pci_set_vpd_size(struct adapter *adapter, size_t len)
124 {
125 	/* Presently unused on illumos. */
126 	return (0);
127 }
128 
129 
130 /**
131  *	t4_wait_op_done_val - wait until an operation is completed
132  *	@adapter: the adapter performing the operation
133  *	@reg: the register to check for completion
134  *	@mask: a single-bit field within @reg that indicates completion
135  *	@polarity: the value of the field when the operation is completed
136  *	@attempts: number of check iterations
137  *	@delay: delay in usecs between iterations
138  *	@valp: where to store the value of the register at completion time
139  *
140  *	Wait until an operation is completed by checking a bit in a register
141  *	up to @attempts times.  If @valp is not NULL the value of the register
142  *	at the time it indicated completion is stored there.  Returns 0 if the
143  *	operation completes and	-EAGAIN	otherwise.
144  */
t4_wait_op_done_val(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay,u32 * valp)145 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
146 			       int polarity, int attempts, int delay, u32 *valp)
147 {
148 	while (1) {
149 		u32 val = t4_read_reg(adapter, reg);
150 
151 		if (!!(val & mask) == polarity) {
152 			if (valp)
153 				*valp = val;
154 			return 0;
155 		}
156 		if (--attempts == 0)
157 			return -EAGAIN;
158 		if (delay)
159 			udelay(delay);
160 	}
161 }
162 
t4_wait_op_done(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay)163 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
164 				  int polarity, int attempts, int delay)
165 {
166 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
167 				   delay, NULL);
168 }
169 
170 /**
171  *	t4_set_reg_field - set a register field to a value
172  *	@adapter: the adapter to program
173  *	@addr: the register address
174  *	@mask: specifies the portion of the register to modify
175  *	@val: the new value for the register field
176  *
177  *	Sets a register field specified by the supplied mask to the
178  *	given value.
179  */
t4_set_reg_field(struct adapter * adapter,unsigned int addr,u32 mask,u32 val)180 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
181 		      u32 val)
182 {
183 	u32 v = t4_read_reg(adapter, addr) & ~mask;
184 
185 	t4_write_reg(adapter, addr, v | val);
186 	(void) t4_read_reg(adapter, addr);      /* flush */
187 }
188 
189 /**
190  *	t4_read_indirect - read indirectly addressed registers
191  *	@adap: the adapter
192  *	@addr_reg: register holding the indirect address
193  *	@data_reg: register holding the value of the indirect register
194  *	@vals: where the read register values are stored
195  *	@nregs: how many indirect registers to read
196  *	@start_idx: index of first indirect register to read
197  *
198  *	Reads registers that are accessed indirectly through an address/data
199  *	register pair.
200  */
t4_read_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,u32 * vals,unsigned int nregs,unsigned int start_idx)201 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
202 			     unsigned int data_reg, u32 *vals,
203 			     unsigned int nregs, unsigned int start_idx)
204 {
205 	while (nregs--) {
206 		t4_write_reg(adap, addr_reg, start_idx);
207 		*vals++ = t4_read_reg(adap, data_reg);
208 		start_idx++;
209 	}
210 }
211 
212 /**
213  *	t4_write_indirect - write indirectly addressed registers
214  *	@adap: the adapter
215  *	@addr_reg: register holding the indirect addresses
216  *	@data_reg: register holding the value for the indirect registers
217  *	@vals: values to write
218  *	@nregs: how many indirect registers to write
219  *	@start_idx: address of first indirect register to write
220  *
221  *	Writes a sequential block of registers that are accessed indirectly
222  *	through an address/data register pair.
223  */
t4_write_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,const u32 * vals,unsigned int nregs,unsigned int start_idx)224 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
225 		       unsigned int data_reg, const u32 *vals,
226 		       unsigned int nregs, unsigned int start_idx)
227 {
228 	while (nregs--) {
229 		t4_write_reg(adap, addr_reg, start_idx++);
230 		t4_write_reg(adap, data_reg, *vals++);
231 	}
232 }
233 
234 /*
235  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
236  * mechanism.  This guarantees that we get the real value even if we're
237  * operating within a Virtual Machine and the Hypervisor is trapping our
238  * Configuration Space accesses.
239  *
240  * N.B. This routine should only be used as a last resort: the firmware uses
241  *      the backdoor registers on a regular basis and we can end up
242  *      conflicting with it's uses!
243  */
t4_hw_pci_read_cfg4(struct adapter * adap,int reg,u32 * val)244 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
245 {
246 	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
247 
248 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
249 		req |= F_ENABLE;
250 	else
251 		req |= F_T6_ENABLE;
252 
253 	if (is_t4(adap->params.chip))
254 		req |= F_LOCALCFG;
255 
256 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
257 	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
258 
259 	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
260 	 * Configuration Space read.  (None of the other fields matter when
261 	 * F_ENABLE is 0 so a simple register write is easier than a
262 	 * read-modify-write via t4_set_reg_field().)
263 	 */
264 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
265 }
266 
267 /*
268  * t4_report_fw_error - report firmware error
269  * @adap: the adapter
270  *
271  * The adapter firmware can indicate error conditions to the host.
272  * If the firmware has indicated an error, print out the reason for
273  * the firmware error.
274  */
t4_report_fw_error(struct adapter * adap)275 static void t4_report_fw_error(struct adapter *adap)
276 {
277 	static const char *const reason[] = {
278 		"Crash",			/* PCIE_FW_EVAL_CRASH */
279 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
280 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
281 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
282 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
283 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
284 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
285 		"Reserved",			/* reserved */
286 	};
287 	u32 pcie_fw;
288 
289 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
290 	if (pcie_fw & F_PCIE_FW_ERR) {
291 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
292 			reason[G_PCIE_FW_EVAL(pcie_fw)]);
293 		adap->flags &= ~FW_OK;
294 	}
295 }
296 
297 /*
298  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
299  */
get_mbox_rpl(struct adapter * adap,__be64 * rpl,int nflit,u32 mbox_addr)300 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
301 			 u32 mbox_addr)
302 {
303 	for ( ; nflit; nflit--, mbox_addr += 8)
304 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
305 }
306 
307 /*
308  * Handle a FW assertion reported in a mailbox.
309  */
fw_asrt(struct adapter * adap,struct fw_debug_cmd * asrt)310 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
311 {
312 	CH_ALERT(adap,
313 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
314 		  asrt->u.assert.filename_0_7,
315 		  be32_to_cpu(asrt->u.assert.line),
316 		  be32_to_cpu(asrt->u.assert.x),
317 		  be32_to_cpu(asrt->u.assert.y));
318 }
319 
/*
 * Sentinel value seen when reading a CIM PF mailbox register to which this
 * PF currently has no access; the reply-wait loops below skip such reads.
 */
#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
 * busy loops which don't sleep.
 */
#ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
#define T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
#else
#define T4_OS_TOUCH_NMI_WATCHDOG()
#endif
331 
#ifdef T4_OS_LOG_MBOX_CMDS
/**
 *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 *	@adapter: the adapter
 *	@cmd: the Firmware Mailbox Command or Reply
 *	@size: command length in bytes
 *	@access: the time (ms) needed to access the Firmware Mailbox
 *	@execute: the time (ms) the command spent being executed
 */
static void t4_record_mbox(struct adapter *adapter,
			   const __be64 *cmd, unsigned int size,
			   int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	/* Claim the next slot in the circular log, wrapping the cursor. */
	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	/* Store the command in host byte order, zero-padded to MBOX_LEN. */
	for (i = 0; i < size/8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN/8)
		entry->cmd[i++] = 0;
	entry->timestamp = t4_os_timestamp();
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)

#else /* !T4_OS_LOG_MBOX_CMDS */

/* Mailbox command logging disabled: recording compiles away to nothing. */
#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	/* nothing */

#endif /* !T4_OS_LOG_MBOX_CMDS */
372 
373 /**
374  *	t4_record_mbox_marker - record a marker in the mailbox log
375  *	@adapter: the adapter
376  *	@marker: byte array marker
377  *	@size: marker size in bytes
378  *
379  *	We inject a "fake mailbox command" into the Firmware Mailbox Log
380  *	using a known command token and then the bytes of the specified
381  *	marker.  This lets debugging code inject markers into the log to
382  *	help identify which commands are in response to higher level code.
383  */
t4_record_mbox_marker(struct adapter * adapter,const void * marker,unsigned int size)384 void t4_record_mbox_marker(struct adapter *adapter,
385 			   const void *marker, unsigned int size)
386 {
387 #ifdef T4_OS_LOG_MBOX_CMDS
388 	__be64 marker_cmd[MBOX_LEN/8];
389 	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
390 	unsigned int marker_cmd_size;
391 
392 	if (size > max_marker)
393 		size = max_marker;
394 
395 	marker_cmd[0] = cpu_to_be64(~0LLU);
396 	memcpy(&marker_cmd[1], marker, size);
397 	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
398 	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));
399 
400 	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
401 #endif /* T4_OS_LOG_MBOX_CMDS */
402 }
403 
404 /*
405  * Delay time in microseconds to wait for mailbox access/fw reply
406  * to mailbox command
407  */
408 #define MIN_MBOX_CMD_DELAY 900
409 #define MBOX_CMD_DELAY 1000
410 
411 /**
412  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
413  *	@adap: the adapter
414  *	@mbox: index of the mailbox to use
415  *	@cmd: the command to write
416  *	@size: command length in bytes
417  *	@rpl: where to optionally store the reply
418  *	@sleep_ok: if true we may sleep while awaiting command completion
419  *	@timeout: time to wait for command to finish before timing out
420  *		(negative implies @sleep_ok=false)
421  *
422  *	Sends the given command to FW through the selected mailbox and waits
423  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
424  *	store the FW's reply to the command.  The command and its optional
425  *	reply are of the same length.  Some FW commands like RESET and
426  *	INITIALIZE can take a considerable amount of time to execute.
427  *	@sleep_ok determines whether we may sleep while awaiting the response.
428  *	If sleeping is allowed we use progressive backoff otherwise we spin.
429  *	Note that passing in a negative @timeout is an alternate mechanism
430  *	for specifying @sleep_ok=false.  This is useful when a higher level
431  *	interface allows for specification of @timeout but not @sleep_ok ...
432  *
433  *	The return value is 0 on success or a negative errno on failure.  A
434  *	failure can happen either because we are not able to execute the
435  *	command or FW executes it but signals an error.  In the latter case
436  *	the return value is the error code indicated by FW (negated).
437  */
t4_wr_mbox_meat_timeout(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,bool sleep_ok,int timeout)438 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
439 			    int size, void *rpl, bool sleep_ok, int timeout)
440 {
441 #ifdef T4_OS_LOG_MBOX_CMDS
442 	u16 access = 0;
443 #endif /* T4_OS_LOG_MBOX_CMDS */
444 	u32 v;
445 	u64 res;
446 	int i, ret;
447 	const __be64 *p = cmd;
448 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
449 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
450 	u32 ctl;
451 	__be64 cmd_rpl[MBOX_LEN/8];
452 	struct t4_mbox_list entry;
453 	u32 pcie_fw;
454 
455 	if ((size & 15) || size > MBOX_LEN)
456 		return -EINVAL;
457 
458 	/*
459 	 * If we have a negative timeout, that implies that we can't sleep.
460 	 */
461 	if (timeout < 0) {
462 		sleep_ok = false;
463 		timeout = -timeout;
464 	}
465 
466 	/*
467 	 * Queue ourselves onto the mailbox access list.  When our entry is at
468 	 * the front of the list, we have rights to access the mailbox.  So we
469 	 * wait [for a while] till we're at the front [or bail out with an
470 	 * EBUSY] ...
471 	 */
472 	t4_mbox_list_add(adap, &entry);
473 
474 	for (i = 0; ; i++) {
475 		/*
476 		 * If we've waited too long, return a busy indication.  This
477 		 * really ought to be based on our initial position in the
478 		 * mailbox access list but this is a start.  We very rarely
479 		 * contend on access to the mailbox ...  Also check for a
480 		 * firmware error which we'll report as a device error.
481 		 */
482 		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
483 		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
484 			t4_mbox_list_del(adap, &entry);
485 			t4_report_fw_error(adap);
486 			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
487 			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
488 			return ret;
489 		}
490 
491 		/*
492 		 * If we're at the head, break out and start the mailbox
493 		 * protocol.
494 		 */
495 		if (t4_mbox_list_first_entry(adap) == &entry)
496 			break;
497 
498 		/*
499 		 * Delay for a bit before checking again ...
500 		 */
501 		if (sleep_ok) {
502 			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
503 		} else {
504 			T4_OS_TOUCH_NMI_WATCHDOG();
505 			udelay(MBOX_CMD_DELAY);
506 		}
507 	}
508 #ifdef T4_OS_LOG_MBOX_CMDS
509 	access = i;
510 #endif /* T4_OS_LOG_MBOX_CMDS */
511 
512 	/*
513 	 * Attempt to gain access to the mailbox.
514 	 */
515 	for (i = 0; i < 4; i++) {
516 		ctl = t4_read_reg(adap, ctl_reg);
517 		v = G_MBOWNER(ctl);
518 		if (v != X_MBOWNER_NONE)
519 			break;
520 	}
521 
522 	/*
523 	 * If we were unable to gain access, dequeue ourselves from the
524 	 * mailbox atomic access list and report the error to our caller.
525 	 */
526 	if (v != X_MBOWNER_PL) {
527 		t4_mbox_list_del(adap, &entry);
528 		t4_report_fw_error(adap);
529 		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
530 		T4_RECORD_MBOX(adap, cmd, size, access, ret);
531 		return ret;
532 	}
533 
534 	/*
535 	 * If we gain ownership of the mailbox and there's a "valid" message
536 	 * in it, this is likely an asynchronous error message from the
537 	 * firmware.  So we'll report that and then proceed on with attempting
538 	 * to issue our own command ... which may well fail if the error
539 	 * presaged the firmware crashing ...
540 	 */
541 	if (ctl & F_MBMSGVALID) {
542 		CH_ERR(adap, "found VALID command in mbox %u: "
543 		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
544 		       (unsigned long long)t4_read_reg64(adap, data_reg),
545 		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
546 		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
547 		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
548 		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
549 		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
550 		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
551 		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
552 	}
553 
554 	/*
555 	 * Copy in the new mailbox command and send it on its way ...
556 	 */
557 	T4_RECORD_MBOX(adap, cmd, size, access, 0);
558 	for (i = 0; i < size; i += 8, p++)
559 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
560 
561 	/*
562 	 * XXX It's not clear that we need this anymore now
563 	 * XXX that we have mailbox logging ...
564 	 */
565 	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
566 
567 	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
568 	(void) t4_read_reg(adap, ctl_reg);	/* flush write */
569 
570 	/*
571 	 * Loop waiting for the reply; bail out if we time out or the firmware
572 	 * reports an error.
573 	 */
574 	for (i = 0;
575 	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
576 	     i < timeout;
577 	     i++) {
578 		if (sleep_ok) {
579 			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
580 		} else {
581 			T4_OS_TOUCH_NMI_WATCHDOG();
582 			udelay(MBOX_CMD_DELAY);
583 		}
584 
585 		v = t4_read_reg(adap, ctl_reg);
586 		if (v == X_CIM_PF_NOACCESS)
587 			continue;
588 		if (G_MBOWNER(v) == X_MBOWNER_PL) {
589 			if (!(v & F_MBMSGVALID)) {
590 				t4_write_reg(adap, ctl_reg,
591 					     V_MBOWNER(X_MBOWNER_NONE));
592 				continue;
593 			}
594 
595 			/*
596 			 * Retrieve the command reply and release the mailbox.
597 			 */
598 			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
599 			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
600 			t4_mbox_list_del(adap, &entry);
601 
602 			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);
603 
604 			/*
605 			 * XXX It's not clear that we need this anymore now
606 			 * XXX that we have mailbox logging ...
607 			 */
608 			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
609 			CH_MSG(adap, INFO, HW,
610 			       "command completed in %d ms (%ssleeping)\n",
611 			       i + 1, sleep_ok ? "" : "non-");
612 
613 			res = be64_to_cpu(cmd_rpl[0]);
614 			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
615 				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
616 				res = V_FW_CMD_RETVAL(EIO);
617 			} else if (rpl)
618 				memcpy(rpl, cmd_rpl, size);
619 			return -G_FW_CMD_RETVAL((int)res);
620 		}
621 	}
622 
623 	/*
624 	 * We timed out waiting for a reply to our mailbox command.  Report
625 	 * the error and also check to see if the firmware reported any
626 	 * errors ...
627 	 */
628 	t4_mbox_list_del(adap, &entry);
629 
630 	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
631 	T4_RECORD_MBOX(adap, cmd, size, access, ret);
632 	CH_ERR(adap, "command 0x%x in mailbox %d timed out\n",
633 	       *(const u8 *)cmd, mbox);
634 
635 	t4_report_fw_error(adap);
636 	t4_fatal_err(adap);
637 	return ret;
638 }
639 
#ifdef CONFIG_CUDBG
/*
 * The maximum number of times to iterate for FW reply before
 * issuing a mailbox timeout
 */
#define FW_REPLY_WAIT_LOOP 6000000

/**
 *	t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 *	mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
 *	and is only invoked during a kernel crash. Since this function is
 *	called through a atomic notifier chain ,we cannot sleep awaiting a
 *	response from FW, hence repeatedly loop until we get a reply.
 *
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
			    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.  Poll the ownership field
	 * a handful of times until it is no longer "NONE".
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 * (No mailbox access list handling here -- we're on the crash path.)
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand ownership to the firmware to kick off command execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  No delay between iterations here -- we spin up
	 * to FW_REPLY_WAIT_LOOP register reads since we cannot sleep.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is an assertion report, not a
			 * reply to our command; log it and fail with EIO.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
#endif
789 
t4_wr_mbox_meat(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,bool sleep_ok)790 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
791 		    void *rpl, bool sleep_ok)
792 {
793 #ifdef CONFIG_CUDBG
794 	if (adap->flags & K_CRASH)
795 		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
796 						     rpl);
797 	else
798 #endif
799 		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
800 					       sleep_ok, FW_CMD_MAX_TIMEOUT);
801 
802 }
803 
t4_edc_err_read(struct adapter * adap,int idx)804 static int t4_edc_err_read(struct adapter *adap, int idx)
805 {
806 	u32 edc_ecc_err_addr_reg;
807 	u32 edc_bist_status_rdata_reg;
808 
809 	if (is_t4(adap->params.chip)) {
810 		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
811 		return 0;
812 	}
813 	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
814 		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
815 		return 0;
816 	}
817 
818 	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
819 	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
820 
821 	CH_WARN(adap,
822 		"edc%d err addr 0x%x: 0x%x.\n",
823 		idx, edc_ecc_err_addr_reg,
824 		t4_read_reg(adap, edc_ecc_err_addr_reg));
825 	CH_WARN(adap,
826 	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
827 		edc_bist_status_rdata_reg,
828 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
829 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
830 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
831 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
832 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
833 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
834 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
835 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
836 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
837 
838 	return 0;
839 }
840 
841 /**
842  *	t4_memory_rw_addr - read/write adapter memory via PCIE memory window
843  *	@adap: the adapter
844  *	@win: PCI-E Memory Window to use
845  *	@addr: address within adapter memory
846  *	@len: amount of memory to transfer
847  *	@hbuf: host memory buffer
848  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
849  *
850  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
851  *	firmware memory address and host buffer must be aligned on 32-bit
852  *	boudaries; the length may be arbitrary.
853  *
854  *	NOTES:
855  *	 1. The memory is transferred as a raw byte sequence from/to the
856  *	    firmware's memory.  If this memory contains data structures which
857  *	    contain multi-byte integers, it's the caller's responsibility to
858  *	    perform appropriate byte order conversions.
859  *
860  *	 2. It is the Caller's responsibility to ensure that no other code
861  *	    uses the specified PCI-E Memory Window while this routine is
862  *	    using it.  This is typically done via the use of OS-specific
863  *	    locks, etc.
864  */
t4_memory_rw_addr(struct adapter * adap,int win,u32 addr,u32 len,void * hbuf,int dir)865 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
866 		      u32 len, void *hbuf, int dir)
867 {
868 	u32 pos, offset, resid;
869 	u32 win_pf, mem_reg, mem_aperture, mem_base;
870 	u32 *buf;
871 
872 	/* Argument sanity checks ...
873 	 */
874 	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
875 		return -EINVAL;
876 	buf = (u32 *)hbuf;
877 
878 	/* It's convenient to be able to handle lengths which aren't a
879 	 * multiple of 32-bits because we often end up transferring files to
880 	 * the firmware.  So we'll handle that by normalizing the length here
881 	 * and then handling any residual transfer at the end.
882 	 */
883 	resid = len & 0x3;
884 	len -= resid;
885 
886 	/* Each PCI-E Memory Window is programmed with a window size -- or
887 	 * "aperture" -- which controls the granularity of its mapping onto
888 	 * adapter memory.  We need to grab that aperture in order to know
889 	 * how to use the specified window.  The window is also programmed
890 	 * with the base address of the Memory Window in BAR0's address
891 	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
892 	 * the address is relative to BAR0.
893 	 */
894 	mem_reg = t4_read_reg(adap,
895 			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
896 						  win));
897 
898 	/* a dead adapter will return 0xffffffff for PIO reads */
899 	if (mem_reg == 0xffffffff) {
900 		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
901 			win);
902 		return -ENXIO;
903 	}
904 
905 	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
906 	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
907 	if (is_t4(adap->params.chip))
908 		mem_base -= adap->t4_bar0;
909 	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
910 
911 	/* Calculate our initial PCI-E Memory Window Position and Offset into
912 	 * that Window.
913 	 */
914 	pos = addr & ~(mem_aperture-1);
915 	offset = addr - pos;
916 
917 	/* Set up initial PCI-E Memory Window to cover the start of our
918 	 * transfer.  (Read it back to ensure that changes propagate before we
919 	 * attempt to use the new value.)
920 	 */
921 	t4_write_reg(adap,
922 		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
923 		     pos | win_pf);
924 	t4_read_reg(adap,
925 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
926 
927 	/* Transfer data to/from the adapter as long as there's an integral
928 	 * number of 32-bit transfers to complete.
929 	 *
930 	 * A note on Endianness issues:
931 	 *
932 	 * The "register" reads and writes below from/to the PCI-E Memory
933 	 * Window invoke the standard adapter Big-Endian to PCI-E Link
934 	 * Little-Endian "swizzel."  As a result, if we have the following
935 	 * data in adapter memory:
936 	 *
937 	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
938 	 *     Address:      i+0  i+1  i+2  i+3
939 	 *
940 	 * Then a read of the adapter memory via the PCI-E Memory Window
941 	 * will yield:
942 	 *
943 	 *     x = readl(i)
944 	 *	   31                  0
945 	 *         [ b3 | b2 | b1 | b0 ]
946 	 *
947 	 * If this value is stored into local memory on a Little-Endian system
948 	 * it will show up correctly in local memory as:
949 	 *
950 	 *     ( ..., b0, b1, b2, b3, ... )
951 	 *
952 	 * But on a Big-Endian system, the store will show up in memory
953 	 * incorrectly swizzled as:
954 	 *
955 	 *     ( ..., b3, b2, b1, b0, ... )
956 	 *
957 	 * So we need to account for this in the reads and writes to the
958 	 * PCI-E Memory Window below by undoing the register read/write
959 	 * swizzels.
960 	 */
961 	while (len > 0) {
962 		if (dir == T4_MEMORY_READ)
963 			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
964 						mem_base + offset));
965 		else
966 			t4_write_reg(adap, mem_base + offset,
967 				     (__force u32)cpu_to_le32(*buf++));
968 		offset += sizeof(__be32);
969 		len -= sizeof(__be32);
970 
971 		/* If we've reached the end of our current window aperture,
972 		 * move the PCI-E Memory Window on to the next.  Note that
973 		 * doing this here after "len" may be 0 allows us to set up
974 		 * the PCI-E Memory Window for a possible final residual
975 		 * transfer below ...
976 		 */
977 		if (offset == mem_aperture) {
978 			pos += mem_aperture;
979 			offset = 0;
980 			t4_write_reg(adap,
981 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
982 						    win), pos | win_pf);
983 			t4_read_reg(adap,
984 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
985 						    win));
986 		}
987 	}
988 
989 	/* If the original transfer had a length which wasn't a multiple of
990 	 * 32-bits, now's where we need to finish off the transfer of the
991 	 * residual amount.  The PCI-E Memory Window has already been moved
992 	 * above (if necessary) to cover this final transfer.
993 	 */
994 	if (resid) {
995 		union {
996 			u32 word;
997 			char byte[4];
998 		} last;
999 		unsigned char *bp;
1000 		int i;
1001 
1002 		if (dir == T4_MEMORY_READ) {
1003 			last.word = le32_to_cpu(
1004 					(__force __le32)t4_read_reg(adap,
1005 						mem_base + offset));
1006 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
1007 				bp[i] = last.byte[i];
1008 		} else {
1009 			last.word = *buf;
1010 			for (i = resid; i < 4; i++)
1011 				last.byte[i] = 0;
1012 			t4_write_reg(adap, mem_base + offset,
1013 				     (__force u32)cpu_to_le32(last.word));
1014 		}
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 /**
1021  *	t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
1022  *	@adap: the adapter
1023  *	@win: PCI-E Memory Window to use
1024  *	@mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
1025  *	@maddr: address within indicated memory type
1026  *	@len: amount of memory to transfer
1027  *	@hbuf: host memory buffer
1028  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
1029  *
1030  *	Reads/writes adapter memory using t4_memory_rw_addr().  This routine
1031  *	provides an (memory type, address withing memory type) interface.
1032  */
t4_memory_rw_mtype(struct adapter * adap,int win,int mtype,u32 maddr,u32 len,void * hbuf,int dir)1033 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
1034 		       u32 len, void *hbuf, int dir)
1035 {
1036 	u32 mtype_offset;
1037 	u32 edc_size, mc_size;
1038 
1039 	/* Offset into the region of memory which is being accessed
1040 	 * MEM_EDC0 = 0
1041 	 * MEM_EDC1 = 1
1042 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
1043 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
1044 	 * MEM_HMA  = 4
1045 	 */
1046 	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
1047 	if (mtype == MEM_HMA) {
1048 		mtype_offset = 2 * (edc_size * 1024 * 1024);
1049 	} else if (mtype != MEM_MC1)
1050 		mtype_offset = (mtype * (edc_size * 1024 * 1024));
1051 	else {
1052 		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
1053 						      A_MA_EXT_MEMORY0_BAR));
1054 		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
1055 	}
1056 
1057 	return t4_memory_rw_addr(adap, win,
1058 				 mtype_offset + maddr, len,
1059 				 hbuf, dir);
1060 }
1061 
1062 /*
1063  * Return the specified PCI-E Configuration Space register from our Physical
1064  * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
1065  * since we prefer to let the firmware own all of these registers, but if that
1066  * fails we go for it directly ourselves.
1067  */
t4_read_pcie_cfg4(struct adapter * adap,int reg,int drv_fw_attach)1068 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
1069 {
1070 	u32 val;
1071 
1072 	/*
1073 	 * If fw_attach != 0, construct and send the Firmware LDST Command to
1074 	 * retrieve the specified PCI-E Configuration Space register.
1075 	 */
1076 	if (drv_fw_attach != 0) {
1077 		struct fw_ldst_cmd ldst_cmd;
1078 		int ret;
1079 
1080 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1081 		ldst_cmd.op_to_addrspace =
1082 			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
1083 				    F_FW_CMD_REQUEST |
1084 				    F_FW_CMD_READ |
1085 				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
1086 		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
1087 		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
1088 		ldst_cmd.u.pcie.ctrl_to_fn =
1089 			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
1090 		ldst_cmd.u.pcie.r = reg;
1091 
1092 		/*
1093 		 * If the LDST Command succeeds, return the result, otherwise
1094 		 * fall through to reading it directly ourselves ...
1095 		 */
1096 		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1097 				 &ldst_cmd);
1098 		if (ret == 0)
1099 			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1100 
1101 		CH_WARN(adap, "Firmware failed to return "
1102 			"Configuration Space register %d, err = %d\n",
1103 			reg, -ret);
1104 	}
1105 
1106 	/*
1107 	 * Read the desired Configuration Space register via the PCI-E
1108 	 * Backdoor mechanism.
1109 	 */
1110 	t4_hw_pci_read_cfg4(adap, reg, &val);
1111 	return val;
1112 }
1113 
1114 /*
1115  * Get the window based on base passed to it.
1116  * Window aperture is currently unhandled, but there is no use case for it
1117  * right now
1118  */
t4_get_window(struct adapter * adap,u64 pci_base,u64 pci_mask,u64 memwin_base,int drv_fw_attach)1119 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1120 {
1121 	if (is_t4(adap->params.chip)) {
1122 		u32 bar0;
1123 
1124 		/*
1125 		 * Truncation intentional: we only read the bottom 32-bits of
1126 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
1127 		 * mechanism to read BAR0 instead of using
1128 		 * pci_resource_start() because we could be operating from
1129 		 * within a Virtual Machine which is trapping our accesses to
1130 		 * our Configuration Space and we need to set up the PCI-E
1131 		 * Memory Window decoders with the actual addresses which will
1132 		 * be coming across the PCI-E link.
1133 		 */
1134 		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1135 		bar0 &= pci_mask;
1136 		adap->t4_bar0 = bar0;
1137 
1138 		return bar0 + memwin_base;
1139 	} else {
1140 		/* For T5, only relative offset inside the PCIe BAR is passed */
1141 		return memwin_base;
1142 	}
1143 }
1144 
1145 /* Get the default utility window (win0) used by everyone */
t4_get_util_window(struct adapter * adap,int drv_fw_attach)1146 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1147 {
1148 	return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1149 }
1150 
1151 /*
1152  * Set up memory window for accessing adapter memory ranges.  (Read
1153  * back MA register to ensure that changes propagate before we attempt
1154  * to use the new values.)
1155  */
t4_setup_memwin(struct adapter * adap,u32 memwin_base,u32 window)1156 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1157 {
1158 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1159 		     memwin_base | V_BIR(0) |
1160 		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1161 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1162 }
1163 
1164 /**
1165  *	t4_get_regs_len - return the size of the chips register set
1166  *	@adapter: the adapter
1167  *
1168  *	Returns the size of the chip's BAR0 register space.
1169  */
t4_get_regs_len(struct adapter * adapter)1170 unsigned int t4_get_regs_len(struct adapter *adapter)
1171 {
1172 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1173 
1174 	switch (chip_version) {
1175 	case CHELSIO_T4:
1176 		return T4_REGMAP_SIZE;
1177 
1178 	case CHELSIO_T5:
1179 	case CHELSIO_T6:
1180 		return T5_REGMAP_SIZE;
1181 	}
1182 
1183 	CH_ERR(adapter,
1184 		"Unsupported chip version %d\n", chip_version);
1185 	return 0;
1186 }
1187 
1188 /**
1189  *	t4_get_regs - read chip registers into provided buffer
1190  *	@adap: the adapter
1191  *	@buf: register buffer
1192  *	@buf_size: size (in bytes) of register buffer
1193  *
1194  *	If the provided register buffer isn't large enough for the chip's
1195  *	full register range, the register dump will be truncated to the
1196  *	register buffer's size.
1197  */
t4_get_regs(struct adapter * adap,void * buf,size_t buf_size)1198 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1199 {
1200 	static const unsigned int t4_reg_ranges[] = {
1201 		0x1008, 0x1108,
1202 		0x1180, 0x1184,
1203 		0x1190, 0x1194,
1204 		0x11a0, 0x11a4,
1205 		0x11b0, 0x11b4,
1206 		0x11fc, 0x123c,
1207 		0x1300, 0x173c,
1208 		0x1800, 0x18fc,
1209 		0x3000, 0x30d8,
1210 		0x30e0, 0x30e4,
1211 		0x30ec, 0x5910,
1212 		0x5920, 0x5924,
1213 		0x5960, 0x5960,
1214 		0x5968, 0x5968,
1215 		0x5970, 0x5970,
1216 		0x5978, 0x5978,
1217 		0x5980, 0x5980,
1218 		0x5988, 0x5988,
1219 		0x5990, 0x5990,
1220 		0x5998, 0x5998,
1221 		0x59a0, 0x59d4,
1222 		0x5a00, 0x5ae0,
1223 		0x5ae8, 0x5ae8,
1224 		0x5af0, 0x5af0,
1225 		0x5af8, 0x5af8,
1226 		0x6000, 0x6098,
1227 		0x6100, 0x6150,
1228 		0x6200, 0x6208,
1229 		0x6240, 0x6248,
1230 		0x6280, 0x62b0,
1231 		0x62c0, 0x6338,
1232 		0x6370, 0x638c,
1233 		0x6400, 0x643c,
1234 		0x6500, 0x6524,
1235 		0x6a00, 0x6a04,
1236 		0x6a14, 0x6a38,
1237 		0x6a60, 0x6a70,
1238 		0x6a78, 0x6a78,
1239 		0x6b00, 0x6b0c,
1240 		0x6b1c, 0x6b84,
1241 		0x6bf0, 0x6bf8,
1242 		0x6c00, 0x6c0c,
1243 		0x6c1c, 0x6c84,
1244 		0x6cf0, 0x6cf8,
1245 		0x6d00, 0x6d0c,
1246 		0x6d1c, 0x6d84,
1247 		0x6df0, 0x6df8,
1248 		0x6e00, 0x6e0c,
1249 		0x6e1c, 0x6e84,
1250 		0x6ef0, 0x6ef8,
1251 		0x6f00, 0x6f0c,
1252 		0x6f1c, 0x6f84,
1253 		0x6ff0, 0x6ff8,
1254 		0x7000, 0x700c,
1255 		0x701c, 0x7084,
1256 		0x70f0, 0x70f8,
1257 		0x7100, 0x710c,
1258 		0x711c, 0x7184,
1259 		0x71f0, 0x71f8,
1260 		0x7200, 0x720c,
1261 		0x721c, 0x7284,
1262 		0x72f0, 0x72f8,
1263 		0x7300, 0x730c,
1264 		0x731c, 0x7384,
1265 		0x73f0, 0x73f8,
1266 		0x7400, 0x7450,
1267 		0x7500, 0x7530,
1268 		0x7600, 0x760c,
1269 		0x7614, 0x761c,
1270 		0x7680, 0x76cc,
1271 		0x7700, 0x7798,
1272 		0x77c0, 0x77fc,
1273 		0x7900, 0x79fc,
1274 		0x7b00, 0x7b58,
1275 		0x7b60, 0x7b84,
1276 		0x7b8c, 0x7c38,
1277 		0x7d00, 0x7d38,
1278 		0x7d40, 0x7d80,
1279 		0x7d8c, 0x7ddc,
1280 		0x7de4, 0x7e04,
1281 		0x7e10, 0x7e1c,
1282 		0x7e24, 0x7e38,
1283 		0x7e40, 0x7e44,
1284 		0x7e4c, 0x7e78,
1285 		0x7e80, 0x7ea4,
1286 		0x7eac, 0x7edc,
1287 		0x7ee8, 0x7efc,
1288 		0x8dc0, 0x8e04,
1289 		0x8e10, 0x8e1c,
1290 		0x8e30, 0x8e78,
1291 		0x8ea0, 0x8eb8,
1292 		0x8ec0, 0x8f6c,
1293 		0x8fc0, 0x9008,
1294 		0x9010, 0x9058,
1295 		0x9060, 0x9060,
1296 		0x9068, 0x9074,
1297 		0x90fc, 0x90fc,
1298 		0x9400, 0x9408,
1299 		0x9410, 0x9458,
1300 		0x9600, 0x9600,
1301 		0x9608, 0x9638,
1302 		0x9640, 0x96bc,
1303 		0x9800, 0x9808,
1304 		0x9820, 0x983c,
1305 		0x9850, 0x9864,
1306 		0x9c00, 0x9c6c,
1307 		0x9c80, 0x9cec,
1308 		0x9d00, 0x9d6c,
1309 		0x9d80, 0x9dec,
1310 		0x9e00, 0x9e6c,
1311 		0x9e80, 0x9eec,
1312 		0x9f00, 0x9f6c,
1313 		0x9f80, 0x9fec,
1314 		0xd004, 0xd004,
1315 		0xd010, 0xd03c,
1316 		0xdfc0, 0xdfe0,
1317 		0xe000, 0xea7c,
1318 		0xf000, 0x11110,
1319 		0x11118, 0x11190,
1320 		0x19040, 0x1906c,
1321 		0x19078, 0x19080,
1322 		0x1908c, 0x190e4,
1323 		0x190f0, 0x190f8,
1324 		0x19100, 0x19110,
1325 		0x19120, 0x19124,
1326 		0x19150, 0x19194,
1327 		0x1919c, 0x191b0,
1328 		0x191d0, 0x191e8,
1329 		0x19238, 0x1924c,
1330 		0x193f8, 0x1943c,
1331 		0x1944c, 0x19474,
1332 		0x19490, 0x194e0,
1333 		0x194f0, 0x194f8,
1334 		0x19800, 0x19c08,
1335 		0x19c10, 0x19c90,
1336 		0x19ca0, 0x19ce4,
1337 		0x19cf0, 0x19d40,
1338 		0x19d50, 0x19d94,
1339 		0x19da0, 0x19de8,
1340 		0x19df0, 0x19e40,
1341 		0x19e50, 0x19e90,
1342 		0x19ea0, 0x19f4c,
1343 		0x1a000, 0x1a004,
1344 		0x1a010, 0x1a06c,
1345 		0x1a0b0, 0x1a0e4,
1346 		0x1a0ec, 0x1a0f4,
1347 		0x1a100, 0x1a108,
1348 		0x1a114, 0x1a120,
1349 		0x1a128, 0x1a130,
1350 		0x1a138, 0x1a138,
1351 		0x1a190, 0x1a1c4,
1352 		0x1a1fc, 0x1a1fc,
1353 		0x1e040, 0x1e04c,
1354 		0x1e284, 0x1e28c,
1355 		0x1e2c0, 0x1e2c0,
1356 		0x1e2e0, 0x1e2e0,
1357 		0x1e300, 0x1e384,
1358 		0x1e3c0, 0x1e3c8,
1359 		0x1e440, 0x1e44c,
1360 		0x1e684, 0x1e68c,
1361 		0x1e6c0, 0x1e6c0,
1362 		0x1e6e0, 0x1e6e0,
1363 		0x1e700, 0x1e784,
1364 		0x1e7c0, 0x1e7c8,
1365 		0x1e840, 0x1e84c,
1366 		0x1ea84, 0x1ea8c,
1367 		0x1eac0, 0x1eac0,
1368 		0x1eae0, 0x1eae0,
1369 		0x1eb00, 0x1eb84,
1370 		0x1ebc0, 0x1ebc8,
1371 		0x1ec40, 0x1ec4c,
1372 		0x1ee84, 0x1ee8c,
1373 		0x1eec0, 0x1eec0,
1374 		0x1eee0, 0x1eee0,
1375 		0x1ef00, 0x1ef84,
1376 		0x1efc0, 0x1efc8,
1377 		0x1f040, 0x1f04c,
1378 		0x1f284, 0x1f28c,
1379 		0x1f2c0, 0x1f2c0,
1380 		0x1f2e0, 0x1f2e0,
1381 		0x1f300, 0x1f384,
1382 		0x1f3c0, 0x1f3c8,
1383 		0x1f440, 0x1f44c,
1384 		0x1f684, 0x1f68c,
1385 		0x1f6c0, 0x1f6c0,
1386 		0x1f6e0, 0x1f6e0,
1387 		0x1f700, 0x1f784,
1388 		0x1f7c0, 0x1f7c8,
1389 		0x1f840, 0x1f84c,
1390 		0x1fa84, 0x1fa8c,
1391 		0x1fac0, 0x1fac0,
1392 		0x1fae0, 0x1fae0,
1393 		0x1fb00, 0x1fb84,
1394 		0x1fbc0, 0x1fbc8,
1395 		0x1fc40, 0x1fc4c,
1396 		0x1fe84, 0x1fe8c,
1397 		0x1fec0, 0x1fec0,
1398 		0x1fee0, 0x1fee0,
1399 		0x1ff00, 0x1ff84,
1400 		0x1ffc0, 0x1ffc8,
1401 		0x20000, 0x2002c,
1402 		0x20100, 0x2013c,
1403 		0x20190, 0x201a0,
1404 		0x201a8, 0x201b8,
1405 		0x201c4, 0x201c8,
1406 		0x20200, 0x20318,
1407 		0x20400, 0x204b4,
1408 		0x204c0, 0x20528,
1409 		0x20540, 0x20614,
1410 		0x21000, 0x21040,
1411 		0x2104c, 0x21060,
1412 		0x210c0, 0x210ec,
1413 		0x21200, 0x21268,
1414 		0x21270, 0x21284,
1415 		0x212fc, 0x21388,
1416 		0x21400, 0x21404,
1417 		0x21500, 0x21500,
1418 		0x21510, 0x21518,
1419 		0x2152c, 0x21530,
1420 		0x2153c, 0x2153c,
1421 		0x21550, 0x21554,
1422 		0x21600, 0x21600,
1423 		0x21608, 0x2161c,
1424 		0x21624, 0x21628,
1425 		0x21630, 0x21634,
1426 		0x2163c, 0x2163c,
1427 		0x21700, 0x2171c,
1428 		0x21780, 0x2178c,
1429 		0x21800, 0x21818,
1430 		0x21820, 0x21828,
1431 		0x21830, 0x21848,
1432 		0x21850, 0x21854,
1433 		0x21860, 0x21868,
1434 		0x21870, 0x21870,
1435 		0x21878, 0x21898,
1436 		0x218a0, 0x218a8,
1437 		0x218b0, 0x218c8,
1438 		0x218d0, 0x218d4,
1439 		0x218e0, 0x218e8,
1440 		0x218f0, 0x218f0,
1441 		0x218f8, 0x21a18,
1442 		0x21a20, 0x21a28,
1443 		0x21a30, 0x21a48,
1444 		0x21a50, 0x21a54,
1445 		0x21a60, 0x21a68,
1446 		0x21a70, 0x21a70,
1447 		0x21a78, 0x21a98,
1448 		0x21aa0, 0x21aa8,
1449 		0x21ab0, 0x21ac8,
1450 		0x21ad0, 0x21ad4,
1451 		0x21ae0, 0x21ae8,
1452 		0x21af0, 0x21af0,
1453 		0x21af8, 0x21c18,
1454 		0x21c20, 0x21c20,
1455 		0x21c28, 0x21c30,
1456 		0x21c38, 0x21c38,
1457 		0x21c80, 0x21c98,
1458 		0x21ca0, 0x21ca8,
1459 		0x21cb0, 0x21cc8,
1460 		0x21cd0, 0x21cd4,
1461 		0x21ce0, 0x21ce8,
1462 		0x21cf0, 0x21cf0,
1463 		0x21cf8, 0x21d7c,
1464 		0x21e00, 0x21e04,
1465 		0x22000, 0x2202c,
1466 		0x22100, 0x2213c,
1467 		0x22190, 0x221a0,
1468 		0x221a8, 0x221b8,
1469 		0x221c4, 0x221c8,
1470 		0x22200, 0x22318,
1471 		0x22400, 0x224b4,
1472 		0x224c0, 0x22528,
1473 		0x22540, 0x22614,
1474 		0x23000, 0x23040,
1475 		0x2304c, 0x23060,
1476 		0x230c0, 0x230ec,
1477 		0x23200, 0x23268,
1478 		0x23270, 0x23284,
1479 		0x232fc, 0x23388,
1480 		0x23400, 0x23404,
1481 		0x23500, 0x23500,
1482 		0x23510, 0x23518,
1483 		0x2352c, 0x23530,
1484 		0x2353c, 0x2353c,
1485 		0x23550, 0x23554,
1486 		0x23600, 0x23600,
1487 		0x23608, 0x2361c,
1488 		0x23624, 0x23628,
1489 		0x23630, 0x23634,
1490 		0x2363c, 0x2363c,
1491 		0x23700, 0x2371c,
1492 		0x23780, 0x2378c,
1493 		0x23800, 0x23818,
1494 		0x23820, 0x23828,
1495 		0x23830, 0x23848,
1496 		0x23850, 0x23854,
1497 		0x23860, 0x23868,
1498 		0x23870, 0x23870,
1499 		0x23878, 0x23898,
1500 		0x238a0, 0x238a8,
1501 		0x238b0, 0x238c8,
1502 		0x238d0, 0x238d4,
1503 		0x238e0, 0x238e8,
1504 		0x238f0, 0x238f0,
1505 		0x238f8, 0x23a18,
1506 		0x23a20, 0x23a28,
1507 		0x23a30, 0x23a48,
1508 		0x23a50, 0x23a54,
1509 		0x23a60, 0x23a68,
1510 		0x23a70, 0x23a70,
1511 		0x23a78, 0x23a98,
1512 		0x23aa0, 0x23aa8,
1513 		0x23ab0, 0x23ac8,
1514 		0x23ad0, 0x23ad4,
1515 		0x23ae0, 0x23ae8,
1516 		0x23af0, 0x23af0,
1517 		0x23af8, 0x23c18,
1518 		0x23c20, 0x23c20,
1519 		0x23c28, 0x23c30,
1520 		0x23c38, 0x23c38,
1521 		0x23c80, 0x23c98,
1522 		0x23ca0, 0x23ca8,
1523 		0x23cb0, 0x23cc8,
1524 		0x23cd0, 0x23cd4,
1525 		0x23ce0, 0x23ce8,
1526 		0x23cf0, 0x23cf0,
1527 		0x23cf8, 0x23d7c,
1528 		0x23e00, 0x23e04,
1529 		0x24000, 0x2402c,
1530 		0x24100, 0x2413c,
1531 		0x24190, 0x241a0,
1532 		0x241a8, 0x241b8,
1533 		0x241c4, 0x241c8,
1534 		0x24200, 0x24318,
1535 		0x24400, 0x244b4,
1536 		0x244c0, 0x24528,
1537 		0x24540, 0x24614,
1538 		0x25000, 0x25040,
1539 		0x2504c, 0x25060,
1540 		0x250c0, 0x250ec,
1541 		0x25200, 0x25268,
1542 		0x25270, 0x25284,
1543 		0x252fc, 0x25388,
1544 		0x25400, 0x25404,
1545 		0x25500, 0x25500,
1546 		0x25510, 0x25518,
1547 		0x2552c, 0x25530,
1548 		0x2553c, 0x2553c,
1549 		0x25550, 0x25554,
1550 		0x25600, 0x25600,
1551 		0x25608, 0x2561c,
1552 		0x25624, 0x25628,
1553 		0x25630, 0x25634,
1554 		0x2563c, 0x2563c,
1555 		0x25700, 0x2571c,
1556 		0x25780, 0x2578c,
1557 		0x25800, 0x25818,
1558 		0x25820, 0x25828,
1559 		0x25830, 0x25848,
1560 		0x25850, 0x25854,
1561 		0x25860, 0x25868,
1562 		0x25870, 0x25870,
1563 		0x25878, 0x25898,
1564 		0x258a0, 0x258a8,
1565 		0x258b0, 0x258c8,
1566 		0x258d0, 0x258d4,
1567 		0x258e0, 0x258e8,
1568 		0x258f0, 0x258f0,
1569 		0x258f8, 0x25a18,
1570 		0x25a20, 0x25a28,
1571 		0x25a30, 0x25a48,
1572 		0x25a50, 0x25a54,
1573 		0x25a60, 0x25a68,
1574 		0x25a70, 0x25a70,
1575 		0x25a78, 0x25a98,
1576 		0x25aa0, 0x25aa8,
1577 		0x25ab0, 0x25ac8,
1578 		0x25ad0, 0x25ad4,
1579 		0x25ae0, 0x25ae8,
1580 		0x25af0, 0x25af0,
1581 		0x25af8, 0x25c18,
1582 		0x25c20, 0x25c20,
1583 		0x25c28, 0x25c30,
1584 		0x25c38, 0x25c38,
1585 		0x25c80, 0x25c98,
1586 		0x25ca0, 0x25ca8,
1587 		0x25cb0, 0x25cc8,
1588 		0x25cd0, 0x25cd4,
1589 		0x25ce0, 0x25ce8,
1590 		0x25cf0, 0x25cf0,
1591 		0x25cf8, 0x25d7c,
1592 		0x25e00, 0x25e04,
1593 		0x26000, 0x2602c,
1594 		0x26100, 0x2613c,
1595 		0x26190, 0x261a0,
1596 		0x261a8, 0x261b8,
1597 		0x261c4, 0x261c8,
1598 		0x26200, 0x26318,
1599 		0x26400, 0x264b4,
1600 		0x264c0, 0x26528,
1601 		0x26540, 0x26614,
1602 		0x27000, 0x27040,
1603 		0x2704c, 0x27060,
1604 		0x270c0, 0x270ec,
1605 		0x27200, 0x27268,
1606 		0x27270, 0x27284,
1607 		0x272fc, 0x27388,
1608 		0x27400, 0x27404,
1609 		0x27500, 0x27500,
1610 		0x27510, 0x27518,
1611 		0x2752c, 0x27530,
1612 		0x2753c, 0x2753c,
1613 		0x27550, 0x27554,
1614 		0x27600, 0x27600,
1615 		0x27608, 0x2761c,
1616 		0x27624, 0x27628,
1617 		0x27630, 0x27634,
1618 		0x2763c, 0x2763c,
1619 		0x27700, 0x2771c,
1620 		0x27780, 0x2778c,
1621 		0x27800, 0x27818,
1622 		0x27820, 0x27828,
1623 		0x27830, 0x27848,
1624 		0x27850, 0x27854,
1625 		0x27860, 0x27868,
1626 		0x27870, 0x27870,
1627 		0x27878, 0x27898,
1628 		0x278a0, 0x278a8,
1629 		0x278b0, 0x278c8,
1630 		0x278d0, 0x278d4,
1631 		0x278e0, 0x278e8,
1632 		0x278f0, 0x278f0,
1633 		0x278f8, 0x27a18,
1634 		0x27a20, 0x27a28,
1635 		0x27a30, 0x27a48,
1636 		0x27a50, 0x27a54,
1637 		0x27a60, 0x27a68,
1638 		0x27a70, 0x27a70,
1639 		0x27a78, 0x27a98,
1640 		0x27aa0, 0x27aa8,
1641 		0x27ab0, 0x27ac8,
1642 		0x27ad0, 0x27ad4,
1643 		0x27ae0, 0x27ae8,
1644 		0x27af0, 0x27af0,
1645 		0x27af8, 0x27c18,
1646 		0x27c20, 0x27c20,
1647 		0x27c28, 0x27c30,
1648 		0x27c38, 0x27c38,
1649 		0x27c80, 0x27c98,
1650 		0x27ca0, 0x27ca8,
1651 		0x27cb0, 0x27cc8,
1652 		0x27cd0, 0x27cd4,
1653 		0x27ce0, 0x27ce8,
1654 		0x27cf0, 0x27cf0,
1655 		0x27cf8, 0x27d7c,
1656 		0x27e00, 0x27e04,
1657 	};
1658 
1659 	static const unsigned int t5_reg_ranges[] = {
1660 		0x1008, 0x10c0,
1661 		0x10cc, 0x10f8,
1662 		0x1100, 0x1100,
1663 		0x110c, 0x1148,
1664 		0x1180, 0x1184,
1665 		0x1190, 0x1194,
1666 		0x11a0, 0x11a4,
1667 		0x11b0, 0x11b4,
1668 		0x11fc, 0x123c,
1669 		0x1280, 0x173c,
1670 		0x1800, 0x18fc,
1671 		0x3000, 0x3028,
1672 		0x3060, 0x30b0,
1673 		0x30b8, 0x30d8,
1674 		0x30e0, 0x30fc,
1675 		0x3140, 0x357c,
1676 		0x35a8, 0x35cc,
1677 		0x35ec, 0x35ec,
1678 		0x3600, 0x5624,
1679 		0x56cc, 0x56ec,
1680 		0x56f4, 0x5720,
1681 		0x5728, 0x575c,
1682 		0x580c, 0x5814,
1683 		0x5890, 0x589c,
1684 		0x58a4, 0x58ac,
1685 		0x58b8, 0x58bc,
1686 		0x5940, 0x59c8,
1687 		0x59d0, 0x59dc,
1688 		0x59fc, 0x5a18,
1689 		0x5a60, 0x5a70,
1690 		0x5a80, 0x5a9c,
1691 		0x5b94, 0x5bfc,
1692 		0x6000, 0x6020,
1693 		0x6028, 0x6040,
1694 		0x6058, 0x609c,
1695 		0x60a8, 0x614c,
1696 		0x7700, 0x7798,
1697 		0x77c0, 0x78fc,
1698 		0x7b00, 0x7b58,
1699 		0x7b60, 0x7b84,
1700 		0x7b8c, 0x7c54,
1701 		0x7d00, 0x7d38,
1702 		0x7d40, 0x7d80,
1703 		0x7d8c, 0x7ddc,
1704 		0x7de4, 0x7e04,
1705 		0x7e10, 0x7e1c,
1706 		0x7e24, 0x7e38,
1707 		0x7e40, 0x7e44,
1708 		0x7e4c, 0x7e78,
1709 		0x7e80, 0x7edc,
1710 		0x7ee8, 0x7efc,
1711 		0x8dc0, 0x8de0,
1712 		0x8df8, 0x8e04,
1713 		0x8e10, 0x8e84,
1714 		0x8ea0, 0x8f84,
1715 		0x8fc0, 0x9058,
1716 		0x9060, 0x9060,
1717 		0x9068, 0x90f8,
1718 		0x9400, 0x9408,
1719 		0x9410, 0x9470,
1720 		0x9600, 0x9600,
1721 		0x9608, 0x9638,
1722 		0x9640, 0x96f4,
1723 		0x9800, 0x9808,
1724 		0x9820, 0x983c,
1725 		0x9850, 0x9864,
1726 		0x9c00, 0x9c6c,
1727 		0x9c80, 0x9cec,
1728 		0x9d00, 0x9d6c,
1729 		0x9d80, 0x9dec,
1730 		0x9e00, 0x9e6c,
1731 		0x9e80, 0x9eec,
1732 		0x9f00, 0x9f6c,
1733 		0x9f80, 0xa020,
1734 		0xd004, 0xd004,
1735 		0xd010, 0xd03c,
1736 		0xdfc0, 0xdfe0,
1737 		0xe000, 0x1106c,
1738 		0x11074, 0x11088,
1739 		0x1109c, 0x1117c,
1740 		0x11190, 0x11204,
1741 		0x19040, 0x1906c,
1742 		0x19078, 0x19080,
1743 		0x1908c, 0x190e8,
1744 		0x190f0, 0x190f8,
1745 		0x19100, 0x19110,
1746 		0x19120, 0x19124,
1747 		0x19150, 0x19194,
1748 		0x1919c, 0x191b0,
1749 		0x191d0, 0x191e8,
1750 		0x19238, 0x19290,
1751 		0x193f8, 0x19428,
1752 		0x19430, 0x19444,
1753 		0x1944c, 0x1946c,
1754 		0x19474, 0x19474,
1755 		0x19490, 0x194cc,
1756 		0x194f0, 0x194f8,
1757 		0x19c00, 0x19c08,
1758 		0x19c10, 0x19c60,
1759 		0x19c94, 0x19ce4,
1760 		0x19cf0, 0x19d40,
1761 		0x19d50, 0x19d94,
1762 		0x19da0, 0x19de8,
1763 		0x19df0, 0x19e10,
1764 		0x19e50, 0x19e90,
1765 		0x19ea0, 0x19f24,
1766 		0x19f34, 0x19f34,
1767 		0x19f40, 0x19f50,
1768 		0x19f90, 0x19fb4,
1769 		0x19fc4, 0x19fe4,
1770 		0x1a000, 0x1a004,
1771 		0x1a010, 0x1a06c,
1772 		0x1a0b0, 0x1a0e4,
1773 		0x1a0ec, 0x1a0f8,
1774 		0x1a100, 0x1a108,
1775 		0x1a114, 0x1a120,
1776 		0x1a128, 0x1a130,
1777 		0x1a138, 0x1a138,
1778 		0x1a190, 0x1a1c4,
1779 		0x1a1fc, 0x1a1fc,
1780 		0x1e008, 0x1e00c,
1781 		0x1e040, 0x1e044,
1782 		0x1e04c, 0x1e04c,
1783 		0x1e284, 0x1e290,
1784 		0x1e2c0, 0x1e2c0,
1785 		0x1e2e0, 0x1e2e0,
1786 		0x1e300, 0x1e384,
1787 		0x1e3c0, 0x1e3c8,
1788 		0x1e408, 0x1e40c,
1789 		0x1e440, 0x1e444,
1790 		0x1e44c, 0x1e44c,
1791 		0x1e684, 0x1e690,
1792 		0x1e6c0, 0x1e6c0,
1793 		0x1e6e0, 0x1e6e0,
1794 		0x1e700, 0x1e784,
1795 		0x1e7c0, 0x1e7c8,
1796 		0x1e808, 0x1e80c,
1797 		0x1e840, 0x1e844,
1798 		0x1e84c, 0x1e84c,
1799 		0x1ea84, 0x1ea90,
1800 		0x1eac0, 0x1eac0,
1801 		0x1eae0, 0x1eae0,
1802 		0x1eb00, 0x1eb84,
1803 		0x1ebc0, 0x1ebc8,
1804 		0x1ec08, 0x1ec0c,
1805 		0x1ec40, 0x1ec44,
1806 		0x1ec4c, 0x1ec4c,
1807 		0x1ee84, 0x1ee90,
1808 		0x1eec0, 0x1eec0,
1809 		0x1eee0, 0x1eee0,
1810 		0x1ef00, 0x1ef84,
1811 		0x1efc0, 0x1efc8,
1812 		0x1f008, 0x1f00c,
1813 		0x1f040, 0x1f044,
1814 		0x1f04c, 0x1f04c,
1815 		0x1f284, 0x1f290,
1816 		0x1f2c0, 0x1f2c0,
1817 		0x1f2e0, 0x1f2e0,
1818 		0x1f300, 0x1f384,
1819 		0x1f3c0, 0x1f3c8,
1820 		0x1f408, 0x1f40c,
1821 		0x1f440, 0x1f444,
1822 		0x1f44c, 0x1f44c,
1823 		0x1f684, 0x1f690,
1824 		0x1f6c0, 0x1f6c0,
1825 		0x1f6e0, 0x1f6e0,
1826 		0x1f700, 0x1f784,
1827 		0x1f7c0, 0x1f7c8,
1828 		0x1f808, 0x1f80c,
1829 		0x1f840, 0x1f844,
1830 		0x1f84c, 0x1f84c,
1831 		0x1fa84, 0x1fa90,
1832 		0x1fac0, 0x1fac0,
1833 		0x1fae0, 0x1fae0,
1834 		0x1fb00, 0x1fb84,
1835 		0x1fbc0, 0x1fbc8,
1836 		0x1fc08, 0x1fc0c,
1837 		0x1fc40, 0x1fc44,
1838 		0x1fc4c, 0x1fc4c,
1839 		0x1fe84, 0x1fe90,
1840 		0x1fec0, 0x1fec0,
1841 		0x1fee0, 0x1fee0,
1842 		0x1ff00, 0x1ff84,
1843 		0x1ffc0, 0x1ffc8,
1844 		0x30000, 0x30030,
1845 		0x30100, 0x30144,
1846 		0x30190, 0x301a0,
1847 		0x301a8, 0x301b8,
1848 		0x301c4, 0x301c8,
1849 		0x301d0, 0x301d0,
1850 		0x30200, 0x30318,
1851 		0x30400, 0x304b4,
1852 		0x304c0, 0x3052c,
1853 		0x30540, 0x3061c,
1854 		0x30800, 0x30828,
1855 		0x30834, 0x30834,
1856 		0x308c0, 0x30908,
1857 		0x30910, 0x309ac,
1858 		0x30a00, 0x30a14,
1859 		0x30a1c, 0x30a2c,
1860 		0x30a44, 0x30a50,
1861 		0x30a74, 0x30a74,
1862 		0x30a7c, 0x30afc,
1863 		0x30b08, 0x30c24,
1864 		0x30d00, 0x30d00,
1865 		0x30d08, 0x30d14,
1866 		0x30d1c, 0x30d20,
1867 		0x30d3c, 0x30d3c,
1868 		0x30d48, 0x30d50,
1869 		0x31200, 0x3120c,
1870 		0x31220, 0x31220,
1871 		0x31240, 0x31240,
1872 		0x31600, 0x3160c,
1873 		0x31a00, 0x31a1c,
1874 		0x31e00, 0x31e20,
1875 		0x31e38, 0x31e3c,
1876 		0x31e80, 0x31e80,
1877 		0x31e88, 0x31ea8,
1878 		0x31eb0, 0x31eb4,
1879 		0x31ec8, 0x31ed4,
1880 		0x31fb8, 0x32004,
1881 		0x32200, 0x32200,
1882 		0x32208, 0x32240,
1883 		0x32248, 0x32280,
1884 		0x32288, 0x322c0,
1885 		0x322c8, 0x322fc,
1886 		0x32600, 0x32630,
1887 		0x32a00, 0x32abc,
1888 		0x32b00, 0x32b10,
1889 		0x32b20, 0x32b30,
1890 		0x32b40, 0x32b50,
1891 		0x32b60, 0x32b70,
1892 		0x33000, 0x33028,
1893 		0x33030, 0x33048,
1894 		0x33060, 0x33068,
1895 		0x33070, 0x3309c,
1896 		0x330f0, 0x33128,
1897 		0x33130, 0x33148,
1898 		0x33160, 0x33168,
1899 		0x33170, 0x3319c,
1900 		0x331f0, 0x33238,
1901 		0x33240, 0x33240,
1902 		0x33248, 0x33250,
1903 		0x3325c, 0x33264,
1904 		0x33270, 0x332b8,
1905 		0x332c0, 0x332e4,
1906 		0x332f8, 0x33338,
1907 		0x33340, 0x33340,
1908 		0x33348, 0x33350,
1909 		0x3335c, 0x33364,
1910 		0x33370, 0x333b8,
1911 		0x333c0, 0x333e4,
1912 		0x333f8, 0x33428,
1913 		0x33430, 0x33448,
1914 		0x33460, 0x33468,
1915 		0x33470, 0x3349c,
1916 		0x334f0, 0x33528,
1917 		0x33530, 0x33548,
1918 		0x33560, 0x33568,
1919 		0x33570, 0x3359c,
1920 		0x335f0, 0x33638,
1921 		0x33640, 0x33640,
1922 		0x33648, 0x33650,
1923 		0x3365c, 0x33664,
1924 		0x33670, 0x336b8,
1925 		0x336c0, 0x336e4,
1926 		0x336f8, 0x33738,
1927 		0x33740, 0x33740,
1928 		0x33748, 0x33750,
1929 		0x3375c, 0x33764,
1930 		0x33770, 0x337b8,
1931 		0x337c0, 0x337e4,
1932 		0x337f8, 0x337fc,
1933 		0x33814, 0x33814,
1934 		0x3382c, 0x3382c,
1935 		0x33880, 0x3388c,
1936 		0x338e8, 0x338ec,
1937 		0x33900, 0x33928,
1938 		0x33930, 0x33948,
1939 		0x33960, 0x33968,
1940 		0x33970, 0x3399c,
1941 		0x339f0, 0x33a38,
1942 		0x33a40, 0x33a40,
1943 		0x33a48, 0x33a50,
1944 		0x33a5c, 0x33a64,
1945 		0x33a70, 0x33ab8,
1946 		0x33ac0, 0x33ae4,
1947 		0x33af8, 0x33b10,
1948 		0x33b28, 0x33b28,
1949 		0x33b3c, 0x33b50,
1950 		0x33bf0, 0x33c10,
1951 		0x33c28, 0x33c28,
1952 		0x33c3c, 0x33c50,
1953 		0x33cf0, 0x33cfc,
1954 		0x34000, 0x34030,
1955 		0x34100, 0x34144,
1956 		0x34190, 0x341a0,
1957 		0x341a8, 0x341b8,
1958 		0x341c4, 0x341c8,
1959 		0x341d0, 0x341d0,
1960 		0x34200, 0x34318,
1961 		0x34400, 0x344b4,
1962 		0x344c0, 0x3452c,
1963 		0x34540, 0x3461c,
1964 		0x34800, 0x34828,
1965 		0x34834, 0x34834,
1966 		0x348c0, 0x34908,
1967 		0x34910, 0x349ac,
1968 		0x34a00, 0x34a14,
1969 		0x34a1c, 0x34a2c,
1970 		0x34a44, 0x34a50,
1971 		0x34a74, 0x34a74,
1972 		0x34a7c, 0x34afc,
1973 		0x34b08, 0x34c24,
1974 		0x34d00, 0x34d00,
1975 		0x34d08, 0x34d14,
1976 		0x34d1c, 0x34d20,
1977 		0x34d3c, 0x34d3c,
1978 		0x34d48, 0x34d50,
1979 		0x35200, 0x3520c,
1980 		0x35220, 0x35220,
1981 		0x35240, 0x35240,
1982 		0x35600, 0x3560c,
1983 		0x35a00, 0x35a1c,
1984 		0x35e00, 0x35e20,
1985 		0x35e38, 0x35e3c,
1986 		0x35e80, 0x35e80,
1987 		0x35e88, 0x35ea8,
1988 		0x35eb0, 0x35eb4,
1989 		0x35ec8, 0x35ed4,
1990 		0x35fb8, 0x36004,
1991 		0x36200, 0x36200,
1992 		0x36208, 0x36240,
1993 		0x36248, 0x36280,
1994 		0x36288, 0x362c0,
1995 		0x362c8, 0x362fc,
1996 		0x36600, 0x36630,
1997 		0x36a00, 0x36abc,
1998 		0x36b00, 0x36b10,
1999 		0x36b20, 0x36b30,
2000 		0x36b40, 0x36b50,
2001 		0x36b60, 0x36b70,
2002 		0x37000, 0x37028,
2003 		0x37030, 0x37048,
2004 		0x37060, 0x37068,
2005 		0x37070, 0x3709c,
2006 		0x370f0, 0x37128,
2007 		0x37130, 0x37148,
2008 		0x37160, 0x37168,
2009 		0x37170, 0x3719c,
2010 		0x371f0, 0x37238,
2011 		0x37240, 0x37240,
2012 		0x37248, 0x37250,
2013 		0x3725c, 0x37264,
2014 		0x37270, 0x372b8,
2015 		0x372c0, 0x372e4,
2016 		0x372f8, 0x37338,
2017 		0x37340, 0x37340,
2018 		0x37348, 0x37350,
2019 		0x3735c, 0x37364,
2020 		0x37370, 0x373b8,
2021 		0x373c0, 0x373e4,
2022 		0x373f8, 0x37428,
2023 		0x37430, 0x37448,
2024 		0x37460, 0x37468,
2025 		0x37470, 0x3749c,
2026 		0x374f0, 0x37528,
2027 		0x37530, 0x37548,
2028 		0x37560, 0x37568,
2029 		0x37570, 0x3759c,
2030 		0x375f0, 0x37638,
2031 		0x37640, 0x37640,
2032 		0x37648, 0x37650,
2033 		0x3765c, 0x37664,
2034 		0x37670, 0x376b8,
2035 		0x376c0, 0x376e4,
2036 		0x376f8, 0x37738,
2037 		0x37740, 0x37740,
2038 		0x37748, 0x37750,
2039 		0x3775c, 0x37764,
2040 		0x37770, 0x377b8,
2041 		0x377c0, 0x377e4,
2042 		0x377f8, 0x377fc,
2043 		0x37814, 0x37814,
2044 		0x3782c, 0x3782c,
2045 		0x37880, 0x3788c,
2046 		0x378e8, 0x378ec,
2047 		0x37900, 0x37928,
2048 		0x37930, 0x37948,
2049 		0x37960, 0x37968,
2050 		0x37970, 0x3799c,
2051 		0x379f0, 0x37a38,
2052 		0x37a40, 0x37a40,
2053 		0x37a48, 0x37a50,
2054 		0x37a5c, 0x37a64,
2055 		0x37a70, 0x37ab8,
2056 		0x37ac0, 0x37ae4,
2057 		0x37af8, 0x37b10,
2058 		0x37b28, 0x37b28,
2059 		0x37b3c, 0x37b50,
2060 		0x37bf0, 0x37c10,
2061 		0x37c28, 0x37c28,
2062 		0x37c3c, 0x37c50,
2063 		0x37cf0, 0x37cfc,
2064 		0x38000, 0x38030,
2065 		0x38100, 0x38144,
2066 		0x38190, 0x381a0,
2067 		0x381a8, 0x381b8,
2068 		0x381c4, 0x381c8,
2069 		0x381d0, 0x381d0,
2070 		0x38200, 0x38318,
2071 		0x38400, 0x384b4,
2072 		0x384c0, 0x3852c,
2073 		0x38540, 0x3861c,
2074 		0x38800, 0x38828,
2075 		0x38834, 0x38834,
2076 		0x388c0, 0x38908,
2077 		0x38910, 0x389ac,
2078 		0x38a00, 0x38a14,
2079 		0x38a1c, 0x38a2c,
2080 		0x38a44, 0x38a50,
2081 		0x38a74, 0x38a74,
2082 		0x38a7c, 0x38afc,
2083 		0x38b08, 0x38c24,
2084 		0x38d00, 0x38d00,
2085 		0x38d08, 0x38d14,
2086 		0x38d1c, 0x38d20,
2087 		0x38d3c, 0x38d3c,
2088 		0x38d48, 0x38d50,
2089 		0x39200, 0x3920c,
2090 		0x39220, 0x39220,
2091 		0x39240, 0x39240,
2092 		0x39600, 0x3960c,
2093 		0x39a00, 0x39a1c,
2094 		0x39e00, 0x39e20,
2095 		0x39e38, 0x39e3c,
2096 		0x39e80, 0x39e80,
2097 		0x39e88, 0x39ea8,
2098 		0x39eb0, 0x39eb4,
2099 		0x39ec8, 0x39ed4,
2100 		0x39fb8, 0x3a004,
2101 		0x3a200, 0x3a200,
2102 		0x3a208, 0x3a240,
2103 		0x3a248, 0x3a280,
2104 		0x3a288, 0x3a2c0,
2105 		0x3a2c8, 0x3a2fc,
2106 		0x3a600, 0x3a630,
2107 		0x3aa00, 0x3aabc,
2108 		0x3ab00, 0x3ab10,
2109 		0x3ab20, 0x3ab30,
2110 		0x3ab40, 0x3ab50,
2111 		0x3ab60, 0x3ab70,
2112 		0x3b000, 0x3b028,
2113 		0x3b030, 0x3b048,
2114 		0x3b060, 0x3b068,
2115 		0x3b070, 0x3b09c,
2116 		0x3b0f0, 0x3b128,
2117 		0x3b130, 0x3b148,
2118 		0x3b160, 0x3b168,
2119 		0x3b170, 0x3b19c,
2120 		0x3b1f0, 0x3b238,
2121 		0x3b240, 0x3b240,
2122 		0x3b248, 0x3b250,
2123 		0x3b25c, 0x3b264,
2124 		0x3b270, 0x3b2b8,
2125 		0x3b2c0, 0x3b2e4,
2126 		0x3b2f8, 0x3b338,
2127 		0x3b340, 0x3b340,
2128 		0x3b348, 0x3b350,
2129 		0x3b35c, 0x3b364,
2130 		0x3b370, 0x3b3b8,
2131 		0x3b3c0, 0x3b3e4,
2132 		0x3b3f8, 0x3b428,
2133 		0x3b430, 0x3b448,
2134 		0x3b460, 0x3b468,
2135 		0x3b470, 0x3b49c,
2136 		0x3b4f0, 0x3b528,
2137 		0x3b530, 0x3b548,
2138 		0x3b560, 0x3b568,
2139 		0x3b570, 0x3b59c,
2140 		0x3b5f0, 0x3b638,
2141 		0x3b640, 0x3b640,
2142 		0x3b648, 0x3b650,
2143 		0x3b65c, 0x3b664,
2144 		0x3b670, 0x3b6b8,
2145 		0x3b6c0, 0x3b6e4,
2146 		0x3b6f8, 0x3b738,
2147 		0x3b740, 0x3b740,
2148 		0x3b748, 0x3b750,
2149 		0x3b75c, 0x3b764,
2150 		0x3b770, 0x3b7b8,
2151 		0x3b7c0, 0x3b7e4,
2152 		0x3b7f8, 0x3b7fc,
2153 		0x3b814, 0x3b814,
2154 		0x3b82c, 0x3b82c,
2155 		0x3b880, 0x3b88c,
2156 		0x3b8e8, 0x3b8ec,
2157 		0x3b900, 0x3b928,
2158 		0x3b930, 0x3b948,
2159 		0x3b960, 0x3b968,
2160 		0x3b970, 0x3b99c,
2161 		0x3b9f0, 0x3ba38,
2162 		0x3ba40, 0x3ba40,
2163 		0x3ba48, 0x3ba50,
2164 		0x3ba5c, 0x3ba64,
2165 		0x3ba70, 0x3bab8,
2166 		0x3bac0, 0x3bae4,
2167 		0x3baf8, 0x3bb10,
2168 		0x3bb28, 0x3bb28,
2169 		0x3bb3c, 0x3bb50,
2170 		0x3bbf0, 0x3bc10,
2171 		0x3bc28, 0x3bc28,
2172 		0x3bc3c, 0x3bc50,
2173 		0x3bcf0, 0x3bcfc,
2174 		0x3c000, 0x3c030,
2175 		0x3c100, 0x3c144,
2176 		0x3c190, 0x3c1a0,
2177 		0x3c1a8, 0x3c1b8,
2178 		0x3c1c4, 0x3c1c8,
2179 		0x3c1d0, 0x3c1d0,
2180 		0x3c200, 0x3c318,
2181 		0x3c400, 0x3c4b4,
2182 		0x3c4c0, 0x3c52c,
2183 		0x3c540, 0x3c61c,
2184 		0x3c800, 0x3c828,
2185 		0x3c834, 0x3c834,
2186 		0x3c8c0, 0x3c908,
2187 		0x3c910, 0x3c9ac,
2188 		0x3ca00, 0x3ca14,
2189 		0x3ca1c, 0x3ca2c,
2190 		0x3ca44, 0x3ca50,
2191 		0x3ca74, 0x3ca74,
2192 		0x3ca7c, 0x3cafc,
2193 		0x3cb08, 0x3cc24,
2194 		0x3cd00, 0x3cd00,
2195 		0x3cd08, 0x3cd14,
2196 		0x3cd1c, 0x3cd20,
2197 		0x3cd3c, 0x3cd3c,
2198 		0x3cd48, 0x3cd50,
2199 		0x3d200, 0x3d20c,
2200 		0x3d220, 0x3d220,
2201 		0x3d240, 0x3d240,
2202 		0x3d600, 0x3d60c,
2203 		0x3da00, 0x3da1c,
2204 		0x3de00, 0x3de20,
2205 		0x3de38, 0x3de3c,
2206 		0x3de80, 0x3de80,
2207 		0x3de88, 0x3dea8,
2208 		0x3deb0, 0x3deb4,
2209 		0x3dec8, 0x3ded4,
2210 		0x3dfb8, 0x3e004,
2211 		0x3e200, 0x3e200,
2212 		0x3e208, 0x3e240,
2213 		0x3e248, 0x3e280,
2214 		0x3e288, 0x3e2c0,
2215 		0x3e2c8, 0x3e2fc,
2216 		0x3e600, 0x3e630,
2217 		0x3ea00, 0x3eabc,
2218 		0x3eb00, 0x3eb10,
2219 		0x3eb20, 0x3eb30,
2220 		0x3eb40, 0x3eb50,
2221 		0x3eb60, 0x3eb70,
2222 		0x3f000, 0x3f028,
2223 		0x3f030, 0x3f048,
2224 		0x3f060, 0x3f068,
2225 		0x3f070, 0x3f09c,
2226 		0x3f0f0, 0x3f128,
2227 		0x3f130, 0x3f148,
2228 		0x3f160, 0x3f168,
2229 		0x3f170, 0x3f19c,
2230 		0x3f1f0, 0x3f238,
2231 		0x3f240, 0x3f240,
2232 		0x3f248, 0x3f250,
2233 		0x3f25c, 0x3f264,
2234 		0x3f270, 0x3f2b8,
2235 		0x3f2c0, 0x3f2e4,
2236 		0x3f2f8, 0x3f338,
2237 		0x3f340, 0x3f340,
2238 		0x3f348, 0x3f350,
2239 		0x3f35c, 0x3f364,
2240 		0x3f370, 0x3f3b8,
2241 		0x3f3c0, 0x3f3e4,
2242 		0x3f3f8, 0x3f428,
2243 		0x3f430, 0x3f448,
2244 		0x3f460, 0x3f468,
2245 		0x3f470, 0x3f49c,
2246 		0x3f4f0, 0x3f528,
2247 		0x3f530, 0x3f548,
2248 		0x3f560, 0x3f568,
2249 		0x3f570, 0x3f59c,
2250 		0x3f5f0, 0x3f638,
2251 		0x3f640, 0x3f640,
2252 		0x3f648, 0x3f650,
2253 		0x3f65c, 0x3f664,
2254 		0x3f670, 0x3f6b8,
2255 		0x3f6c0, 0x3f6e4,
2256 		0x3f6f8, 0x3f738,
2257 		0x3f740, 0x3f740,
2258 		0x3f748, 0x3f750,
2259 		0x3f75c, 0x3f764,
2260 		0x3f770, 0x3f7b8,
2261 		0x3f7c0, 0x3f7e4,
2262 		0x3f7f8, 0x3f7fc,
2263 		0x3f814, 0x3f814,
2264 		0x3f82c, 0x3f82c,
2265 		0x3f880, 0x3f88c,
2266 		0x3f8e8, 0x3f8ec,
2267 		0x3f900, 0x3f928,
2268 		0x3f930, 0x3f948,
2269 		0x3f960, 0x3f968,
2270 		0x3f970, 0x3f99c,
2271 		0x3f9f0, 0x3fa38,
2272 		0x3fa40, 0x3fa40,
2273 		0x3fa48, 0x3fa50,
2274 		0x3fa5c, 0x3fa64,
2275 		0x3fa70, 0x3fab8,
2276 		0x3fac0, 0x3fae4,
2277 		0x3faf8, 0x3fb10,
2278 		0x3fb28, 0x3fb28,
2279 		0x3fb3c, 0x3fb50,
2280 		0x3fbf0, 0x3fc10,
2281 		0x3fc28, 0x3fc28,
2282 		0x3fc3c, 0x3fc50,
2283 		0x3fcf0, 0x3fcfc,
2284 		0x40000, 0x4000c,
2285 		0x40040, 0x40050,
2286 		0x40060, 0x40068,
2287 		0x4007c, 0x4008c,
2288 		0x40094, 0x400b0,
2289 		0x400c0, 0x40144,
2290 		0x40180, 0x4018c,
2291 		0x40200, 0x40254,
2292 		0x40260, 0x40264,
2293 		0x40270, 0x40288,
2294 		0x40290, 0x40298,
2295 		0x402ac, 0x402c8,
2296 		0x402d0, 0x402e0,
2297 		0x402f0, 0x402f0,
2298 		0x40300, 0x4033c,
2299 		0x403f8, 0x403fc,
2300 		0x41304, 0x413c4,
2301 		0x41400, 0x4140c,
2302 		0x41414, 0x4141c,
2303 		0x41480, 0x414d0,
2304 		0x44000, 0x44054,
2305 		0x4405c, 0x44078,
2306 		0x440c0, 0x44174,
2307 		0x44180, 0x441ac,
2308 		0x441b4, 0x441b8,
2309 		0x441c0, 0x44254,
2310 		0x4425c, 0x44278,
2311 		0x442c0, 0x44374,
2312 		0x44380, 0x443ac,
2313 		0x443b4, 0x443b8,
2314 		0x443c0, 0x44454,
2315 		0x4445c, 0x44478,
2316 		0x444c0, 0x44574,
2317 		0x44580, 0x445ac,
2318 		0x445b4, 0x445b8,
2319 		0x445c0, 0x44654,
2320 		0x4465c, 0x44678,
2321 		0x446c0, 0x44774,
2322 		0x44780, 0x447ac,
2323 		0x447b4, 0x447b8,
2324 		0x447c0, 0x44854,
2325 		0x4485c, 0x44878,
2326 		0x448c0, 0x44974,
2327 		0x44980, 0x449ac,
2328 		0x449b4, 0x449b8,
2329 		0x449c0, 0x449fc,
2330 		0x45000, 0x45004,
2331 		0x45010, 0x45030,
2332 		0x45040, 0x45060,
2333 		0x45068, 0x45068,
2334 		0x45080, 0x45084,
2335 		0x450a0, 0x450b0,
2336 		0x45200, 0x45204,
2337 		0x45210, 0x45230,
2338 		0x45240, 0x45260,
2339 		0x45268, 0x45268,
2340 		0x45280, 0x45284,
2341 		0x452a0, 0x452b0,
2342 		0x460c0, 0x460e4,
2343 		0x47000, 0x4703c,
2344 		0x47044, 0x4708c,
2345 		0x47200, 0x47250,
2346 		0x47400, 0x47408,
2347 		0x47414, 0x47420,
2348 		0x47600, 0x47618,
2349 		0x47800, 0x47814,
2350 		0x48000, 0x4800c,
2351 		0x48040, 0x48050,
2352 		0x48060, 0x48068,
2353 		0x4807c, 0x4808c,
2354 		0x48094, 0x480b0,
2355 		0x480c0, 0x48144,
2356 		0x48180, 0x4818c,
2357 		0x48200, 0x48254,
2358 		0x48260, 0x48264,
2359 		0x48270, 0x48288,
2360 		0x48290, 0x48298,
2361 		0x482ac, 0x482c8,
2362 		0x482d0, 0x482e0,
2363 		0x482f0, 0x482f0,
2364 		0x48300, 0x4833c,
2365 		0x483f8, 0x483fc,
2366 		0x49304, 0x493c4,
2367 		0x49400, 0x4940c,
2368 		0x49414, 0x4941c,
2369 		0x49480, 0x494d0,
2370 		0x4c000, 0x4c054,
2371 		0x4c05c, 0x4c078,
2372 		0x4c0c0, 0x4c174,
2373 		0x4c180, 0x4c1ac,
2374 		0x4c1b4, 0x4c1b8,
2375 		0x4c1c0, 0x4c254,
2376 		0x4c25c, 0x4c278,
2377 		0x4c2c0, 0x4c374,
2378 		0x4c380, 0x4c3ac,
2379 		0x4c3b4, 0x4c3b8,
2380 		0x4c3c0, 0x4c454,
2381 		0x4c45c, 0x4c478,
2382 		0x4c4c0, 0x4c574,
2383 		0x4c580, 0x4c5ac,
2384 		0x4c5b4, 0x4c5b8,
2385 		0x4c5c0, 0x4c654,
2386 		0x4c65c, 0x4c678,
2387 		0x4c6c0, 0x4c774,
2388 		0x4c780, 0x4c7ac,
2389 		0x4c7b4, 0x4c7b8,
2390 		0x4c7c0, 0x4c854,
2391 		0x4c85c, 0x4c878,
2392 		0x4c8c0, 0x4c974,
2393 		0x4c980, 0x4c9ac,
2394 		0x4c9b4, 0x4c9b8,
2395 		0x4c9c0, 0x4c9fc,
2396 		0x4d000, 0x4d004,
2397 		0x4d010, 0x4d030,
2398 		0x4d040, 0x4d060,
2399 		0x4d068, 0x4d068,
2400 		0x4d080, 0x4d084,
2401 		0x4d0a0, 0x4d0b0,
2402 		0x4d200, 0x4d204,
2403 		0x4d210, 0x4d230,
2404 		0x4d240, 0x4d260,
2405 		0x4d268, 0x4d268,
2406 		0x4d280, 0x4d284,
2407 		0x4d2a0, 0x4d2b0,
2408 		0x4e0c0, 0x4e0e4,
2409 		0x4f000, 0x4f03c,
2410 		0x4f044, 0x4f08c,
2411 		0x4f200, 0x4f250,
2412 		0x4f400, 0x4f408,
2413 		0x4f414, 0x4f420,
2414 		0x4f600, 0x4f618,
2415 		0x4f800, 0x4f814,
2416 		0x50000, 0x50084,
2417 		0x50090, 0x500cc,
2418 		0x50400, 0x50400,
2419 		0x50800, 0x50884,
2420 		0x50890, 0x508cc,
2421 		0x50c00, 0x50c00,
2422 		0x51000, 0x5101c,
2423 		0x51300, 0x51308,
2424 	};
2425 
2426 	static const unsigned int t6_reg_ranges[] = {
2427 		0x1008, 0x101c,
2428 		0x1024, 0x10a8,
2429 		0x10b4, 0x10f8,
2430 		0x1100, 0x1114,
2431 		0x111c, 0x112c,
2432 		0x1138, 0x113c,
2433 		0x1144, 0x114c,
2434 		0x1180, 0x1184,
2435 		0x1190, 0x1194,
2436 		0x11a0, 0x11a4,
2437 		0x11b0, 0x11c4,
2438 		0x11fc, 0x1274,
2439 		0x1280, 0x133c,
2440 		0x1800, 0x18fc,
2441 		0x3000, 0x302c,
2442 		0x3060, 0x30b0,
2443 		0x30b8, 0x30d8,
2444 		0x30e0, 0x30fc,
2445 		0x3140, 0x357c,
2446 		0x35a8, 0x35cc,
2447 		0x35ec, 0x35ec,
2448 		0x3600, 0x5624,
2449 		0x56cc, 0x56ec,
2450 		0x56f4, 0x5720,
2451 		0x5728, 0x575c,
2452 		0x580c, 0x5814,
2453 		0x5890, 0x589c,
2454 		0x58a4, 0x58ac,
2455 		0x58b8, 0x58bc,
2456 		0x5940, 0x595c,
2457 		0x5980, 0x598c,
2458 		0x59b0, 0x59c8,
2459 		0x59d0, 0x59dc,
2460 		0x59fc, 0x5a18,
2461 		0x5a60, 0x5a6c,
2462 		0x5a80, 0x5a8c,
2463 		0x5a94, 0x5a9c,
2464 		0x5b94, 0x5bfc,
2465 		0x5c10, 0x5e48,
2466 		0x5e50, 0x5e94,
2467 		0x5ea0, 0x5eb0,
2468 		0x5ec0, 0x5ec0,
2469 		0x5ec8, 0x5ed0,
2470 		0x5ee0, 0x5ee0,
2471 		0x5ef0, 0x5ef0,
2472 		0x5f00, 0x5f00,
2473 		0x6000, 0x6020,
2474 		0x6028, 0x6040,
2475 		0x6058, 0x609c,
2476 		0x60a8, 0x619c,
2477 		0x7700, 0x7798,
2478 		0x77c0, 0x7880,
2479 		0x78cc, 0x78fc,
2480 		0x7b00, 0x7b58,
2481 		0x7b60, 0x7b84,
2482 		0x7b8c, 0x7c54,
2483 		0x7d00, 0x7d38,
2484 		0x7d40, 0x7d84,
2485 		0x7d8c, 0x7ddc,
2486 		0x7de4, 0x7e04,
2487 		0x7e10, 0x7e1c,
2488 		0x7e24, 0x7e38,
2489 		0x7e40, 0x7e44,
2490 		0x7e4c, 0x7e78,
2491 		0x7e80, 0x7edc,
2492 		0x7ee8, 0x7efc,
2493 		0x8dc0, 0x8de0,
2494 		0x8df8, 0x8e04,
2495 		0x8e10, 0x8e84,
2496 		0x8ea0, 0x8f88,
2497 		0x8fb8, 0x9058,
2498 		0x9060, 0x9060,
2499 		0x9068, 0x90f8,
2500 		0x9100, 0x9124,
2501 		0x9400, 0x9470,
2502 		0x9600, 0x9600,
2503 		0x9608, 0x9638,
2504 		0x9640, 0x9704,
2505 		0x9710, 0x971c,
2506 		0x9800, 0x9808,
2507 		0x9820, 0x983c,
2508 		0x9850, 0x9864,
2509 		0x9c00, 0x9c6c,
2510 		0x9c80, 0x9cec,
2511 		0x9d00, 0x9d6c,
2512 		0x9d80, 0x9dec,
2513 		0x9e00, 0x9e6c,
2514 		0x9e80, 0x9eec,
2515 		0x9f00, 0x9f6c,
2516 		0x9f80, 0xa020,
2517 		0xd004, 0xd03c,
2518 		0xd100, 0xd118,
2519 		0xd200, 0xd214,
2520 		0xd220, 0xd234,
2521 		0xd240, 0xd254,
2522 		0xd260, 0xd274,
2523 		0xd280, 0xd294,
2524 		0xd2a0, 0xd2b4,
2525 		0xd2c0, 0xd2d4,
2526 		0xd2e0, 0xd2f4,
2527 		0xd300, 0xd31c,
2528 		0xdfc0, 0xdfe0,
2529 		0xe000, 0xf008,
2530 		0xf010, 0xf018,
2531 		0xf020, 0xf028,
2532 		0x11000, 0x11014,
2533 		0x11048, 0x1106c,
2534 		0x11074, 0x11088,
2535 		0x11098, 0x11120,
2536 		0x1112c, 0x1117c,
2537 		0x11190, 0x112e0,
2538 		0x11300, 0x1130c,
2539 		0x12000, 0x1206c,
2540 		0x19040, 0x1906c,
2541 		0x19078, 0x19080,
2542 		0x1908c, 0x190e8,
2543 		0x190f0, 0x190f8,
2544 		0x19100, 0x19110,
2545 		0x19120, 0x19124,
2546 		0x19150, 0x19194,
2547 		0x1919c, 0x191b0,
2548 		0x191d0, 0x191e8,
2549 		0x19238, 0x19290,
2550 		0x192a4, 0x192b0,
2551 		0x19348, 0x1934c,
2552 		0x193f8, 0x19418,
2553 		0x19420, 0x19428,
2554 		0x19430, 0x19444,
2555 		0x1944c, 0x1946c,
2556 		0x19474, 0x19474,
2557 		0x19490, 0x194cc,
2558 		0x194f0, 0x194f8,
2559 		0x19c00, 0x19c48,
2560 		0x19c50, 0x19c80,
2561 		0x19c94, 0x19c98,
2562 		0x19ca0, 0x19cbc,
2563 		0x19ce4, 0x19ce4,
2564 		0x19cf0, 0x19cf8,
2565 		0x19d00, 0x19d28,
2566 		0x19d50, 0x19d78,
2567 		0x19d94, 0x19d98,
2568 		0x19da0, 0x19de0,
2569 		0x19df0, 0x19e10,
2570 		0x19e50, 0x19e6c,
2571 		0x19ea0, 0x19ebc,
2572 		0x19ec4, 0x19ef4,
2573 		0x19f04, 0x19f2c,
2574 		0x19f34, 0x19f34,
2575 		0x19f40, 0x19f50,
2576 		0x19f90, 0x19fac,
2577 		0x19fc4, 0x19fc8,
2578 		0x19fd0, 0x19fe4,
2579 		0x1a000, 0x1a004,
2580 		0x1a010, 0x1a06c,
2581 		0x1a0b0, 0x1a0e4,
2582 		0x1a0ec, 0x1a0f8,
2583 		0x1a100, 0x1a108,
2584 		0x1a114, 0x1a120,
2585 		0x1a128, 0x1a130,
2586 		0x1a138, 0x1a138,
2587 		0x1a190, 0x1a1c4,
2588 		0x1a1fc, 0x1a1fc,
2589 		0x1e008, 0x1e00c,
2590 		0x1e040, 0x1e044,
2591 		0x1e04c, 0x1e04c,
2592 		0x1e284, 0x1e290,
2593 		0x1e2c0, 0x1e2c0,
2594 		0x1e2e0, 0x1e2e0,
2595 		0x1e300, 0x1e384,
2596 		0x1e3c0, 0x1e3c8,
2597 		0x1e408, 0x1e40c,
2598 		0x1e440, 0x1e444,
2599 		0x1e44c, 0x1e44c,
2600 		0x1e684, 0x1e690,
2601 		0x1e6c0, 0x1e6c0,
2602 		0x1e6e0, 0x1e6e0,
2603 		0x1e700, 0x1e784,
2604 		0x1e7c0, 0x1e7c8,
2605 		0x1e808, 0x1e80c,
2606 		0x1e840, 0x1e844,
2607 		0x1e84c, 0x1e84c,
2608 		0x1ea84, 0x1ea90,
2609 		0x1eac0, 0x1eac0,
2610 		0x1eae0, 0x1eae0,
2611 		0x1eb00, 0x1eb84,
2612 		0x1ebc0, 0x1ebc8,
2613 		0x1ec08, 0x1ec0c,
2614 		0x1ec40, 0x1ec44,
2615 		0x1ec4c, 0x1ec4c,
2616 		0x1ee84, 0x1ee90,
2617 		0x1eec0, 0x1eec0,
2618 		0x1eee0, 0x1eee0,
2619 		0x1ef00, 0x1ef84,
2620 		0x1efc0, 0x1efc8,
2621 		0x1f008, 0x1f00c,
2622 		0x1f040, 0x1f044,
2623 		0x1f04c, 0x1f04c,
2624 		0x1f284, 0x1f290,
2625 		0x1f2c0, 0x1f2c0,
2626 		0x1f2e0, 0x1f2e0,
2627 		0x1f300, 0x1f384,
2628 		0x1f3c0, 0x1f3c8,
2629 		0x1f408, 0x1f40c,
2630 		0x1f440, 0x1f444,
2631 		0x1f44c, 0x1f44c,
2632 		0x1f684, 0x1f690,
2633 		0x1f6c0, 0x1f6c0,
2634 		0x1f6e0, 0x1f6e0,
2635 		0x1f700, 0x1f784,
2636 		0x1f7c0, 0x1f7c8,
2637 		0x1f808, 0x1f80c,
2638 		0x1f840, 0x1f844,
2639 		0x1f84c, 0x1f84c,
2640 		0x1fa84, 0x1fa90,
2641 		0x1fac0, 0x1fac0,
2642 		0x1fae0, 0x1fae0,
2643 		0x1fb00, 0x1fb84,
2644 		0x1fbc0, 0x1fbc8,
2645 		0x1fc08, 0x1fc0c,
2646 		0x1fc40, 0x1fc44,
2647 		0x1fc4c, 0x1fc4c,
2648 		0x1fe84, 0x1fe90,
2649 		0x1fec0, 0x1fec0,
2650 		0x1fee0, 0x1fee0,
2651 		0x1ff00, 0x1ff84,
2652 		0x1ffc0, 0x1ffc8,
2653 		0x30000, 0x30030,
2654 		0x30100, 0x30168,
2655 		0x30190, 0x301a0,
2656 		0x301a8, 0x301b8,
2657 		0x301c4, 0x301c8,
2658 		0x301d0, 0x301d0,
2659 		0x30200, 0x30320,
2660 		0x30400, 0x304b4,
2661 		0x304c0, 0x3052c,
2662 		0x30540, 0x3061c,
2663 		0x30800, 0x308a0,
2664 		0x308c0, 0x30908,
2665 		0x30910, 0x309b8,
2666 		0x30a00, 0x30a04,
2667 		0x30a0c, 0x30a14,
2668 		0x30a1c, 0x30a2c,
2669 		0x30a44, 0x30a50,
2670 		0x30a74, 0x30a74,
2671 		0x30a7c, 0x30afc,
2672 		0x30b08, 0x30c24,
2673 		0x30d00, 0x30d14,
2674 		0x30d1c, 0x30d3c,
2675 		0x30d44, 0x30d4c,
2676 		0x30d54, 0x30d74,
2677 		0x30d7c, 0x30d7c,
2678 		0x30de0, 0x30de0,
2679 		0x30e00, 0x30ed4,
2680 		0x30f00, 0x30fa4,
2681 		0x30fc0, 0x30fc4,
2682 		0x31000, 0x31004,
2683 		0x31080, 0x310fc,
2684 		0x31208, 0x31220,
2685 		0x3123c, 0x31254,
2686 		0x31300, 0x31300,
2687 		0x31308, 0x3131c,
2688 		0x31338, 0x3133c,
2689 		0x31380, 0x31380,
2690 		0x31388, 0x313a8,
2691 		0x313b4, 0x313b4,
2692 		0x31400, 0x31420,
2693 		0x31438, 0x3143c,
2694 		0x31480, 0x31480,
2695 		0x314a8, 0x314a8,
2696 		0x314b0, 0x314b4,
2697 		0x314c8, 0x314d4,
2698 		0x31a40, 0x31a4c,
2699 		0x31af0, 0x31b20,
2700 		0x31b38, 0x31b3c,
2701 		0x31b80, 0x31b80,
2702 		0x31ba8, 0x31ba8,
2703 		0x31bb0, 0x31bb4,
2704 		0x31bc8, 0x31bd4,
2705 		0x32140, 0x3218c,
2706 		0x321f0, 0x321f4,
2707 		0x32200, 0x32200,
2708 		0x32218, 0x32218,
2709 		0x32400, 0x32400,
2710 		0x32408, 0x3241c,
2711 		0x32618, 0x32620,
2712 		0x32664, 0x32664,
2713 		0x326a8, 0x326a8,
2714 		0x326ec, 0x326ec,
2715 		0x32a00, 0x32abc,
2716 		0x32b00, 0x32b18,
2717 		0x32b20, 0x32b38,
2718 		0x32b40, 0x32b58,
2719 		0x32b60, 0x32b78,
2720 		0x32c00, 0x32c00,
2721 		0x32c08, 0x32c3c,
2722 		0x33000, 0x3302c,
2723 		0x33034, 0x33050,
2724 		0x33058, 0x33058,
2725 		0x33060, 0x3308c,
2726 		0x3309c, 0x330ac,
2727 		0x330c0, 0x330c0,
2728 		0x330c8, 0x330d0,
2729 		0x330d8, 0x330e0,
2730 		0x330ec, 0x3312c,
2731 		0x33134, 0x33150,
2732 		0x33158, 0x33158,
2733 		0x33160, 0x3318c,
2734 		0x3319c, 0x331ac,
2735 		0x331c0, 0x331c0,
2736 		0x331c8, 0x331d0,
2737 		0x331d8, 0x331e0,
2738 		0x331ec, 0x33290,
2739 		0x33298, 0x332c4,
2740 		0x332e4, 0x33390,
2741 		0x33398, 0x333c4,
2742 		0x333e4, 0x3342c,
2743 		0x33434, 0x33450,
2744 		0x33458, 0x33458,
2745 		0x33460, 0x3348c,
2746 		0x3349c, 0x334ac,
2747 		0x334c0, 0x334c0,
2748 		0x334c8, 0x334d0,
2749 		0x334d8, 0x334e0,
2750 		0x334ec, 0x3352c,
2751 		0x33534, 0x33550,
2752 		0x33558, 0x33558,
2753 		0x33560, 0x3358c,
2754 		0x3359c, 0x335ac,
2755 		0x335c0, 0x335c0,
2756 		0x335c8, 0x335d0,
2757 		0x335d8, 0x335e0,
2758 		0x335ec, 0x33690,
2759 		0x33698, 0x336c4,
2760 		0x336e4, 0x33790,
2761 		0x33798, 0x337c4,
2762 		0x337e4, 0x337fc,
2763 		0x33814, 0x33814,
2764 		0x33854, 0x33868,
2765 		0x33880, 0x3388c,
2766 		0x338c0, 0x338d0,
2767 		0x338e8, 0x338ec,
2768 		0x33900, 0x3392c,
2769 		0x33934, 0x33950,
2770 		0x33958, 0x33958,
2771 		0x33960, 0x3398c,
2772 		0x3399c, 0x339ac,
2773 		0x339c0, 0x339c0,
2774 		0x339c8, 0x339d0,
2775 		0x339d8, 0x339e0,
2776 		0x339ec, 0x33a90,
2777 		0x33a98, 0x33ac4,
2778 		0x33ae4, 0x33b10,
2779 		0x33b24, 0x33b28,
2780 		0x33b38, 0x33b50,
2781 		0x33bf0, 0x33c10,
2782 		0x33c24, 0x33c28,
2783 		0x33c38, 0x33c50,
2784 		0x33cf0, 0x33cfc,
2785 		0x34000, 0x34030,
2786 		0x34100, 0x34168,
2787 		0x34190, 0x341a0,
2788 		0x341a8, 0x341b8,
2789 		0x341c4, 0x341c8,
2790 		0x341d0, 0x341d0,
2791 		0x34200, 0x34320,
2792 		0x34400, 0x344b4,
2793 		0x344c0, 0x3452c,
2794 		0x34540, 0x3461c,
2795 		0x34800, 0x348a0,
2796 		0x348c0, 0x34908,
2797 		0x34910, 0x349b8,
2798 		0x34a00, 0x34a04,
2799 		0x34a0c, 0x34a14,
2800 		0x34a1c, 0x34a2c,
2801 		0x34a44, 0x34a50,
2802 		0x34a74, 0x34a74,
2803 		0x34a7c, 0x34afc,
2804 		0x34b08, 0x34c24,
2805 		0x34d00, 0x34d14,
2806 		0x34d1c, 0x34d3c,
2807 		0x34d44, 0x34d4c,
2808 		0x34d54, 0x34d74,
2809 		0x34d7c, 0x34d7c,
2810 		0x34de0, 0x34de0,
2811 		0x34e00, 0x34ed4,
2812 		0x34f00, 0x34fa4,
2813 		0x34fc0, 0x34fc4,
2814 		0x35000, 0x35004,
2815 		0x35080, 0x350fc,
2816 		0x35208, 0x35220,
2817 		0x3523c, 0x35254,
2818 		0x35300, 0x35300,
2819 		0x35308, 0x3531c,
2820 		0x35338, 0x3533c,
2821 		0x35380, 0x35380,
2822 		0x35388, 0x353a8,
2823 		0x353b4, 0x353b4,
2824 		0x35400, 0x35420,
2825 		0x35438, 0x3543c,
2826 		0x35480, 0x35480,
2827 		0x354a8, 0x354a8,
2828 		0x354b0, 0x354b4,
2829 		0x354c8, 0x354d4,
2830 		0x35a40, 0x35a4c,
2831 		0x35af0, 0x35b20,
2832 		0x35b38, 0x35b3c,
2833 		0x35b80, 0x35b80,
2834 		0x35ba8, 0x35ba8,
2835 		0x35bb0, 0x35bb4,
2836 		0x35bc8, 0x35bd4,
2837 		0x36140, 0x3618c,
2838 		0x361f0, 0x361f4,
2839 		0x36200, 0x36200,
2840 		0x36218, 0x36218,
2841 		0x36400, 0x36400,
2842 		0x36408, 0x3641c,
2843 		0x36618, 0x36620,
2844 		0x36664, 0x36664,
2845 		0x366a8, 0x366a8,
2846 		0x366ec, 0x366ec,
2847 		0x36a00, 0x36abc,
2848 		0x36b00, 0x36b18,
2849 		0x36b20, 0x36b38,
2850 		0x36b40, 0x36b58,
2851 		0x36b60, 0x36b78,
2852 		0x36c00, 0x36c00,
2853 		0x36c08, 0x36c3c,
2854 		0x37000, 0x3702c,
2855 		0x37034, 0x37050,
2856 		0x37058, 0x37058,
2857 		0x37060, 0x3708c,
2858 		0x3709c, 0x370ac,
2859 		0x370c0, 0x370c0,
2860 		0x370c8, 0x370d0,
2861 		0x370d8, 0x370e0,
2862 		0x370ec, 0x3712c,
2863 		0x37134, 0x37150,
2864 		0x37158, 0x37158,
2865 		0x37160, 0x3718c,
2866 		0x3719c, 0x371ac,
2867 		0x371c0, 0x371c0,
2868 		0x371c8, 0x371d0,
2869 		0x371d8, 0x371e0,
2870 		0x371ec, 0x37290,
2871 		0x37298, 0x372c4,
2872 		0x372e4, 0x37390,
2873 		0x37398, 0x373c4,
2874 		0x373e4, 0x3742c,
2875 		0x37434, 0x37450,
2876 		0x37458, 0x37458,
2877 		0x37460, 0x3748c,
2878 		0x3749c, 0x374ac,
2879 		0x374c0, 0x374c0,
2880 		0x374c8, 0x374d0,
2881 		0x374d8, 0x374e0,
2882 		0x374ec, 0x3752c,
2883 		0x37534, 0x37550,
2884 		0x37558, 0x37558,
2885 		0x37560, 0x3758c,
2886 		0x3759c, 0x375ac,
2887 		0x375c0, 0x375c0,
2888 		0x375c8, 0x375d0,
2889 		0x375d8, 0x375e0,
2890 		0x375ec, 0x37690,
2891 		0x37698, 0x376c4,
2892 		0x376e4, 0x37790,
2893 		0x37798, 0x377c4,
2894 		0x377e4, 0x377fc,
2895 		0x37814, 0x37814,
2896 		0x37854, 0x37868,
2897 		0x37880, 0x3788c,
2898 		0x378c0, 0x378d0,
2899 		0x378e8, 0x378ec,
2900 		0x37900, 0x3792c,
2901 		0x37934, 0x37950,
2902 		0x37958, 0x37958,
2903 		0x37960, 0x3798c,
2904 		0x3799c, 0x379ac,
2905 		0x379c0, 0x379c0,
2906 		0x379c8, 0x379d0,
2907 		0x379d8, 0x379e0,
2908 		0x379ec, 0x37a90,
2909 		0x37a98, 0x37ac4,
2910 		0x37ae4, 0x37b10,
2911 		0x37b24, 0x37b28,
2912 		0x37b38, 0x37b50,
2913 		0x37bf0, 0x37c10,
2914 		0x37c24, 0x37c28,
2915 		0x37c38, 0x37c50,
2916 		0x37cf0, 0x37cfc,
2917 		0x40040, 0x40040,
2918 		0x40080, 0x40084,
2919 		0x40100, 0x40100,
2920 		0x40140, 0x401bc,
2921 		0x40200, 0x40214,
2922 		0x40228, 0x40228,
2923 		0x40240, 0x40258,
2924 		0x40280, 0x40280,
2925 		0x40304, 0x40304,
2926 		0x40330, 0x4033c,
2927 		0x41304, 0x413c8,
2928 		0x413d0, 0x413dc,
2929 		0x413f0, 0x413f0,
2930 		0x41400, 0x4140c,
2931 		0x41414, 0x4141c,
2932 		0x41480, 0x414d0,
2933 		0x44000, 0x4407c,
2934 		0x440c0, 0x441ac,
2935 		0x441b4, 0x4427c,
2936 		0x442c0, 0x443ac,
2937 		0x443b4, 0x4447c,
2938 		0x444c0, 0x445ac,
2939 		0x445b4, 0x4467c,
2940 		0x446c0, 0x447ac,
2941 		0x447b4, 0x4487c,
2942 		0x448c0, 0x449ac,
2943 		0x449b4, 0x44a7c,
2944 		0x44ac0, 0x44bac,
2945 		0x44bb4, 0x44c7c,
2946 		0x44cc0, 0x44dac,
2947 		0x44db4, 0x44e7c,
2948 		0x44ec0, 0x44fac,
2949 		0x44fb4, 0x4507c,
2950 		0x450c0, 0x451ac,
2951 		0x451b4, 0x451fc,
2952 		0x45800, 0x45804,
2953 		0x45810, 0x45830,
2954 		0x45840, 0x45860,
2955 		0x45868, 0x45868,
2956 		0x45880, 0x45884,
2957 		0x458a0, 0x458b0,
2958 		0x45a00, 0x45a04,
2959 		0x45a10, 0x45a30,
2960 		0x45a40, 0x45a60,
2961 		0x45a68, 0x45a68,
2962 		0x45a80, 0x45a84,
2963 		0x45aa0, 0x45ab0,
2964 		0x460c0, 0x460e4,
2965 		0x47000, 0x4703c,
2966 		0x47044, 0x4708c,
2967 		0x47200, 0x47250,
2968 		0x47400, 0x47408,
2969 		0x47414, 0x47420,
2970 		0x47600, 0x47618,
2971 		0x47800, 0x47814,
2972 		0x47820, 0x4782c,
2973 		0x50000, 0x50084,
2974 		0x50090, 0x500cc,
2975 		0x50300, 0x50384,
2976 		0x50400, 0x50400,
2977 		0x50800, 0x50884,
2978 		0x50890, 0x508cc,
2979 		0x50b00, 0x50b84,
2980 		0x50c00, 0x50c00,
2981 		0x51000, 0x51020,
2982 		0x51028, 0x510b0,
2983 		0x51300, 0x51324,
2984 	};
2985 
2986 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2987 	const unsigned int *reg_ranges;
2988 	int reg_ranges_size, range;
2989 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2990 
2991 	/* Select the right set of register ranges to dump depending on the
2992 	 * adapter chip type.
2993 	 */
2994 	switch (chip_version) {
2995 	case CHELSIO_T4:
2996 		reg_ranges = t4_reg_ranges;
2997 		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2998 		break;
2999 
3000 	case CHELSIO_T5:
3001 		reg_ranges = t5_reg_ranges;
3002 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
3003 		break;
3004 
3005 	case CHELSIO_T6:
3006 		reg_ranges = t6_reg_ranges;
3007 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
3008 		break;
3009 
3010 	default:
3011 		CH_ERR(adap,
3012 			"Unsupported chip version %d\n", chip_version);
3013 		return;
3014 	}
3015 
3016 	/* Clear the register buffer and insert the appropriate register
3017 	 * values selected by the above register ranges.
3018 	 */
3019 	memset(buf, 0, buf_size);
3020 	for (range = 0; range < reg_ranges_size; range += 2) {
3021 		unsigned int reg = reg_ranges[range];
3022 		unsigned int last_reg = reg_ranges[range + 1];
3023 		u32 *bufp = (u32 *)((char *)buf + reg);
3024 
3025 		/* Iterate across the register range filling in the register
3026 		 * buffer but don't write past the end of the register buffer.
3027 		 */
3028 		while (reg <= last_reg && bufp < buf_end) {
3029 			*bufp++ = t4_read_reg(adap, reg);
3030 			reg += sizeof(u32);
3031 		}
3032 	}
3033 }
3034 
3035 /*
3036  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
3037  */
3038 #define EEPROM_DELAY		10		// 10us per poll spin
3039 #define EEPROM_MAX_POLL		5000		// x 5000 == 50ms
3040 
3041 #define EEPROM_STAT_ADDR	0x7bfc
3042 #define VPD_SIZE		0x800
3043 #define VPD_BASE		0x400
3044 #define VPD_BASE_OLD		0
3045 #define VPD_LEN			1024
3046 #define VPD_INFO_FLD_HDR_SIZE	3
3047 #define CHELSIO_VPD_UNIQUE_ID	0x82
3048 
3049 /*
3050  * Small utility function to wait till any outstanding VPD Access is complete.
3051  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
3052  * VPD Access in flight.  This allows us to handle the problem of having a
3053  * previous VPD Access time out and prevent an attempt to inject a new VPD
3054  * Request before any in-flight VPD reguest has completed.
3055  */
t4_seeprom_wait(struct adapter * adapter)3056 static int t4_seeprom_wait(struct adapter *adapter)
3057 {
3058 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3059 	int max_poll;
3060 
3061 	/*
3062 	 * If no VPD Access is in flight, we can just return success right
3063 	 * away.
3064 	 */
3065 	if (!adapter->vpd_busy)
3066 		return 0;
3067 
3068 	/*
3069 	 * Poll the VPD Capability Address/Flag register waiting for it
3070 	 * to indicate that the operation is complete.
3071 	 */
3072 	max_poll = EEPROM_MAX_POLL;
3073 	do {
3074 		u16 val;
3075 
3076 		udelay(EEPROM_DELAY);
3077 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
3078 
3079 		/*
3080 		 * If the operation is complete, mark the VPD as no longer
3081 		 * busy and return success.
3082 		 */
3083 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
3084 			adapter->vpd_busy = 0;
3085 			return 0;
3086 		}
3087 	} while (--max_poll);
3088 
3089 	/*
3090 	 * Failure!  Note that we leave the VPD Busy status set in order to
3091 	 * avoid pushing a new VPD Access request into the VPD Capability till
3092 	 * the current operation eventually succeeds.  It's a bug to issue a
3093 	 * new request when an existing request is in flight and will result
3094 	 * in corrupt hardware state.
3095 	 */
3096 	return -ETIMEDOUT;
3097 }
3098 
3099 /**
3100  *	t4_seeprom_read - read a serial EEPROM location
3101  *	@adapter: adapter to read
3102  *	@addr: EEPROM virtual address
3103  *	@data: where to store the read data
3104  *
3105  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
3106  *	VPD capability.  Note that this function must be called with a virtual
3107  *	address.
3108  */
t4_seeprom_read(struct adapter * adapter,u32 addr,u32 * data)3109 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
3110 {
3111 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3112 	int ret;
3113 
3114 	/*
3115 	 * VPD Accesses must alway be 4-byte aligned!
3116 	 */
3117 	if (addr >= EEPROMVSIZE || (addr & 3))
3118 		return -EINVAL;
3119 
3120 	/*
3121 	 * Wait for any previous operation which may still be in flight to
3122 	 * complete.
3123 	 */
3124 	ret = t4_seeprom_wait(adapter);
3125 	if (ret) {
3126 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3127 		return ret;
3128 	}
3129 
3130 	/*
3131 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3132 	 * for our request to complete.  If it doesn't complete, note the
3133 	 * error and return it to our caller.  Note that we do not reset the
3134 	 * VPD Busy status!
3135 	 */
3136 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
3137 	adapter->vpd_busy = 1;
3138 	adapter->vpd_flag = PCI_VPD_ADDR_F;
3139 	ret = t4_seeprom_wait(adapter);
3140 	if (ret) {
3141 		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
3142 		return ret;
3143 	}
3144 
3145 	/*
3146 	 * Grab the returned data, swizzle it into our endianess and
3147 	 * return success.
3148 	 */
3149 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
3150 	*data = le32_to_cpu(*data);
3151 	return 0;
3152 }
3153 
3154 /**
3155  *	t4_seeprom_write - write a serial EEPROM location
3156  *	@adapter: adapter to write
3157  *	@addr: virtual EEPROM address
3158  *	@data: value to write
3159  *
3160  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
3161  *	VPD capability.  Note that this function must be called with a virtual
3162  *	address.
3163  */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must alway be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	/* Data must be staged in PCI_VPD_DATA before the address write with
	 * the F flag set kicks off the write transaction. */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete. If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		/* Poll the EEPROM status word until bit 0 clears. */
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
3221 
3222 /**
3223  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
3224  *	@phys_addr: the physical EEPROM address
3225  *	@fn: the PCI function number
3226  *	@sz: size of function-specific area
3227  *
3228  *	Translate a physical EEPROM address to virtual.  The first 1K is
3229  *	accessed through virtual addresses starting at 31K, the rest is
3230  *	accessed through virtual addresses starting at 0.
3231  *
3232  *	The mapping is as follows:
3233  *	[0..1K) -> [31K..32K)
3234  *	[1K..1K+A) -> [ES-A..ES)
3235  *	[1K+A..ES) -> [0..ES-A-1K)
3236  *
3237  *	where A = @fn * @sz, and ES = EEPROM size.
3238  */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)3239 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3240 {
3241 	fn *= sz;
3242 	if (phys_addr < 1024)
3243 		return phys_addr + (31 << 10);
3244 	if (phys_addr < 1024 + fn)
3245 		return EEPROMSIZE - fn + phys_addr - 1024;
3246 	if (phys_addr < EEPROMSIZE)
3247 		return phys_addr - 1024 - fn;
3248 	return -EINVAL;
3249 }
3250 
3251 /**
3252  *	t4_seeprom_wp - enable/disable EEPROM write protection
3253  *	@adapter: the adapter
3254  *	@enable: whether to enable or disable write protection
3255  *
3256  *	Enables or disables write protection on the serial EEPROM.
3257  */
t4_seeprom_wp(struct adapter * adapter,int enable)3258 int t4_seeprom_wp(struct adapter *adapter, int enable)
3259 {
3260 	return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3261 }
3262 
3263 /**
3264  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
3265  *	@v: Pointer to buffered vpd data structure
3266  *	@kw: The keyword to search for
3267  *
3268  *	Returns the value of the information field keyword or
3269  *	-ENOENT otherwise.
3270  */
get_vpd_keyword_val(const struct t4_vpd_hdr * v,const char * kw)3271 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3272 {
3273 	int i;
3274 	unsigned int offset , len;
3275 	const u8 *buf = (const u8 *)v;
3276 	const u8 *vpdr_len = &v->vpdr_len[0];
3277 	offset = sizeof(struct t4_vpd_hdr);
3278 	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3279 
3280 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3281 		return -ENOENT;
3282 	}
3283 
3284 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3285 		if(memcmp(buf + i , kw , 2) == 0){
3286 			i += VPD_INFO_FLD_HDR_SIZE;
3287 			return i;
3288 		}
3289 
3290 		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3291 	}
3292 
3293 	return -ENOENT;
3294 }
3295 
3296 /*
3297  * str_strip
3298  * Removes trailing whitespaces from string "s"
3299  * Based on strstrip() implementation in string.c
3300  */
static void str_strip(char *s)
{
	size_t size;
	char *end;

	/* Empty string: nothing to trim. */
	size = strlen(s);
	if (!size)
		return;

	/*
	 * Walk backwards from the last character over trailing whitespace.
	 * The cast to unsigned char is required: passing a negative char
	 * value (possible for bytes >= 0x80 where plain char is signed) to
	 * isspace() is undefined behavior (CERT STR37-C).
	 */
	end = s + size - 1;
	while (end >= s && isspace((unsigned char)*end))
		end--;
	*(end + 1) = '\0';
}
3315 
3316 /**
3317  *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3318  *	@adapter: adapter to read
3319  *	@p: where to store the parameters
3320  *
3321  *	Reads card parameters stored in VPD EEPROM.
3322  */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;	/* offsets of the EC/SN/PN/NA field data */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	/* Scratch buffer for the raw VPD image; freed at "out". */
	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the whole VPD image, one 32-bit word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/* Locate keyword "name" in the VPD and store the offset of its field data in
 * "var"; on a missing keyword, log, set ret and bail out to "out". */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out;      \
	} \
} while (0)

	/* The sum of all bytes up to and including the "RV" checksum byte
	 * must be zero for an intact VPD image. */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out, trimming trailing whitespace.  For the
	 * variable-length fields the byte at (offset - HDR_SIZE + 2) is the
	 * field's length byte from its header. */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ? ret : 0;
}
3409 
3410 /**
3411  *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3412  *	@adapter: adapter to read
3413  *	@p: where to store the parameters
3414  *
3415  *	Reads card parameters stored in VPD EEPROM and retrieves the Core
3416  *	Clock.  This can only be called after a connection to the firmware
3417  *	is established.
3418  */
t4_get_vpd_params(struct adapter * adapter,struct vpd_params * p)3419 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3420 {
3421 	u32 cclk_param, cclk_val;
3422 	int ret;
3423 
3424 	/*
3425 	 * Grab the raw VPD parameters.
3426 	 */
3427 	ret = t4_get_raw_vpd_params(adapter, p);
3428 	if (ret)
3429 		return ret;
3430 
3431 	/*
3432 	 * Ask firmware for the Core Clock since it knows how to translate the
3433 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
3434 	 */
3435 	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3436 		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3437 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3438 			      1, &cclk_param, &cclk_val);
3439 
3440 	if (ret)
3441 		return ret;
3442 	p->cclk = cclk_val;
3443 
3444 	return 0;
3445 }
3446 
3447 /**
3448  *	t4_get_pfres - retrieve VF resource limits
3449  *	@adapter: the adapter
3450  *
3451  *	Retrieves configured resource limits and capabilities for a physical
3452  *	function.  The results are stored in @adapter->pfres.
3453  */
t4_get_pfres(struct adapter * adapter)3454 int t4_get_pfres(struct adapter *adapter)
3455 {
3456 	struct pf_resources *pfres = &adapter->params.pfres;
3457 	struct fw_pfvf_cmd cmd, rpl;
3458 	int v;
3459 	u32 word;
3460 
3461 	/*
3462 	 * Execute PFVF Read command to get VF resource limits; bail out early
3463 	 * with error on command failure.
3464 	 */
3465 	memset(&cmd, 0, sizeof(cmd));
3466 	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
3467 				    F_FW_CMD_REQUEST |
3468 				    F_FW_CMD_READ |
3469 				    V_FW_PFVF_CMD_PFN(adapter->pf) |
3470 				    V_FW_PFVF_CMD_VFN(0));
3471 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3472 	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
3473 	if (v != FW_SUCCESS)
3474 		return v;
3475 
3476 	/*
3477 	 * Extract PF resource limits and return success.
3478 	 */
3479 	word = be32_to_cpu(rpl.niqflint_niq);
3480 	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
3481 
3482 	word = be32_to_cpu(rpl.type_to_neq);
3483 	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
3484 	pfres->pmask = G_FW_PFVF_CMD_PMASK(word);
3485 
3486 	word = be32_to_cpu(rpl.tc_to_nexactf);
3487 	pfres->tc = G_FW_PFVF_CMD_TC(word);
3488 	pfres->nvi = G_FW_PFVF_CMD_NVI(word);
3489 	pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
3490 
3491 	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
3492 	pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
3493 	pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
3494 	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
3495 
3496 	return 0;
3497 }
3498 
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (these match the common SPI NOR command set) */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3512 
3513 /**
3514  *	sf1_read - read data from the serial flash
3515  *	@adapter: the adapter
3516  *	@byte_cnt: number of bytes to read
3517  *	@cont: whether another operation will be chained
3518  *	@lock: whether to lock SF for PL access only
3519  *	@valp: where to store the read data
3520  *
3521  *	Reads up to 4 bytes of data from the serial flash.  The location of
3522  *	the read needs to be specified prior to calling this by issuing the
3523  *	appropriate commands to the serial flash.
3524  */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* A previous flash operation is still in progress. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Kick off the read; BYTECNT is encoded as (count - 1). */
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}
3541 
3542 /**
3543  *	sf1_write - write data to the serial flash
3544  *	@adapter: the adapter
3545  *	@byte_cnt: number of bytes to write
3546  *	@cont: whether another operation will be chained
3547  *	@lock: whether to lock SF for PL access only
3548  *	@val: value to write
3549  *
3550  *	Writes up to 4 bytes of data to the serial flash.  The location of
3551  *	the write needs to be specified prior to calling this by issuing the
3552  *	appropriate commands to the serial flash.
3553  */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* A previous flash operation is still in progress. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Stage the data, then start the write (V_OP(1) selects a write op;
	 * BYTECNT is encoded as count - 1). */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
3566 
3567 /**
3568  *	flash_wait_op - wait for a flash operation to complete
3569  *	@adapter: the adapter
3570  *	@attempts: max number of polls of the status register
3571  *	@delay: delay between polls in ms
3572  *
3573  *	Wait for a flash operation to complete by polling the status register.
3574  */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue RD_STATUS and fetch one status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Status bit 0 clear means the flash operation has finished. */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* In the crash-dump path use a busy-wait delay
			 * instead of sleeping. */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3598 
3599 /**
3600  *	t4_read_flash - read words from serial flash
3601  *	@adapter: the adapter
3602  *	@addr: the start address for the read
3603  *	@nwords: how many 32-bit words to read
3604  *	@data: where to store the read data
3605  *	@byte_oriented: whether to store data as bytes or as words
3606  *
3607  *	Read the specified number of 32-bit words from the serial flash.
3608  *	If @byte_oriented is set the read data is stored as a byte array
3609  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3610  *	natural endianness.
3611  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie entirely within the flash and be 4-byte
	 * aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Combine the byte-swapped address with the FAST READ opcode. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained (cont) until the last word, then
		 * release the lock on the final read. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3637 
3638 /**
3639  *	t4_write_flash - write up to a page of data to the serial flash
3640  *	@adapter: the adapter
3641  *	@addr: the start address to write
3642  *	@n: length of data to write in bytes
3643  *	@data: the data to write
3644  *	@byte_oriented: whether to store data as bytes or as words
3645  *
3646  *	Writes up to a page of data (256 bytes) to the serial flash starting
3647  *	at the given address.  All the data must be written to the same page.
3648  *	If @byte_oriented is set the write data is stored as byte stream
3649  *	(i.e. matches what on disk), otherwise in big-endian.
3650  */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must start inside the flash and stay within a single
	 * 256-byte page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* PROG_PAGE opcode combined with the byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out up to 4 bytes at a time, MSB first. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the payload above, so (data - n) is its
	 * original start. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
3703 
3704 /**
3705  *	t4_get_fw_version - read the firmware version
3706  *	@adapter: the adapter
3707  *	@vers: where to place the version
3708  *
3709  *	Reads the FW version from flash.
3710  */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	/* The version word lives in the fw_hdr at the head of the FW
	 * flash region. */
	unsigned int ver_addr = FLASH_FW_START +
	    offsetof(struct fw_hdr, fw_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3717 
3718 /**
3719  *	t4_get_bs_version - read the firmware bootstrap version
3720  *	@adapter: the adapter
3721  *	@vers: where to place the version
3722  *
3723  *	Reads the FW Bootstrap version from flash.
3724  */
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	/* The version word lives in the fw_hdr at the head of the FW
	 * Bootstrap flash region. */
	unsigned int ver_addr = FLASH_FWBOOTSTRAP_START +
	    offsetof(struct fw_hdr, fw_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3731 
3732 /**
3733  *	t4_get_tp_version - read the TP microcode version
3734  *	@adapter: the adapter
3735  *	@vers: where to place the version
3736  *
3737  *	Reads the TP microcode version from flash.
3738  */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	/* The TP microcode version is recorded in the fw_hdr at the head of
	 * the FW flash region. */
	unsigned int ver_addr = FLASH_FW_START +
	    offsetof(struct fw_hdr, tp_microcode_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3745 
3746 /**
3747  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3748  *	@adapter: the adapter
3749  *	@vers: where to place the version
3750  *
3751  *	Reads the Expansion ROM header from FLASH and returns the version
3752  *	number (if present) through the @vers return value pointer.  We return
3753  *	this in the Firmware Version Format since it's convenient.  Return
3754  *	0 on success, -ENOENT if no Expansion ROM is present.
3755  */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hp;
	u32 hdr_buf[DIV_ROUND_UP(sizeof(struct exprom_header), sizeof(u32))];
	int rc;

	/* Pull the header from the start of the Expansion ROM flash
	 * region. */
	rc = t4_read_flash(adapter, FLASH_EXP_ROM_START, ARRAY_SIZE(hdr_buf),
			   hdr_buf, 0);
	if (rc != 0)
		return rc;

	/* A present Expansion ROM begins with the 0x55 0xaa signature. */
	hp = (struct exprom_header *)hdr_buf;
	if (hp->hdr_arr[0] != 0x55 || hp->hdr_arr[1] != 0xaa)
		return -ENOENT;

	/* Repackage the four version bytes in Firmware Version Format. */
	*vers = (V_FW_HDR_FW_VER_MAJOR(hp->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hp->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hp->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hp->hdr_ver[3]));
	return 0;
}
3782 
3783 /**
3784  *	t4_get_scfg_version - return the Serial Configuration version
3785  *	@adapter: the adapter
3786  *	@vers: where to place the version
3787  *
3788  *	Reads the Serial Configuration Version via the Firmware interface
3789  *	(thus this can only be called once we're ready to issue Firmware
3790  *	commands).  The format of the Serial Configuration version is
3791  *	adapter specific.  Returns 0 on success, an error on failure.
3792  *
3793  *	Note that early versions of the Firmware didn't include the ability
3794  *	to retrieve the Serial Configuration version, so we zero-out the
3795  *	return-value parameter in that case to avoid leaving it with
3796  *	garbage in it.
3797  *
3798  *	Also note that the Firmware will return its cached copy of the Serial
3799  *	Initialization Revision ID, not the actual Revision ID as written in
3800  *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3801  *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3802  *	it's best to defer calling this routine till after a FW_RESET_CMD has
3803  *	been issued if the Host Driver will be performing a full adapter
3804  *	initialization.
3805  */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
	int rc;

	rc = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			     &param, vers);
	if (rc != 0) {
		/* Old firmware without SCFGREV support: report version 0
		 * rather than leaving *vers uninitialized. */
		*vers = 0;
	}
	return rc;
}
3819 
3820 /**
3821  *	t4_get_vpd_version - return the VPD version
3822  *	@adapter: the adapter
3823  *	@vers: where to place the version
3824  *
3825  *	Reads the VPD via the Firmware interface (thus this can only be called
3826  *	once we're ready to issue Firmware commands).  The format of the
3827  *	VPD version is adapter specific.  Returns 0 on success, an error on
3828  *	failure.
3829  *
3830  *	Note that early versions of the Firmware didn't include the ability
3831  *	to retrieve the VPD version, so we zero-out the return-value parameter
3832  *	in that case to avoid leaving it with garbage in it.
3833  *
3834  *	Also note that the Firmware will return its cached copy of the VPD
3835  *	Revision ID, not the actual Revision ID as written in the Serial
3836  *	EEPROM.  This is only an issue if a new VPD has been written and the
3837  *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3838  *	to defer calling this routine till after a FW_RESET_CMD has been issued
3839  *	if the Host Driver will be performing a full adapter initialization.
3840  */
int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
{
	u32 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
	int rc;

	rc = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			     &param, vers);
	if (rc != 0) {
		/* Old firmware without VPDREV support: report version 0
		 * rather than leaving *vers uninitialized. */
		*vers = 0;
	}
	return rc;
}
3854 
3855 /**
3856  *	t4_get_version_info - extract various chip/firmware version information
3857  *	@adapter: the adapter
3858  *
3859  *	Reads various chip/firmware version numbers and stores them into the
3860  *	adapter Adapter Parameters structure.  If any of the efforts fails
3861  *	the first failure will be returned, but all of the version numbers
3862  *	will be read.
3863  */
t4_get_version_info(struct adapter * adapter)3864 int t4_get_version_info(struct adapter *adapter)
3865 {
3866 	int ret = 0;
3867 
3868 	#define FIRST_RET(__getvinfo) \
3869 	do { \
3870 		int __ret = __getvinfo; \
3871 		if (__ret && !ret) \
3872 			ret = __ret; \
3873 	} while (0)
3874 
3875 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3876 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3877 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3878 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3879 	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3880 	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3881 
3882 	#undef FIRST_RET
3883 
3884 	return ret;
3885 }
3886 
3887 /**
3888  *	t4_dump_version_info - dump all of the adapter configuration IDs
3889  *	@adapter: the adapter
3890  *
3891  *	Dumps all of the various bits of adapter configuration version/revision
3892  *	IDs information.  This is typically called at some point after
3893  *	t4_get_version_info() has been called.
3894  */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.  (A zero version word is treated as "nothing
	 * loaded".)
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD  version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3969 
3970 /**
3971  *	t4_check_fw_version - check if the FW is supported with this driver
3972  *	@adap: the adapter
3973  *
3974  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
3975  *	if there's exact match, a negative error if the version could not be
3976  *	read or there's a major version mismatch
3977  */
t4_check_fw_version(struct adapter * adap)3978 int t4_check_fw_version(struct adapter *adap)
3979 {
3980 	int ret, major, minor, micro;
3981 	int exp_major, exp_minor, exp_micro;
3982 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3983 
3984 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3985 	if (ret)
3986 		return ret;
3987 
3988 	major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3989 	minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3990 	micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3991 
3992 	switch (chip_version) {
3993 	case CHELSIO_T4:
3994 		exp_major = T4FW_MIN_VERSION_MAJOR;
3995 		exp_minor = T4FW_MIN_VERSION_MINOR;
3996 		exp_micro = T4FW_MIN_VERSION_MICRO;
3997 		break;
3998 	case CHELSIO_T5:
3999 		exp_major = T5FW_MIN_VERSION_MAJOR;
4000 		exp_minor = T5FW_MIN_VERSION_MINOR;
4001 		exp_micro = T5FW_MIN_VERSION_MICRO;
4002 		break;
4003 	case CHELSIO_T6:
4004 		exp_major = T6FW_MIN_VERSION_MAJOR;
4005 		exp_minor = T6FW_MIN_VERSION_MINOR;
4006 		exp_micro = T6FW_MIN_VERSION_MICRO;
4007 		break;
4008 	default:
4009 		CH_ERR(adap, "Unsupported chip type, %x\n",
4010 			adap->params.chip);
4011 		return -EINVAL;
4012 	}
4013 
4014 	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
4015 	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
4016 		CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
4017 			"supported firmware is %u.%u.%u.\n", major, minor,
4018 			micro, exp_major, exp_minor, exp_micro);
4019 		return -EFAULT;
4020 	}
4021 	return 0;
4022 }
4023 
4024 /* Is the given firmware API compatible with the one the driver was compiled
4025  * with?
4026  */
fw_compatible(const struct fw_hdr * hdr1,const struct fw_hdr * hdr2)4027 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
4028 {
4029 
4030 	/* short circuit if it's the exact same firmware version */
4031 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
4032 		return 1;
4033 
4034 	/*
4035 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
4036 	 * features that are supported in the driver.
4037 	 */
4038 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
4039 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
4040 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
4041 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
4042 		return 1;
4043 #undef SAME_INTF
4044 
4045 	return 0;
4046 }
4047 
4048 /* The firmware in the filesystem is usable, but should it be installed?
4049  * This routine explains itself in detail if it indicates the filesystem
4050  * firmware should be installed.
4051  */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c, int t4_fw_install)
{
	const char *reason;

	/* k is the filesystem firmware's version and c the on-card
	 * firmware's version (see the caller in t4_prep_fw). */
	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	/* t4_fw_install == 2 forces a reinstall on any version mismatch. */
	if (t4_fw_install == 2 && k != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	return 0;

install:
	/* t4_fw_install == 0 means installation is administratively
	 * prohibited: log why we wanted to install, but decline. */
	if (t4_fw_install == 0) {
		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
		       "but the driver is prohibited from installing a "
		       "different firmware on the card.\n",
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       reason);

		return (0);
	}

	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
	       "installing firmware %u.%u.%u.%u on card.\n",
	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));

	return 1;
}
4095 
/*
 * t4_prep_fw - decide which firmware the adapter should run
 * @adap: the adapter
 * @fw_info: driver-bundled firmware information; fw_hdr holds its version
 * @fw_data: firmware image from the filesystem, or NULL if none available
 * @fw_size: size of @fw_data in bytes
 * @card_fw: buffer that receives the header of the firmware on the card
 * @t4_fw_install: install policy (0 = never install, 2 = install whenever
 *	the filesystem version differs; other cases are decided by
 *	should_install_fs_fw())
 * @state: device state reported by the firmware
 * @reset: in/out flag; cleared when an install already reset the chip
 *
 * Compares the firmware on the card, the version compiled into the driver,
 * and the filesystem image, installing the filesystem image when policy and
 * device state allow it.  On success the adapter's cached FW/TP versions
 * are refreshed from @card_fw.  Returns 0 on success or a positive errno
 * on failure (note: positive here, since the negative returns of the
 * helpers are negated on the way in).
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* A NULL filesystem image simply means "nothing to install". */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					 t4_fw_install)) {

		/* Flash the filesystem image onto the card. */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* d/c/k: driver, card, and filesystem (kernel) versions. */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
			"fw_install %d, chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			t4_fw_install, state,
			G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
			G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
			G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
			G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
			G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
			G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4186 
4187 /**
4188  *	t4_flash_erase_sectors - erase a range of flash sectors
4189  *	@adapter: the adapter
4190  *	@start: the first sector to erase
4191  *	@end: the last sector to erase
4192  *
4193  *	Erases the sectors in the given inclusive range.
4194  */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)4195 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4196 {
4197 	int ret = 0;
4198 
4199 	if (end >= adapter->params.sf_nsec)
4200 		return -EINVAL;
4201 
4202 	while (start <= end) {
4203 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4204 		    (ret = sf1_write(adapter, 4, 0, 1,
4205 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
4206 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4207 			CH_ERR(adapter,
4208 				"erase of flash sector %d failed, error %d\n",
4209 				start, ret);
4210 			break;
4211 		}
4212 		start++;
4213 	}
4214 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
4215 	return ret;
4216 }
4217 
4218 /**
4219  *	t4_flash_cfg_addr - return the address of the flash configuration file
4220  *	@adapter: the adapter
4221  *
4222  *	Return the address within the flash where the Firmware Configuration
4223  *	File is stored, or an error if the device FLASH is too small to contain
4224  *	a Firmware Configuration File.
4225  */
t4_flash_cfg_addr(struct adapter * adapter)4226 int t4_flash_cfg_addr(struct adapter *adapter)
4227 {
4228 	/*
4229 	 * If the device FLASH isn't large enough to hold a Firmware
4230 	 * Configuration File, return an error.
4231 	 */
4232 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4233 		return -ENOSPC;
4234 
4235 	return FLASH_CFG_START;
4236 }
4237 
4238 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
4239  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
4240  * and emit an error message for mismatched firmware to save our caller the
4241  * effort ...
4242  */
t4_fw_matches_chip(const struct adapter * adap,const struct fw_hdr * hdr)4243 static int t4_fw_matches_chip(const struct adapter *adap,
4244 			      const struct fw_hdr *hdr)
4245 {
4246 	/*
4247 	 * The expression below will return FALSE for any unsupported adapter
4248 	 * which will keep us "honest" in the future ...
4249 	 */
4250 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4251 	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4252 	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4253 		return 1;
4254 
4255 	CH_ERR(adap,
4256 		"FW image (%d) is not suitable for this adapter (%d)\n",
4257 		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4258 	return 0;
4259 }
4260 
4261 /**
4262  *	t4_load_fw - download firmware
4263  *	@adap: the adapter
4264  *	@fw_data: the firmware image to write
4265  *	@size: image size
4266  *	@bootstrap: indicates if the binary is a bootstrap fw
4267  *
4268  *	Write the supplied firmware image to the card's serial flash.
4269  */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/*
	 * Bootstrap images live in their own flash region with its own size
	 * limit; regular firmware goes to the main FW region.
	 */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/*
	 * Validate the image: non-empty, a multiple of 512 bytes, consistent
	 * with the length recorded in its own header, small enough for the
	 * target flash region, and built for this chip generation.
	 */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The image's 32-bit words are built to sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* The whole image made it to flash; now stamp the real version. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	else {
		/* Refresh the cached firmware version from the device. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4365 
4366 /**
4367  *	t4_phy_fw_ver - return current PHY firmware version
4368  *	@adap: the adapter
4369  *	@phy_fw_ver: return value buffer for PHY firmware version
4370  *
4371  *	Returns the current version of external PHY firmware on the
4372  *	adapter.
4373  */
t4_phy_fw_ver(struct adapter * adap,int * phy_fw_ver)4374 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4375 {
4376 	u32 param, val;
4377 	int ret;
4378 
4379 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4380 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4381 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4382 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4383 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4384 			      &param, &val);
4385 	if (ret < 0)
4386 		return ret;
4387 	*phy_fw_ver = val;
4388 	return 0;
4389 }
4390 
4391 /**
4392  *	t4_load_phy_fw - download port PHY firmware
4393  *	@adap: the adapter
4394  *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
4395  *	@lock: the lock to use to guard the memory copy
4396  *	@phy_fw_version: function to check PHY firmware versions
4397  *	@phy_fw_data: the PHY firmware image to write
4398  *	@phy_fw_size: image size
4399  *
4400  *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
4401  *	@phy_fw_version is supplied, then it will be used to determine if
4402  *	it's necessary to perform the transfer by comparing the version
4403  *	of any existing adapter PHY firmware with that of the passed in
4404  *	PHY firmware image.  If @lock is non-NULL then it will be used
4405  *	around the call to t4_memory_rw() which transfers the PHY firmware
4406  *	to the adapter.
4407  *
4408  *	A negative error number will be returned if an error occurs.  If
4409  *	version number support is available and there's no need to upgrade
4410  *	the firmware, 0 will be returned.  If firmware is successfully
4411  *	transferred to the adapter, 1 will be retured.
4412  *
4413  *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
4414  *	a result, a RESET of the adapter would cause that RAM to lose its
4415  *	contents.  Thus, loading PHY firmware on such adapters must happen after any
4416  *	FW_RESET_CMDs ...
4417  */
int t4_load_phy_fw(struct adapter *adap,
		   int win, t4_os_lock_t *lock,
		   int (*phy_fw_version)(const u8 *, size_t),
		   const u8 *phy_fw_data, size_t phy_fw_size)
{
	unsigned long mtype = 0, maddr = 0;
	u32 param, val;
	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
	int ret;

	/*
	 * If we have version number support, then check to see if the adapter
	 * already has up-to-date PHY firmware loaded.
	 */
	if (phy_fw_version) {
		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver >= new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware already up-to-date, "
				"version %#x\n", cur_phy_fw_ver);
			return 0;
		}
	}

	/*
	 * Ask the firmware where it wants us to copy the PHY firmware image.
	 * The size of the file requires a special version of the READ command
	 * which will pass the file size via the values field in PARAMS_CMD
	 * and retrieve the return value from firmware and place it in the
	 * same buffer values.
	 */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	val = phy_fw_size;
	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val, 1, true);
	if (ret < 0)
		return ret;
	/* val encodes memory type (bits 15:8) and 64KB-unit base (bits 7:0) */
	mtype = val >> 8;
	maddr = (val & 0xff) << 16;

	/*
	 * Copy the supplied PHY Firmware image to the adapter memory location
	 * allocated by the adapter firmware.
	 */
	if (lock)
		t4_os_lock(lock);
	ret = t4_memory_rw(adap, win, mtype, maddr,
			   phy_fw_size, (__be32*)phy_fw_data,
			   T4_MEMORY_WRITE);
	if (lock)
		t4_os_unlock(lock);
	if (ret)
		return ret;

	/*
	 * Tell the firmware that the PHY firmware image has been written to
	 * RAM and it can now start copying it over to the PHYs.  The chip
	 * firmware will RESET the affected PHYs as part of this operation
	 * leaving them running the new PHY firmware image.
	 */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val, 30000);
	if (ret < 0)
		return ret;	/* previously this error was silently dropped */

	/*
	 * If we have version number support, then check to see that the new
	 * firmware got loaded properly.
	 */
	if (phy_fw_version) {
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver != new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware did not update: "
				"version on adapter %#x, "
				"version flashed %#x\n",
				cur_phy_fw_ver, new_phy_fw_vers);
			return -ENXIO;
		}
	}

	return 1;
}
4511 
4512 /**
4513  *	t4_fwcache - firmware cache operation
4514  *	@adap: the adapter
4515  *	@op  : the operation (flush or flush and invalidate)
4516  */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)4517 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4518 {
4519 	struct fw_params_cmd c;
4520 
4521 	memset(&c, 0, sizeof(c));
4522 	c.op_to_vfn =
4523 	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4524 			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4525 				V_FW_PARAMS_CMD_PFN(adap->pf) |
4526 				V_FW_PARAMS_CMD_VFN(0));
4527 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4528 	c.param[0].mnem =
4529 	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4530 			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4531 	c.param[0].val = (__force __be32)op;
4532 
4533 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4534 }
4535 
/*
 * t4_cim_read_pif_la - dump the CIM PIF logic-analyzer capture
 * @adap: the adapter
 * @pif_req: buffer for the outbound (PO) capture words
 * @pif_rsp: buffer for the inbound (PI) capture words
 * @pif_req_wrptr: if non-NULL, returns the request-side write pointer
 * @pif_rsp_wrptr: if non-NULL, returns the response-side write pointer
 *
 * Reads CIM_PIFLA_SIZE rows of 6 words each from the PO/PI debug-data
 * registers into the caller-supplied buffers.  The LA is temporarily
 * disabled while reading and the original debug configuration is
 * restored before returning.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Turn the LA off while we read it; restored at the end. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Current write pointers for both directions. */
	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the read pointers, then read both sides. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip 2 entries per row, wrapping within the pointer mask. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4569 
/*
 * t4_cim_read_ma_la - dump the CIM MA logic-analyzer capture
 * @adap: the adapter
 * @ma_req: buffer for the outbound (PO) MA debug words
 * @ma_rsp: buffer for the inbound (PI) MA debug words
 *
 * Reads CIM_MALA_SIZE rows of 5 words each (indices 8*i .. 8*i+4) from
 * the MA debug-data registers.  The LA is temporarily disabled while
 * reading and the original debug configuration is restored at the end.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Turn the LA off while we read it; restored at the end. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Select entry 8*i+j, then read both directions. */
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4590 
/*
 * t4_ulprx_read_la - read the ULP_RX logic analyzer into @la_buf
 * @adap: the adapter
 * @la_buf: destination; must hold 8 * ULPRX_LA_SIZE words
 *
 * Reads the 8 LA columns one at a time.  Entries are interleaved in
 * @la_buf: column i occupies la_buf[i], la_buf[i+8], la_buf[i+16], ...
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select column i and rewind its read pointer to the
		 * current write pointer before streaming out the data. */
		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4605 
/* Signature of a platform-specific handler hook run for a matched cause. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt-cause description; a table is
 * terminated by an entry with mask == 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4615 
4616 /**
4617  *	t4_handle_intr_status - table driven interrupt handler
4618  *	@adapter: the adapter that generated the interrupt
4619  *	@reg: the interrupt status register to process
4620  *	@acts: table of interrupt actions
4621  *
4622  *	A table driven interrupt handler that applies a set of masks to an
4623  *	interrupt status word and performs the corresponding actions if the
4624  *	interrupts described by the mask have occurred.  The actions include
4625  *	optionally emitting a warning or alert message.  The table is terminated
4626  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4627  *	conditions.
4628  */
t4_handle_intr_status(struct adapter * adapter,unsigned int reg,const struct intr_info * acts)4629 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4630 				 const struct intr_info *acts)
4631 {
4632 	int fatal = 0;
4633 	unsigned int mask = 0;
4634 	unsigned int status = t4_read_reg(adapter, reg);
4635 
4636 	for ( ; acts->mask; ++acts) {
4637 		if (!(status & acts->mask))
4638 			continue;
4639 		if (acts->fatal) {
4640 			fatal++;
4641 			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4642 				  status & acts->mask);
4643 		} else if (acts->msg)
4644 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4645 				 status & acts->mask);
4646 		if (acts->int_handler)
4647 			acts->int_handler(adapter);
4648 		mask |= acts->mask;
4649 	}
4650 	status &= mask;
4651 	if (status)	/* clear processed interrupts */
4652 		t4_write_reg(adapter, reg, status);
4653 	return fatal;
4654 }
4655 
4656 /*
4657  * Interrupt handler for the PCIE module.
4658  */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4-only: UTL system bus agent causes. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4-only: UTL PCI Express port causes. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 PCIE_INT_CAUSE table. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 PCIE_INT_CAUSE table. */
	static struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/*
	 * T4 exposes two extra UTL status registers in addition to
	 * PCIE_INT_CAUSE; T5 and later use a single cause register with a
	 * different table.
	 */
	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
			t4_handle_intr_status(adapter,
					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					pcie_port_intr_info) +
			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					      pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4774 
4775 /*
4776  * TP interrupt handler.
4777  */
static void tp_intr_handler(struct adapter *adapter)
{
	/* Every TP cause we report is treated as fatal. */
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
4789 
4790 /*
4791  * SGE interrupt handler.
4792  */
static void sge_intr_handler(struct adapter *adapter)
{
	u32 v = 0, perr;	/* v accumulates fatal conditions */
	u32 err;

	/* Causes common to all chip generations (SGE_INT_CAUSE3). */
	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Additional causes that only exist on T4/T5. */
	static struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE
	 * and get better debug.
	 */
	static struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Raw parity-error cause registers; any set bit is fatal. */
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
	}
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
	}
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
		/* CAUSE5 only exists on T5 and later. */
		perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
		if (perr) {
			v |= perr;
			CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
		}
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report (and clear) any per-queue error latched by the SGE. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4881 
4882 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4883 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4884 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4885 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4886 
4887 /*
4888  * CIM interrupt handler.
4889  */
static void cim_intr_handler(struct adapter *adapter)
{
	/* CIM host-visible cause bits; all treated as fatal. */
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* uP accelerator access faults. */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	/* A firmware-error bit in PCIE_FW means the firmware crashed. */
	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4961 
4962 /*
4963  * ULP RX interrupt handler.
4964  */
ulprx_intr_handler(struct adapter * adapter)4965 static void ulprx_intr_handler(struct adapter *adapter)
4966 {
4967 	static const struct intr_info ulprx_intr_info[] = {
4968 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4969 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4970 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4971 		{ 0 }
4972 	};
4973 
4974 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4975 		t4_fatal_err(adapter);
4976 }
4977 
4978 /*
4979  * ULP TX interrupt handler.
4980  */
ulptx_intr_handler(struct adapter * adapter)4981 static void ulptx_intr_handler(struct adapter *adapter)
4982 {
4983 	static const struct intr_info ulptx_intr_info[] = {
4984 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4985 		  0 },
4986 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4987 		  0 },
4988 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4989 		  0 },
4990 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4991 		  0 },
4992 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4993 		{ 0 }
4994 	};
4995 
4996 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4997 		t4_fatal_err(adapter);
4998 }
4999 
5000 /*
5001  * PM TX interrupt handler.
5002  */
pmtx_intr_handler(struct adapter * adapter)5003 static void pmtx_intr_handler(struct adapter *adapter)
5004 {
5005 	static const struct intr_info pmtx_intr_info[] = {
5006 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
5007 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
5008 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
5009 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
5010 		{ 0xffffff0, "PMTX framing error", -1, 1 },
5011 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
5012 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
5013 		  1 },
5014 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
5015 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
5016 		{ 0 }
5017 	};
5018 
5019 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
5020 		t4_fatal_err(adapter);
5021 }
5022 
5023 /*
5024  * PM RX interrupt handler.
5025  */
pmrx_intr_handler(struct adapter * adapter)5026 static void pmrx_intr_handler(struct adapter *adapter)
5027 {
5028 	static const struct intr_info pmrx_intr_info[] = {
5029 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
5030 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
5031 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
5032 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
5033 		  1 },
5034 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
5035 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
5036 		{ 0 }
5037 	};
5038 
5039 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
5040 		t4_fatal_err(adapter);
5041 }
5042 
5043 /*
5044  * CPL switch interrupt handler.
5045  */
cplsw_intr_handler(struct adapter * adapter)5046 static void cplsw_intr_handler(struct adapter *adapter)
5047 {
5048 	static const struct intr_info cplsw_intr_info[] = {
5049 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
5050 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
5051 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
5052 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
5053 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
5054 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
5055 		{ 0 }
5056 	};
5057 
5058 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
5059 		t4_fatal_err(adapter);
5060 }
5061 
5062 /*
5063  * LE interrupt handler.
5064  */
le_intr_handler(struct adapter * adap)5065 static void le_intr_handler(struct adapter *adap)
5066 {
5067 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
5068 	static const struct intr_info le_intr_info[] = {
5069 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
5070 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
5071 		{ F_PARITYERR, "LE parity error", -1, 1 },
5072 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
5073 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
5074 		{ 0 }
5075 	};
5076 
5077 	static struct intr_info t6_le_intr_info[] = {
5078 		/* log an error for HASHTBLMEMCRCERR and clear the bit */
5079 		{ F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
5080 		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
5081 		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
5082 		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
5083 		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
5084 		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
5085 		{ 0 }
5086 	};
5087 
5088 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
5089 				  (chip_ver <= CHELSIO_T5) ?
5090 				  le_intr_info : t6_le_intr_info))
5091 		t4_fatal_err(adap);
5092 }
5093 
5094 /*
5095  * MPS interrupt handler.
5096  */
mps_intr_handler(struct adapter * adapter)5097 static void mps_intr_handler(struct adapter *adapter)
5098 {
5099 	static const struct intr_info mps_rx_intr_info[] = {
5100 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
5101 		{ 0 }
5102 	};
5103 	static const struct intr_info mps_tx_intr_info[] = {
5104 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5105 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5106 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5107 		  -1, 1 },
5108 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5109 		  -1, 1 },
5110 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
5111 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5112 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
5113 		{ 0 }
5114 	};
5115 	static const struct intr_info t6_mps_tx_intr_info[] = {
5116 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5117 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5118 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5119 		  -1, 1 },
5120 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5121 		  -1, 1 },
5122 		/* MPS Tx Bubble is normal for T6 */
5123 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5124 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
5125 		{ 0 }
5126 	};
5127 	static const struct intr_info mps_trc_intr_info[] = {
5128 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
5129 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
5130 		  1 },
5131 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
5132 		{ 0 }
5133 	};
5134 	static const struct intr_info mps_stat_sram_intr_info[] = {
5135 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
5136 		{ 0 }
5137 	};
5138 	static const struct intr_info mps_stat_tx_intr_info[] = {
5139 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
5140 		{ 0 }
5141 	};
5142 	static const struct intr_info mps_stat_rx_intr_info[] = {
5143 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
5144 		{ 0 }
5145 	};
5146 	static const struct intr_info mps_cls_intr_info[] = {
5147 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
5148 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
5149 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
5150 		{ 0 }
5151 	};
5152 
5153 	int fat;
5154 
5155 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
5156 				    mps_rx_intr_info) +
5157 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
5158 				    is_t6(adapter->params.chip)
5159 				    ? t6_mps_tx_intr_info
5160 				    : mps_tx_intr_info) +
5161 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
5162 				    mps_trc_intr_info) +
5163 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5164 				    mps_stat_sram_intr_info) +
5165 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5166 				    mps_stat_tx_intr_info) +
5167 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5168 				    mps_stat_rx_intr_info) +
5169 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
5170 				    mps_cls_intr_info);
5171 
5172 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
5173 	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
5174 	if (fat)
5175 		t4_fatal_err(adapter);
5176 }
5177 
5178 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
5179 		      F_ECC_UE_INT_CAUSE)
5180 
5181 /*
5182  * EDC/MC interrupt handler.
5183  */
mem_intr_handler(struct adapter * adapter,int idx)5184 static void mem_intr_handler(struct adapter *adapter, int idx)
5185 {
5186 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
5187 
5188 	unsigned int addr, cnt_addr, v;
5189 
5190 	if (idx <= MEM_EDC1) {
5191 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
5192 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
5193 	} else if (idx == MEM_MC) {
5194 		if (is_t4(adapter->params.chip)) {
5195 			addr = A_MC_INT_CAUSE;
5196 			cnt_addr = A_MC_ECC_STATUS;
5197 		} else {
5198 			addr = A_MC_P_INT_CAUSE;
5199 			cnt_addr = A_MC_P_ECC_STATUS;
5200 		}
5201 	} else {
5202 		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
5203 		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
5204 	}
5205 
5206 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
5207 	if (v & F_PERR_INT_CAUSE)
5208 		CH_ALERT(adapter, "%s FIFO parity error\n",
5209 			  name[idx]);
5210 	if (v & F_ECC_CE_INT_CAUSE) {
5211 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
5212 
5213 		if (idx <= MEM_EDC1)
5214 			t4_edc_err_read(adapter, idx);
5215 
5216 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
5217 		CH_WARN_RATELIMIT(adapter,
5218 				  "%u %s correctable ECC data error%s\n",
5219 				  cnt, name[idx], cnt > 1 ? "s" : "");
5220 	}
5221 	if (v & F_ECC_UE_INT_CAUSE)
5222 		CH_ALERT(adapter,
5223 			 "%s uncorrectable ECC data error\n", name[idx]);
5224 
5225 	t4_write_reg(adapter, addr, v);
5226 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
5227 		t4_fatal_err(adapter);
5228 }
5229 
5230 /*
5231  * MA interrupt handler.
5232  */
ma_intr_handler(struct adapter * adapter)5233 static void ma_intr_handler(struct adapter *adapter)
5234 {
5235 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
5236 
5237 	if (status & F_MEM_PERR_INT_CAUSE) {
5238 		CH_ALERT(adapter,
5239 			  "MA parity error, parity status %#x\n",
5240 			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
5241 		if (is_t5(adapter->params.chip))
5242 			CH_ALERT(adapter,
5243 				  "MA parity error, parity status %#x\n",
5244 				  t4_read_reg(adapter,
5245 					      A_MA_PARITY_ERROR_STATUS2));
5246 	}
5247 	if (status & F_MEM_WRAP_INT_CAUSE) {
5248 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
5249 		CH_ALERT(adapter, "MA address wrap-around error by "
5250 			  "client %u to address %#x\n",
5251 			  G_MEM_WRAP_CLIENT_NUM(v),
5252 			  G_MEM_WRAP_ADDRESS(v) << 4);
5253 	}
5254 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
5255 	t4_fatal_err(adapter);
5256 }
5257 
5258 /*
5259  * SMB interrupt handler.
5260  */
smb_intr_handler(struct adapter * adap)5261 static void smb_intr_handler(struct adapter *adap)
5262 {
5263 	static const struct intr_info smb_intr_info[] = {
5264 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5265 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5266 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5267 		{ 0 }
5268 	};
5269 
5270 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5271 		t4_fatal_err(adap);
5272 }
5273 
5274 /*
5275  * NC-SI interrupt handler.
5276  */
ncsi_intr_handler(struct adapter * adap)5277 static void ncsi_intr_handler(struct adapter *adap)
5278 {
5279 	static const struct intr_info ncsi_intr_info[] = {
5280 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
5281 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
5282 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
5283 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
5284 		{ 0 }
5285 	};
5286 
5287 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
5288 		t4_fatal_err(adap);
5289 }
5290 
5291 /*
5292  * XGMAC interrupt handler.
5293  */
xgmac_intr_handler(struct adapter * adap,int port)5294 static void xgmac_intr_handler(struct adapter *adap, int port)
5295 {
5296 	u32 v, int_cause_reg;
5297 
5298 	if (is_t4(adap->params.chip))
5299 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5300 	else
5301 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5302 
5303 	v = t4_read_reg(adap, int_cause_reg);
5304 
5305 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5306 	if (!v)
5307 		return;
5308 
5309 	if (v & F_TXFIFO_PRTY_ERR)
5310 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5311 			  port);
5312 	if (v & F_RXFIFO_PRTY_ERR)
5313 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5314 			  port);
5315 	t4_write_reg(adap, int_cause_reg, v);
5316 	t4_fatal_err(adap);
5317 }
5318 
5319 /*
5320  * PL Parity Error interrupt handler.
5321  */
pl_perr_intr_handler(struct adapter * adap)5322 static void pl_perr_intr_handler(struct adapter *adap)
5323 {
5324 	static const struct intr_info pl_perr_info[] = {
5325 		{ F_UART, "UART Parity Error", -1, },
5326 		{ F_ULP_TX, "ULP TX Parity Error", -1 },
5327 		{ F_SGE, "SGE Parity Error", -1 },
5328 		{ F_HMA, "HMA Parity Error", -1 },
5329 		{ F_CPL_SWITCH, "CPL Switch Parity Error", -1 },
5330 		{ F_ULP_RX, "ULP RX Parity Error", -1 },
5331 		{ F_PM_RX, "PM RX Parity Error", -1 },
5332 		{ F_PM_TX, "PM TX Parity Error", -1 },
5333 		{ F_MA, "MA Parity Error", -1 },
5334 		{ F_TP, "TP Parity Error", -1 },
5335 		{ F_LE, "LE Parity Error", -1 },
5336 		{ F_EDC1, "EDC1 Parity Error", -1 },
5337 		{ F_EDC0, "EDC0 Parity Error", -1 },
5338 		{ F_MC, "MC Parity Error", -1 },
5339 		{ F_PCIE, "PCIE Parity Error", -1 },
5340 		{ F_PMU, "PMU Parity Error", -1 },
5341 		{ F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 },
5342 		{ F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 },
5343 		{ F_XGMAC1, "XGMAC1 Parity Error", -1 },
5344 		{ F_XGMAC0, "XGMAC0 Parity Error", -1 },
5345 		{ F_SMB, "SMB Parity Error", -1 },
5346 		{ F_SF, "SF Parity Error", -1 },
5347 		{ F_PL, "PL Parity Error", -1 },
5348 		{ F_NCSI, "NCSI Parity Error", -1 },
5349 		{ F_MPS, "MPS Parity Error", -1 },
5350 		{ F_MI, "MI Parity Error", -1 },
5351 		{ F_DBG, "DBG Parity Error", -1 },
5352 		{ F_I2CM, "I2CM Parity Error", -1 },
5353 		{ F_CIM, "CIM Parity Error", -1 },
5354 	};
5355 
5356 	t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info);
5357 	/* pl_intr_handler() will do the t4_fatal_err(adap) */
5358 }
5359 
5360 /*
5361  * PL interrupt handler.
5362  */
pl_intr_handler(struct adapter * adap)5363 static void pl_intr_handler(struct adapter *adap)
5364 {
5365 	static const struct intr_info pl_intr_info[] = {
5366 		{ F_FATALPERR, "Fatal parity error", -1, 1,
5367 		  pl_perr_intr_handler },
5368 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5369 		{ 0 }
5370 	};
5371 
5372 	static struct intr_info t5_pl_intr_info[] = {
5373 		{ F_FATALPERR, "Fatal parity error", -1, 1,
5374 		  pl_perr_intr_handler },
5375 		{ 0 }
5376 	};
5377 
5378 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5379 				  is_t4(adap->params.chip) ?
5380 				  pl_intr_info : t5_pl_intr_info))
5381 		t4_fatal_err(adap);
5382 }
5383 
5384 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5385 
5386 /**
5387  *	t4_slow_intr_handler - control path interrupt handler
5388  *	@adapter: the adapter
5389  *
5390  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
5391  *	The designation 'slow' is because it involves register reads, while
5392  *	data interrupts typically don't involve any MMIOs.
5393  */
t4_slow_intr_handler(struct adapter * adapter)5394 int t4_slow_intr_handler(struct adapter *adapter)
5395 {
5396 	/* There are rare cases where a PL_INT_CAUSE bit may end up getting
5397 	 * set when the corresponding PL_INT_ENABLE bit isn't set.  It's
5398 	 * easiest just to mask that case here.
5399 	 */
5400 	u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
5401 	u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE);
5402 	u32 cause = raw_cause & enable;
5403 
5404 	if (!(cause & GLBL_INTR_MASK))
5405 		return 0;
5406 
5407 	/* Disable all the interrupt(bits) in PL_INT_ENABLE */
5408 	t4_write_reg(adapter, A_PL_INT_ENABLE, 0);
5409 	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5410 
5411 	if (cause & F_CIM)
5412 		cim_intr_handler(adapter);
5413 	if (cause & F_MPS)
5414 		mps_intr_handler(adapter);
5415 	if (cause & F_NCSI)
5416 		ncsi_intr_handler(adapter);
5417 	if (cause & F_PL)
5418 		pl_intr_handler(adapter);
5419 	if (cause & F_SMB)
5420 		smb_intr_handler(adapter);
5421 	if (cause & F_MAC0)
5422 		xgmac_intr_handler(adapter, 0);
5423 	if (cause & F_MAC1)
5424 		xgmac_intr_handler(adapter, 1);
5425 	if (cause & F_MAC2)
5426 		xgmac_intr_handler(adapter, 2);
5427 	if (cause & F_MAC3)
5428 		xgmac_intr_handler(adapter, 3);
5429 	if (cause & F_PCIE)
5430 		pcie_intr_handler(adapter);
5431 	if (cause & F_MC0)
5432 		mem_intr_handler(adapter, MEM_MC);
5433 	if (is_t5(adapter->params.chip) && (cause & F_MC1))
5434 		mem_intr_handler(adapter, MEM_MC1);
5435 	if (cause & F_EDC0)
5436 		mem_intr_handler(adapter, MEM_EDC0);
5437 	if (cause & F_EDC1)
5438 		mem_intr_handler(adapter, MEM_EDC1);
5439 	if (cause & F_LE)
5440 		le_intr_handler(adapter);
5441 	if (cause & F_TP)
5442 		tp_intr_handler(adapter);
5443 	if (cause & F_MA)
5444 		ma_intr_handler(adapter);
5445 	if (cause & F_PM_TX)
5446 		pmtx_intr_handler(adapter);
5447 	if (cause & F_PM_RX)
5448 		pmrx_intr_handler(adapter);
5449 	if (cause & F_ULP_RX)
5450 		ulprx_intr_handler(adapter);
5451 	if (cause & F_CPL_SWITCH)
5452 		cplsw_intr_handler(adapter);
5453 	if (cause & F_SGE)
5454 		sge_intr_handler(adapter);
5455 	if (cause & F_ULP_TX)
5456 		ulptx_intr_handler(adapter);
5457 
5458 	/* Clear the interrupts just processed for which we are the master. */
5459 	t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK);
5460 
5461 	/* re-enable the interrupts (bits that were disabled
5462 	 * earlier in PL_INT_ENABLE)
5463 	 */
5464 	t4_write_reg(adapter, A_PL_INT_ENABLE, enable);
5465 	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5466 	return 1;
5467 }
5468 
5469 /**
5470  *	t4_intr_enable - enable interrupts
5471  *	@adapter: the adapter whose interrupts should be enabled
5472  *
5473  *	Enable PF-specific interrupts for the calling function and the top-level
5474  *	interrupt concentrator for global interrupts.  Interrupts are already
5475  *	enabled at each module,	here we just enable the roots of the interrupt
5476  *	hierarchies.
5477  *
5478  *	Note: this function should be called only when the driver manages
5479  *	non PF-specific interrupts from the various HW modules.  Only one PCI
5480  *	function at a time should be doing this.
5481  */
t4_intr_enable(struct adapter * adapter)5482 void t4_intr_enable(struct adapter *adapter)
5483 {
5484 	u32 val = 0;
5485 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5486 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5487 		  ? G_SOURCEPF(whoami)
5488 		  : G_T6_SOURCEPF(whoami));
5489 
5490 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5491 		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5492 	else
5493 		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5494 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
5495 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
5496 		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
5497 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
5498 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5499 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
5500 		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
5501 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5502 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
5503 }
5504 
5505 /**
5506  *	t4_intr_disable - disable interrupts
5507  *	@adapter: the adapter whose interrupts should be disabled
5508  *
5509  *	Disable interrupts.  We only disable the top-level interrupt
5510  *	concentrators.  The caller must be a PCI function managing global
5511  *	interrupts.
5512  */
t4_intr_disable(struct adapter * adapter)5513 void t4_intr_disable(struct adapter *adapter)
5514 {
5515 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5516 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5517 		  ? G_SOURCEPF(whoami)
5518 		  : G_T6_SOURCEPF(whoami));
5519 
5520 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5521 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5522 }
5523 
t4_chip_rss_size(struct adapter * adap)5524 unsigned int t4_chip_rss_size(struct adapter *adap)
5525 {
5526 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5527 		return RSS_NENTRIES;
5528 	else
5529 		return T6_RSS_NENTRIES;
5530 }
5531 
5532 /**
5533  *	t4_config_rss_range - configure a portion of the RSS mapping table
5534  *	@adapter: the adapter
5535  *	@mbox: mbox to use for the FW command
5536  *	@viid: virtual interface whose RSS subtable is to be written
5537  *	@start: start entry in the table to write
5538  *	@n: how many table entries to write
5539  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
5540  *	@nrspq: number of values in @rspq
5541  *
5542  *	Programs the selected part of the VI's RSS mapping table with the
5543  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5544  *	until the full table range is populated.
5545  *
5546  *	The caller must ensure the values in @rspq are in the range allowed for
5547  *	@viid.
5548  */
t4_config_rss_range(struct adapter * adapter,int mbox,unsigned int viid,int start,int n,const u16 * rspq,unsigned int nrspq)5549 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5550 			int start, int n, const u16 *rspq, unsigned int nrspq)
5551 {
5552 	int ret;
5553 	const u16 *rsp = rspq;
5554 	const u16 *rsp_end = rspq + nrspq;
5555 	struct fw_rss_ind_tbl_cmd cmd;
5556 
5557 	memset(&cmd, 0, sizeof(cmd));
5558 	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5559 				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5560 				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
5561 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5562 
5563 	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
5564 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
5565 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5566 	 * reserved.
5567 	 */
5568 	while (n > 0) {
5569 		int nq = min(n, 32);
5570 		int nq_packed = 0;
5571 		__be32 *qp = &cmd.iq0_to_iq2;
5572 
5573 		/* Set up the firmware RSS command header to send the next
5574 		 * "nq" Ingress Queue IDs to the firmware.
5575 		 */
5576 		cmd.niqid = cpu_to_be16(nq);
5577 		cmd.startidx = cpu_to_be16(start);
5578 
5579 		/* "nq" more done for the start of the next loop.
5580 		 */
5581 		start += nq;
5582 		n -= nq;
5583 
5584 		/* While there are still Ingress Queue IDs to stuff into the
5585 		 * current firmware RSS command, retrieve them from the
5586 		 * Ingress Queue ID array and insert them into the command.
5587 		 */
5588 		while (nq > 0) {
5589 			/* Grab up to the next 3 Ingress Queue IDs (wrapping
5590 			 * around the Ingress Queue ID array if necessary) and
5591 			 * insert them into the firmware RSS command at the
5592 			 * current 3-tuple position within the commad.
5593 			 */
5594 			u16 qbuf[3];
5595 			u16 *qbp = qbuf;
5596 			int nqbuf = min(3, nq);
5597 
5598 			nq -= nqbuf;
5599 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
5600 			while (nqbuf && nq_packed < 32) {
5601 				nqbuf--;
5602 				nq_packed++;
5603 				*qbp++ = *rsp++;
5604 				if (rsp >= rsp_end)
5605 					rsp = rspq;
5606 			}
5607 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5608 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5609 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5610 		}
5611 
5612 		/* Send this portion of the RRS table update to the firmware;
5613 		 * bail out on any errors.
5614 		 */
5615 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5616 		if (ret)
5617 			return ret;
5618 	}
5619 	return 0;
5620 }
5621 
5622 /**
5623  *	t4_config_glbl_rss - configure the global RSS mode
5624  *	@adapter: the adapter
5625  *	@mbox: mbox to use for the FW command
5626  *	@mode: global RSS mode
5627  *	@flags: mode-specific flags
5628  *
5629  *	Sets the global RSS mode.
5630  */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)5631 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5632 		       unsigned int flags)
5633 {
5634 	struct fw_rss_glb_config_cmd c;
5635 
5636 	memset(&c, 0, sizeof(c));
5637 	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5638 				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5639 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5640 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5641 		c.u.manual.mode_pkd =
5642 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5643 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5644 		c.u.basicvirtual.mode_keymode =
5645 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5646 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5647 	} else
5648 		return -EINVAL;
5649 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5650 }
5651 
5652 /**
5653  *	t4_config_vi_rss - configure per VI RSS settings
5654  *	@adapter: the adapter
5655  *	@mbox: mbox to use for the FW command
5656  *	@viid: the VI id
5657  *	@flags: RSS flags
5658  *	@defq: id of the default RSS queue for the VI.
5659  *	@skeyidx: RSS secret key table index for non-global mode
5660  *	@skey: RSS vf_scramble key for VI.
5661  *
5662  *	Configures VI-specific RSS properties.
5663  */
t4_config_vi_rss(struct adapter * adapter,int mbox,unsigned int viid,unsigned int flags,unsigned int defq,unsigned int skeyidx,unsigned int skey)5664 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5665 		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
5666 		     unsigned int skey)
5667 {
5668 	struct fw_rss_vi_config_cmd c;
5669 
5670 	memset(&c, 0, sizeof(c));
5671 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5672 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5673 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5674 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5675 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5676 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5677 	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5678 					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5679 	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5680 
5681 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5682 }
5683 
5684 /* Read an RSS table row */
rd_rss_row(struct adapter * adap,int row,u32 * val)5685 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5686 {
5687 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
5688 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
5689 				   5, 0, val);
5690 }
5691 
5692 /**
5693  *	t4_read_rss - read the contents of the RSS mapping table
5694  *	@adapter: the adapter
5695  *	@map: holds the contents of the RSS mapping table
5696  *
5697  *	Reads the contents of the RSS hash->queue mapping table.
5698  */
t4_read_rss(struct adapter * adapter,u16 * map)5699 int t4_read_rss(struct adapter *adapter, u16 *map)
5700 {
5701 	u32 val;
5702 	int i, ret, nentries;
5703 
5704 	nentries = t4_chip_rss_size(adapter);
5705 	for (i = 0; i < nentries / 2; ++i) {
5706 		ret = rd_rss_row(adapter, i, &val);
5707 		if (ret)
5708 			return ret;
5709 		*map++ = G_LKPTBLQUEUE0(val);
5710 		*map++ = G_LKPTBLQUEUE1(val);
5711 	}
5712 	return 0;
5713 }
5714 
5715 /**
5716  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5717  * @adap: the adapter
5718  * @cmd: TP fw ldst address space type
5719  * @vals: where the indirect register values are stored/written
5720  * @nregs: how many indirect registers to read/write
5721  * @start_idx: index of first indirect register to read/write
5722  * @rw: Read (1) or Write (0)
5723  * @sleep_ok: if true we may sleep while awaiting command completion
5724  *
5725  * Access TP indirect registers through LDST
5726  **/
t4_tp_fw_ldst_rw(struct adapter * adap,int cmd,u32 * vals,unsigned int nregs,unsigned int start_index,unsigned int rw,bool sleep_ok)5727 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5728 			    unsigned int nregs, unsigned int start_index,
5729 			    unsigned int rw, bool sleep_ok)
5730 {
5731 	int ret = 0;
5732 	unsigned int i;
5733 	struct fw_ldst_cmd c;
5734 
5735 	for (i = 0; i < nregs; i++) {
5736 		memset(&c, 0, sizeof(c));
5737 		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5738 						F_FW_CMD_REQUEST |
5739 						(rw ? F_FW_CMD_READ :
5740 						      F_FW_CMD_WRITE) |
5741 						V_FW_LDST_CMD_ADDRSPACE(cmd));
5742 		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5743 
5744 		c.u.addrval.addr = cpu_to_be32(start_index + i);
5745 		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5746 		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5747 				      sleep_ok);
5748 		if (ret)
5749 			return ret;
5750 
5751 		if (rw)
5752 			vals[i] = be32_to_cpu(c.u.addrval.val);
5753 	}
5754 	return 0;
5755 }
5756 
5757 /**
5758  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5759  * @adap: the adapter
5760  * @reg_addr: Address Register
5761  * @reg_data: Data register
5762  * @buff: where the indirect register values are stored/written
5763  * @nregs: how many indirect registers to read/write
5764  * @start_index: index of first indirect register to read/write
5765  * @rw: READ(1) or WRITE(0)
5766  * @sleep_ok: if true we may sleep while awaiting command completion
5767  *
5768  * Read/Write TP indirect registers through LDST if possible.
5769  * Else, use backdoor access
5770  **/
t4_tp_indirect_rw(struct adapter * adap,u32 reg_addr,u32 reg_data,u32 * buff,u32 nregs,u32 start_index,int rw,bool sleep_ok)5771 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5772 			      u32 *buff, u32 nregs, u32 start_index, int rw,
5773 			      bool sleep_ok)
5774 {
5775 	int rc = -EINVAL;
5776 	int cmd;
5777 
5778 	switch (reg_addr) {
5779 	case A_TP_PIO_ADDR:
5780 		cmd = FW_LDST_ADDRSPC_TP_PIO;
5781 		break;
5782 	case A_TP_TM_PIO_ADDR:
5783 		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5784 		break;
5785 	case A_TP_MIB_INDEX:
5786 		cmd = FW_LDST_ADDRSPC_TP_MIB;
5787 		break;
5788 	default:
5789 		goto indirect_access;
5790 	}
5791 
5792 	if (t4_use_ldst(adap))
5793 		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5794 				      sleep_ok);
5795 
5796 indirect_access:
5797 
5798 	if (rc) {
5799 		if (rw)
5800 			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5801 					 start_index);
5802 		else
5803 			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5804 					  start_index);
5805 	}
5806 }
5807 
5808 /**
5809  * t4_tp_pio_read - Read TP PIO registers
5810  * @adap: the adapter
5811  * @buff: where the indirect register values are written
5812  * @nregs: how many indirect registers to read
5813  * @start_index: index of first indirect register to read
5814  * @sleep_ok: if true we may sleep while awaiting command completion
5815  *
5816  * Read TP PIO Registers
5817  **/
t4_tp_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5818 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5819 		    u32 start_index, bool sleep_ok)
5820 {
5821 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5822 			  start_index, 1, sleep_ok);
5823 }
5824 
5825 /**
5826  * t4_tp_pio_write - Write TP PIO registers
5827  * @adap: the adapter
5828  * @buff: where the indirect register values are stored
5829  * @nregs: how many indirect registers to write
5830  * @start_index: index of first indirect register to write
5831  * @sleep_ok: if true we may sleep while awaiting command completion
5832  *
5833  * Write TP PIO Registers
5834  **/
t4_tp_pio_write(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5835 void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5836 		     u32 start_index, bool sleep_ok)
5837 {
5838 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5839 			  start_index, 0, sleep_ok);
5840 }
5841 
5842 /**
5843  * t4_tp_tm_pio_read - Read TP TM PIO registers
5844  * @adap: the adapter
5845  * @buff: where the indirect register values are written
5846  * @nregs: how many indirect registers to read
5847  * @start_index: index of first indirect register to read
5848  * @sleep_ok: if true we may sleep while awaiting command completion
5849  *
5850  * Read TP TM PIO Registers
5851  **/
t4_tp_tm_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5852 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5853 		       u32 start_index, bool sleep_ok)
5854 {
5855 	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5856 			  nregs, start_index, 1, sleep_ok);
5857 }
5858 
5859 /**
5860  * t4_tp_mib_read - Read TP MIB registers
5861  * @adap: the adapter
5862  * @buff: where the indirect register values are written
5863  * @nregs: how many indirect registers to read
5864  * @start_index: index of first indirect register to read
5865  * @sleep_ok: if true we may sleep while awaiting command completion
5866  *
5867  * Read TP MIB Registers
5868  **/
t4_tp_mib_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5869 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5870 		    bool sleep_ok)
5871 {
5872 	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5873 			  start_index, 1, sleep_ok);
5874 }
5875 
5876 /**
5877  *	t4_read_rss_key - read the global RSS key
5878  *	@adap: the adapter
5879  *	@key: 10-entry array holding the 320-bit RSS key
5880  * 	@sleep_ok: if true we may sleep while awaiting command completion
5881  *
5882  *	Reads the global 320-bit RSS key.
5883  */
t4_read_rss_key(struct adapter * adap,u32 * key,bool sleep_ok)5884 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5885 {
5886 	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5887 }
5888 
5889 /**
5890  *	t4_write_rss_key - program one of the RSS keys
5891  *	@adap: the adapter
5892  *	@key: 10-entry array holding the 320-bit RSS key
5893  *	@idx: which RSS key to write
5894  * 	@sleep_ok: if true we may sleep while awaiting command completion
5895  *
5896  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5897  *	0..15 the corresponding entry in the RSS key table is written,
5898  *	otherwise the global RSS key is written.
5899  */
t4_write_rss_key(struct adapter * adap,const u32 * key,int idx,bool sleep_ok)5900 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5901 		      bool sleep_ok)
5902 {
5903 	u8 rss_key_addr_cnt = 16;
5904 	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5905 
5906 	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5907 	 * allows access to key addresses 16-63 by using KeyWrAddrX
5908 	 * as index[5:4](upper 2) into key table
5909 	 */
5910 	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5911 	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5912 		rss_key_addr_cnt = 32;
5913 
5914 	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5915 
5916 	if (idx >= 0 && idx < rss_key_addr_cnt) {
5917 		if (rss_key_addr_cnt > 16)
5918 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5919 				     vrt | V_KEYWRADDRX(idx >> 4) |
5920 				     V_T6_VFWRADDR(idx) | F_KEYWREN);
5921 		else
5922 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5923 				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5924 	}
5925 }
5926 
5927 /**
5928  *	t4_read_rss_pf_config - read PF RSS Configuration Table
5929  *	@adapter: the adapter
5930  *	@index: the entry in the PF RSS table to read
5931  *	@valp: where to store the returned value
5932  * 	@sleep_ok: if true we may sleep while awaiting command completion
5933  *
5934  *	Reads the PF RSS Configuration Table at the specified index and returns
5935  *	the value found there.
5936  */
t4_read_rss_pf_config(struct adapter * adapter,unsigned int index,u32 * valp,bool sleep_ok)5937 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5938 			   u32 *valp, bool sleep_ok)
5939 {
5940 	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5941 }
5942 
5943 /**
5944  *	t4_write_rss_pf_config - write PF RSS Configuration Table
5945  *	@adapter: the adapter
5946  *	@index: the entry in the VF RSS table to read
5947  *	@val: the value to store
5948  * 	@sleep_ok: if true we may sleep while awaiting command completion
5949  *
5950  *	Writes the PF RSS Configuration Table at the specified index with the
5951  *	specified value.
5952  */
t4_write_rss_pf_config(struct adapter * adapter,unsigned int index,u32 val,bool sleep_ok)5953 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5954 			    u32 val, bool sleep_ok)
5955 {
5956 	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5957 			sleep_ok);
5958 }
5959 
5960 /**
5961  *	t4_read_rss_vf_config - read VF RSS Configuration Table
5962  *	@adapter: the adapter
5963  *	@index: the entry in the VF RSS table to read
5964  *	@vfl: where to store the returned VFL
5965  *	@vfh: where to store the returned VFH
5966  * 	@sleep_ok: if true we may sleep while awaiting command completion
5967  *
5968  *	Reads the VF RSS Configuration Table at the specified index and returns
5969  *	the (VFL, VFH) values found there.
5970  */
t4_read_rss_vf_config(struct adapter * adapter,unsigned int index,u32 * vfl,u32 * vfh,bool sleep_ok)5971 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5972 			   u32 *vfl, u32 *vfh, bool sleep_ok)
5973 {
5974 	u32 vrt, mask, data;
5975 
5976 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5977 		mask = V_VFWRADDR(M_VFWRADDR);
5978 		data = V_VFWRADDR(index);
5979 	} else {
5980 		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5981 		 data = V_T6_VFWRADDR(index);
5982 	}
5983 	/*
5984 	 * Request that the index'th VF Table values be read into VFL/VFH.
5985 	 */
5986 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5987 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5988 	vrt |= data | F_VFRDEN;
5989 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5990 
5991 	/*
5992 	 * Grab the VFL/VFH values ...
5993 	 */
5994 	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5995 	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5996 }
5997 
5998 /**
5999  *	t4_read_rss_pf_map - read PF RSS Map
6000  *	@adapter: the adapter
6001  * 	@sleep_ok: if true we may sleep while awaiting command completion
6002  *
6003  *	Reads the PF RSS Map register and returns its value.
6004  */
t4_read_rss_pf_map(struct adapter * adapter,bool sleep_ok)6005 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
6006 {
6007 	u32 pfmap;
6008 
6009 	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6010 
6011 	return pfmap;
6012 }
6013 
6014 /**
6015  *	t4_read_rss_pf_mask - read PF RSS Mask
6016  *	@adapter: the adapter
6017  * 	@sleep_ok: if true we may sleep while awaiting command completion
6018  *
6019  *	Reads the PF RSS Mask register and returns its value.
6020  */
t4_read_rss_pf_mask(struct adapter * adapter,bool sleep_ok)6021 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6022 {
6023 	u32 pfmask;
6024 
6025 	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6026 
6027 	return pfmask;
6028 }
6029 
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer sized to cover the whole IPv4 TCP MIB window. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Index of counter x within val[], relative to the window base. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* Combine a HI/LO 32-bit counter pair into one 64-bit value. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/*
		 * The v6 read starts at the V6 window base, but the STAT
		 * macros still index relative to the v4 base — this relies
		 * on the v6 counters having the same layout/offsets within
		 * their window as the v4 ones.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
6069 
6070 /**
6071  *	t4_tp_get_err_stats - read TP's error MIB counters
6072  *	@adap: the adapter
6073  *	@st: holds the counter values
6074  * 	@sleep_ok: if true we may sleep while awaiting command completion
6075  *
6076  *	Returns the values of TP's error counters.
6077  */
t4_tp_get_err_stats(struct adapter * adap,struct tp_err_stats * st,bool sleep_ok)6078 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
6079 			 bool sleep_ok)
6080 {
6081 	int nchan = adap->params.arch.nchan;
6082 
6083 	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
6084 		       sleep_ok);
6085 
6086 	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
6087 		       sleep_ok);
6088 
6089 	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
6090 		       sleep_ok);
6091 
6092 	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
6093 		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
6094 
6095 	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
6096 		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
6097 
6098 	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6099 		       sleep_ok);
6100 
6101 	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6102 		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
6103 
6104 	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6105 		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6106 
6107 	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6108 		       sleep_ok);
6109 }
6110 
6111 /**
6112  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
6113  *	@adap: the adapter
6114  *	@st: holds the counter values
6115  * 	@sleep_ok: if true we may sleep while awaiting command completion
6116  *
6117  *	Returns the values of TP's CPL counters.
6118  */
t4_tp_get_cpl_stats(struct adapter * adap,struct tp_cpl_stats * st,bool sleep_ok)6119 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6120 			 bool sleep_ok)
6121 {
6122 	int nchan = adap->params.arch.nchan;
6123 
6124 	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6125 
6126 	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6127 }
6128 
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/*
	 * Reads two consecutive MIB counters starting at RQE_DFR_PKT into
	 * @st.  NOTE(review): this assumes the two counter fields are laid
	 * out adjacently in struct tp_rdma_stats — confirm in the header.
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}
6142 
6143 /**
6144  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6145  *	@adap: the adapter
6146  *	@idx: the port index
6147  *	@st: holds the counter values
6148  * 	@sleep_ok: if true we may sleep while awaiting command completion
6149  *
6150  *	Returns the values of TP's FCoE counters for the selected port.
6151  */
t4_get_fcoe_stats(struct adapter * adap,unsigned int idx,struct tp_fcoe_stats * st,bool sleep_ok)6152 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6153 		       struct tp_fcoe_stats *st, bool sleep_ok)
6154 {
6155 	u32 val[2];
6156 
6157 	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6158 		       sleep_ok);
6159 
6160 	t4_tp_mib_read(adap, &st->frames_drop, 1,
6161 		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6162 
6163 	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6164 		       sleep_ok);
6165 
6166 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
6167 }
6168 
6169 /**
6170  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6171  *	@adap: the adapter
6172  *	@st: holds the counter values
6173  * 	@sleep_ok: if true we may sleep while awaiting command completion
6174  *
6175  *	Returns the values of TP's counters for non-TCP directly-placed packets.
6176  */
t4_get_usm_stats(struct adapter * adap,struct tp_usm_stats * st,bool sleep_ok)6177 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6178 		      bool sleep_ok)
6179 {
6180 	u32 val[4];
6181 
6182 	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6183 
6184 	st->frames = val[0];
6185 	st->drops = val[1];
6186 	st->octets = ((u64)val[2] << 32) | val[3];
6187 }
6188 
6189 /**
6190  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
6191  *	@adap: the adapter
6192  *	@mtus: where to store the MTU values
6193  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
6194  *
6195  *	Reads the HW path MTU table.
6196  */
t4_read_mtu_tbl(struct adapter * adap,u16 * mtus,u8 * mtu_log)6197 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6198 {
6199 	u32 v;
6200 	int i;
6201 
6202 	for (i = 0; i < NMTUS; ++i) {
6203 		t4_write_reg(adap, A_TP_MTU_TABLE,
6204 			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
6205 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
6206 		mtus[i] = G_MTUVALUE(v);
6207 		if (mtu_log)
6208 			mtu_log[i] = G_MTUWIDTH(v);
6209 	}
6210 }
6211 
6212 /**
6213  *	t4_read_cong_tbl - reads the congestion control table
6214  *	@adap: the adapter
6215  *	@incr: where to store the alpha values
6216  *
6217  *	Reads the additive increments programmed into the HW congestion
6218  *	control table.
6219  */
t4_read_cong_tbl(struct adapter * adap,u16 incr[NMTUS][NCCTRL_WIN])6220 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6221 {
6222 	unsigned int mtu, w;
6223 
6224 	for (mtu = 0; mtu < NMTUS; ++mtu)
6225 		for (w = 0; w < NCCTRL_WIN; ++w) {
6226 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
6227 				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
6228 			incr[mtu][w] = (u16)t4_read_reg(adap,
6229 						A_TP_CCTRL_TABLE) & 0x1fff;
6230 		}
6231 }
6232 
6233 /**
6234  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6235  *	@adap: the adapter
6236  *	@addr: the indirect TP register address
6237  *	@mask: specifies the field within the register to modify
6238  *	@val: new value for the field
6239  *
6240  *	Sets a field of an indirect TP register to the given value.
6241  */
t4_tp_wr_bits_indirect(struct adapter * adap,unsigned int addr,unsigned int mask,unsigned int val)6242 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6243 			    unsigned int mask, unsigned int val)
6244 {
6245 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6246 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6247 	t4_write_reg(adap, A_TP_PIO_DATA, val);
6248 }
6249 
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Fills the 32-entry alpha (@a) and beta (@b) congestion control
 *	parameter arrays from fixed lookup tables.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha_tbl[] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta_tbl[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha_tbl[i];
		b[i] = beta_tbl[i];
	}
}
6293 
6294 /* The minimum additive increment value for the congestion control table */
6295 #define CC_MIN_INCR 2U
6296 
/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet size per congestion-control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/*
		 * log2 is derived from fls(); assumed to be the 1-based
		 * position of the MTU's most significant set bit — TODO
		 * confirm against the fls() definition in common.h.
		 */
		unsigned int log2 = fls(mtu);

		/* Round down unless the bit two below the MSB is set. */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment scaled by alpha and the window's
			 * average packet size; mtu - 40 presumably subtracts
			 * the TCP/IP header bytes — verify.  Clamped below
			 * at CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack (mtu index, window, beta, increment). */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
6340 
6341 /*
6342  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6343  * clocks.  The formula is
6344  *
6345  * bytes/s = bytes256 * 256 * ClkFreq / 4096
6346  *
6347  * which is equivalent to
6348  *
6349  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6350  */
chan_rate(struct adapter * adap,unsigned int bytes256)6351 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6352 {
6353 	u64 v = bytes256 * adap->params.vpd.cclk;
6354 
6355 	return v * 62 + v / 2;
6356 }
6357 
6358 /**
6359  *	t4_get_chan_txrate - get the current per channel Tx rates
6360  *	@adap: the adapter
6361  *	@nic_rate: rates for NIC traffic
6362  *	@ofld_rate: rates for offloaded traffic
6363  *
6364  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
6365  *	for each channel.
6366  */
t4_get_chan_txrate(struct adapter * adap,u64 * nic_rate,u64 * ofld_rate)6367 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6368 {
6369 	u32 v;
6370 
6371 	v = t4_read_reg(adap, A_TP_TX_TRATE);
6372 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6373 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6374 	if (adap->params.arch.nchan == NCHAN) {
6375 		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6376 		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6377 	}
6378 
6379 	v = t4_read_reg(adap, A_TP_TX_ORATE);
6380 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6381 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6382 	if (adap->params.arch.nchan == NCHAN) {
6383 		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6384 		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6385 	}
6386 }
6387 
/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @enable is
 *	%0 @tp is not examined and may be %NULL. The user is responsible to
 *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
 *	docs/readme.txt for a complete description of how to setup traceing on
 *	T4.  Returns 0 on success, -EINVAL if the requested parameters are out
 *	of range for the current trace configuration.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	/* Disabling is just clearing the filter's match control. */
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Range-check the remaining parameters against the field widths. */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Per-filter stride for the match/don't-care register banks. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* HW stores a don't-care mask, hence the inversion of tp->mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Writing CTL_A last (re-)enables the tracer atomically. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
		     V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
6469 
6470 /**
6471  *	t4_get_trace_filter - query one of the tracing filters
6472  *	@adap: the adapter
6473  *	@tp: the current trace filter parameters
6474  *	@idx: which trace filter to query
6475  *	@enabled: non-zero if the filter is enabled
6476  *
6477  *	Returns the current settings of one of the HW tracing filters.
6478  */
t4_get_trace_filter(struct adapter * adap,struct trace_params * tp,int idx,int * enabled)6479 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6480 			 int *enabled)
6481 {
6482 	u32 ctla, ctlb;
6483 	int i, ofst = idx * 4;
6484 	u32 data_reg, mask_reg;
6485 
6486 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6487 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6488 
6489 	if (is_t4(adap->params.chip)) {
6490 		*enabled = !!(ctla & F_TFEN);
6491 		tp->port =  G_TFPORT(ctla);
6492 		tp->invert = !!(ctla & F_TFINVERTMATCH);
6493 	} else {
6494 		*enabled = !!(ctla & F_T5_TFEN);
6495 		tp->port = G_T5_TFPORT(ctla);
6496 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6497 	}
6498 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
6499 	tp->min_len = G_TFMINPKTSIZE(ctlb);
6500 	tp->skip_ofst = G_TFOFFSET(ctla);
6501 	tp->skip_len = G_TFLENGTH(ctla);
6502 
6503 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6504 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6505 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6506 
6507 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6508 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6509 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6510 	}
6511 }
6512 
6513 /**
6514  *	t4_read_tcb - read a hardware TCP Control Block structure
6515  *	@adap: the adapter
6516  *	@win: PCI-E Memory Window to use
6517  *	@tid: the TCB ID
6518  *	@tcb: the buffer to return the TCB in
6519  *
6520  *	Reads the indicated hardware TCP Control Block and returns it in
6521  *	the supplied buffer.  Returns 0 on success.
6522  */
t4_read_tcb(struct adapter * adap,int win,int tid,u32 tcb[TCB_SIZE/4])6523 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
6524 {
6525 	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
6526 	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
6527 	__be32 raw_tcb[TCB_SIZE/4];
6528 	int ret, word;
6529 
6530 	ret = t4_memory_rw_addr(adap, win,
6531 				tcb_addr, sizeof raw_tcb, raw_tcb,
6532 				T4_MEMORY_READ);
6533 	if (ret)
6534 		return ret;
6535 
6536 	for (word = 0; word < 32; word++)
6537 		tcb[word] = be32_to_cpu(raw_tcb[word]);
6538 	return 0;
6539 }
6540 
6541 /**
6542  *	t4_pmtx_get_stats - returns the HW stats from PMTX
6543  *	@adap: the adapter
6544  *	@cnt: where to store the count statistics
6545  *	@cycles: where to store the cycle statistics
6546  *
6547  *	Returns performance statistics from PMTX.
6548  */
t4_pmtx_get_stats(struct adapter * adap,u32 cnt[],u64 cycles[])6549 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6550 {
6551 	int i;
6552 	u32 data[2];
6553 
6554 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6555 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6556 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6557 		if (is_t4(adap->params.chip)) {
6558 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6559 		} else {
6560 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6561 					 A_PM_TX_DBG_DATA, data, 2,
6562 					 A_PM_TX_DBG_STAT_MSB);
6563 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6564 		}
6565 	}
6566 }
6567 
6568 /**
6569  *	t4_pmrx_get_stats - returns the HW stats from PMRX
6570  *	@adap: the adapter
6571  *	@cnt: where to store the count statistics
6572  *	@cycles: where to store the cycle statistics
6573  *
6574  *	Returns performance statistics from PMRX.
6575  */
t4_pmrx_get_stats(struct adapter * adap,u32 cnt[],u64 cycles[])6576 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6577 {
6578 	int i;
6579 	u32 data[2];
6580 
6581 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6582 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6583 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6584 		if (is_t4(adap->params.chip)) {
6585 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6586 		} else {
6587 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6588 					 A_PM_RX_DBG_DATA, data, 2,
6589 					 A_PM_RX_DBG_STAT_MSB);
6590 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6591 		}
6592 	}
6593 }
6594 
/**
 *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Computes and returns a bitmap indicating which MPS buffer groups are
 *	associated with the given Port.  Bit i is set if buffer group i is
 *	used by the Port.  Returns 0 (and logs an error) for a chip/port-count
 *	combination it has no mapping for.
 */
static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
					      int pidx)
{
	unsigned int chip_version, nports;

	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	/* Port count is encoded as a power of two in MPS_CMN_CTL. */
	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		/* T4/T5: 4 buffer groups shared evenly among the ports. */
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		/* T6: one buffer group per port, even-numbered groups. */
		switch (nports) {
		case 2: return 1 << (2 * pidx);
		}
		break;
	}

	/* Unhandled combination: complain loudly and return an empty map. */
	CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
	       chip_version, nports);

	return 0;
}
6634 
/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given Port.  Bit i is set if buffer group i is used by the
 *	Port.  The result is cached in adapter->params.mps_bg_map; returns 0
 *	for an out-of-range port index.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	/*
	 * pidx is promoted to unsigned for this comparison, so a negative
	 * pidx also trips the range check here.
	 */
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * (A cached value of 0 means "not yet computed".)
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us.  If the new
	 * API isn't supported, revert back to old hardcoded way.  The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		u32 param, val;
		int ret;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map.  Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
6699 
6700 /**
6701  *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
6702  *      @adapter: the adapter
6703  *      @pidx: the port index
6704  */
t4_get_tp_e2c_map(struct adapter * adapter,int pidx)6705 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6706 {
6707 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6708 	u32 param, val = 0;
6709 	int ret;
6710 
6711 	if (pidx >= nports) {
6712 		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6713 		return 0;
6714 	}
6715 
6716 	/* FW version >= 1.16.44.0 can determine E2C channel map using
6717 	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6718 	 */
6719 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6720 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6721 	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6722 					 0, 1, &param, &val);
6723 	if (!ret)
6724 		return (val >> (8*pidx)) & 0xff;
6725 
6726 	return 0;
6727 }
6728 
6729 /**
6730  *	t4_get_tp_ch_map - return TP ingress channels associated with a port
6731  *	@adapter: the adapter
6732  *	@pidx: the port index
6733  *
6734  *	Returns a bitmap indicating which TP Ingress Channels are associated with
6735  *	a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
6736  */
t4_get_tp_ch_map(struct adapter * adapter,int pidx)6737 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
6738 {
6739 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6740 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6741 
6742 	if (pidx >= nports) {
6743 		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
6744 		return 0;
6745 	}
6746 
6747 	switch (chip_version) {
6748 	case CHELSIO_T4:
6749 	case CHELSIO_T5:
6750 		/*
6751 		 * Note that this happens to be the same values as the MPS
6752 		 * Buffer Group Map for these Chips.  But we replicate the code
6753 		 * here because they're really separate concepts.
6754 		 */
6755 		switch (nports) {
6756 		case 1: return 0xf;
6757 		case 2: return 3 << (2 * pidx);
6758 		case 4: return 1 << pidx;
6759 		}
6760 		break;
6761 
6762 	case CHELSIO_T6:
6763 		switch (nports) {
6764 		case 1: return 1 << pidx;
6765 		case 2: return 1 << pidx;
6766 		}
6767 		break;
6768 	}
6769 
6770 	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
6771 	       chip_version, nports);
6772 	return 0;
6773 }
6774 
6775 /**
6776  *      t4_get_port_type_description - return Port Type string description
6777  *      @port_type: firmware Port Type enumeration
6778  */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed directly by the firmware's fw_port_type enumeration. */
	static const char *const descriptions[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
		"KR_XLAUI",
	};

	/* Values newer than this table are reported as unknown. */
	if (port_type >= ARRAY_SIZE(descriptions))
		return "UNKNOWN";

	return descriptions[port_type];
}
6811 
6812 /**
6813  *      t4_get_port_stats_offset - collect port stats relative to a previous
6814  *				   snapshot
6815  *      @adap: The adapter
6816  *      @idx: The port
6817  *      @stats: Current stats to fill
6818  *      @offset: Previous stats snapshot
6819  */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)6820 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6821 		struct port_stats *stats,
6822 		struct port_stats *offset)
6823 {
6824 	u64 *s, *o;
6825 	int i;
6826 
6827 	t4_get_port_stats(adap, idx, stats);
6828 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6829 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
6830 			i++, s++, o++)
6831 		*s -= *o;
6832 }
6833 
6834 /**
6835  *	t4_get_port_stats - collect port statistics
6836  *	@adap: the adapter
6837  *	@idx: the port index
6838  *	@p: the stats structure to fill
6839  *
6840  *	Collect statistics related to the given port from HW.
6841  */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* Which MPS buffer groups belong to this port (bit i => group i). */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port statistic: the register layout differs between T4 and T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Common (non-per-port) statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+, stat_ctl indicates whether the hardware included TX PAUSE
	 * frames in the 64B and/or multicast counters; back those out so the
	 * counters reflect data frames only.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same PAUSE-frame adjustment as above, but for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters exist per buffer group; only report the
	 * groups this port actually uses, zero otherwise.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6931 
6932 /**
6933  *	t4_get_lb_stats - collect loopback port statistics
6934  *	@adap: the adapter
6935  *	@idx: the loopback port index
6936  *	@p: the stats structure to fill
6937  *
6938  *	Return HW statistics for the given loopback port.
6939  */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	/* Which MPS buffer groups belong to this port (bit i => group i). */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Per-port loopback statistic; register layout differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Common (non-per-port) statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Per-buffer-group counters; only report groups this port uses. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6979 
/**
 *	t4_mk_filtdelwr - create a delete filter WR
6981  *	@ftid: the filter ID
6982  *	@wr: the filter work request to populate
6983  *	@rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
6984  *	@qid: ingress queue to receive the delete notification
6985  *
6986  *	Creates a filter work request to delete the supplied filter.  If @qid
6987  *	is negative the delete notification is suppressed.
6988  */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	/* Build the work request in place; start from all-zero fields. */
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* A negative qid means the caller wants no delete notification. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
7003 
/*
 * INIT_CMD - fill in the common header of firmware command 'var':
 * the opcode for FW_<cmd>_CMD, the REQUEST flag, the READ/WRITE
 * direction flag, and the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
7010 
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			  u32 addr, u32 val)
{
	struct fw_ldst_cmd cmd;

	/* Issue a LOAD/STORE command targeting firmware address space. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_addrspace =
	    cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			F_FW_CMD_REQUEST |
			F_FW_CMD_WRITE |
			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.addrval.addr = cpu_to_be32(addr);
	cmd.u.addrval.val = cpu_to_be32(val);

	/* No reply payload is expected for a write. */
	return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
7029 
7030 /**
7031  *	t4_mdio_rd - read a PHY register through MDIO
7032  *	@adap: the adapter
7033  *	@mbox: mailbox to use for the FW command
7034  *	@phy_addr: the PHY address
7035  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7036  *	@reg: the register to read
7037  *	@valp: where to store the value
7038  *
7039  *	Issues a FW command through the given mailbox to read a PHY register.
7040  */
t4_mdio_rd(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int * valp)7041 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7042 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
7043 {
7044 	int ret;
7045 	u32 ldst_addrspace;
7046 	struct fw_ldst_cmd c;
7047 
7048 	memset(&c, 0, sizeof(c));
7049 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7050 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7051 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7052 					ldst_addrspace);
7053 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7054 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7055 					 V_FW_LDST_CMD_MMD(mmd));
7056 	c.u.mdio.raddr = cpu_to_be16(reg);
7057 
7058 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7059 	if (ret == 0)
7060 		*valp = be16_to_cpu(c.u.mdio.rval);
7061 	return ret;
7062 }
7063 
7064 /**
7065  *	t4_mdio_wr - write a PHY register through MDIO
7066  *	@adap: the adapter
7067  *	@mbox: mailbox to use for the FW command
7068  *	@phy_addr: the PHY address
7069  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7070  *	@reg: the register to write
7071  *	@valp: value to write
7072  *
7073  *	Issues a FW command through the given mailbox to write a PHY register.
7074  */
t4_mdio_wr(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int val)7075 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7076 	       unsigned int mmd, unsigned int reg, unsigned int val)
7077 {
7078 	u32 ldst_addrspace;
7079 	struct fw_ldst_cmd c;
7080 
7081 	memset(&c, 0, sizeof(c));
7082 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7083 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7084 					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7085 					ldst_addrspace);
7086 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7087 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7088 					 V_FW_LDST_CMD_MMD(mmd));
7089 	c.u.mdio.raddr = cpu_to_be16(reg);
7090 	c.u.mdio.rval = cpu_to_be16(val);
7091 
7092 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7093 }
7094 
7095 /**
7096  *
7097  *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
7099  *	@state: the state idma is stuck in
7100  */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* State-name tables, one per chip generation; indexed by the raw
	 * IDMA state value.  The encodings differ between T4, T5 and T6.
	 */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char **)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
		return;
	}

	/* Log the symbolic state name if the value is in range. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	/* Dump the SGE debug registers to aid further diagnosis. */
	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
7251 
7252 /**
7253  *      t4_sge_ctxt_flush - flush the SGE context cache
7254  *      @adap: the adapter
7255  *      @mbox: mailbox to use for the FW command
7256  *
7257  *      Issues a FW command through the given mailbox to flush the
7258  *      SGE context cache.
7259  */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)7260 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
7261 {
7262 	int ret;
7263 	u32 ldst_addrspace;
7264 	struct fw_ldst_cmd c;
7265 
7266 	memset(&c, 0, sizeof(c));
7267 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
7268 						 FW_LDST_ADDRSPC_SGE_EGRC :
7269 						 FW_LDST_ADDRSPC_SGE_INGC);
7270 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7271 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7272 					ldst_addrspace);
7273 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7274 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7275 
7276 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7277 	return ret;
7278 }
7279 
7280 /**
 *	t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
 *	@adap: the adapter
7283  *	@ndbqtimers: size of the provided SGE Doorbell Queue Timer table
7284  *	@dbqtimers: SGE Doorbell Queue Timer table
7285  *
7286  *	Reads the SGE Doorbell Queue Timer values into the provided table.
7287  *	Returns 0 on success (Firmware and Hardware support this feature),
7288  *	an error on failure.
7289  */
int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
			  u16 *dbqtimers)
{
	unsigned int done = 0;
	int ret = 0;

	/* The firmware answers at most seven parameters per query, so walk
	 * the timer table in chunks of up to ARRAY_SIZE(params).
	 */
	while (done < ndbqtimers) {
		u32 params[7], vals[7];
		unsigned int chunk = ndbqtimers - done;
		unsigned int i;

		if (chunk > ARRAY_SIZE(params))
			chunk = ARRAY_SIZE(params);

		for (i = 0; i < chunk; i++)
			params[i] =
			  (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
			   V_FW_PARAMS_PARAM_Y(done + i));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      chunk, params, vals);
		if (ret)
			break;

		for (i = 0; i < chunk; i++)
			dbqtimers[done++] = vals[i];
	}

	/* 0 on success; otherwise the first query error encountered. */
	return ret;
}
7320 
7321 /**
7322  *      t4_fw_hello - establish communication with FW
7323  *      @adap: the adapter
7324  *      @mbox: mailbox to use for the FW command
7325  *      @evt_mbox: mailbox to receive async FW events
7326  *      @master: specifies the caller's willingness to be the device master
7327  *	@state: returns the current device state (if non-NULL)
7328  *
7329  *	Issues a command to establish communication with FW.  Returns either
7330  *	an error (negative integer) or the mailbox of the Master PF.
7331  */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/* Encode our mastership preference: forbid, require (nominating our
	 * own mailbox), or let the firmware pick.
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the firmware's reply: who the Master PF is and what state
	 * the device is in (error takes precedence over initialized).
	 */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll A_PCIE_FW every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	/* On success the return value is the Master PF's mailbox. */
	return master_mbox;
}
7450 
7451 /**
7452  *	t4_fw_bye - end communication with FW
7453  *	@adap: the adapter
7454  *	@mbox: mailbox to use for the FW command
7455  *
7456  *	Issues a command to terminate communication with FW.
7457  */
t4_fw_bye(struct adapter * adap,unsigned int mbox)7458 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7459 {
7460 	struct fw_bye_cmd c;
7461 
7462 	memset(&c, 0, sizeof(c));
7463 	INIT_CMD(c, BYE, WRITE);
7464 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7465 }
7466 
7467 /**
7468  *	t4_fw_reset - issue a reset to FW
7469  *	@adap: the adapter
7470  *	@mbox: mailbox to use for the FW command
7471  *	@reset: specifies the type of reset to perform
7472  *
7473  *	Issues a reset command of the specified type to FW.
7474  */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)7475 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7476 {
7477 	struct fw_reset_cmd c;
7478 
7479 	memset(&c, 0, sizeof(c));
7480 	INIT_CMD(c, RESET, WRITE);
7481 	c.val = cpu_to_be32(reset);
7482 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7483 }
7484 
7485 /**
7486  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7487  *	@adap: the adapter
7488  *	@mbox: mailbox to use for the FW RESET command (if desired)
7489  *	@force: force uP into RESET even if FW RESET command fails
7490  *
7491  *	Issues a RESET command to firmware (if desired) with a HALT indication
7492  *	and then puts the microprocessor into RESET state.  The RESET command
7493  *	will only be issued if a legitimate mailbox is provided (mbox <=
7494  *	M_PCIE_FW_MASTER).
7495  *
7496  *	This is generally used in order for the host to safely manipulate the
7497  *	adapter without fear of conflicting with whatever the firmware might
7498  *	be doing.  The only way out of this state is to RESTART the firmware
7499  *	...
7500  */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		/* PIO reset, with the HALT flag so firmware stops in RESET. */
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
7544 
7545 /**
7546  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7547  *	@adap: the adapter
7548  *	@reset: if we want to do a RESET to restart things
7549  *
7550  *	Restart firmware previously halted by t4_fw_halt().  On successful
7551  *	return the previous PF Master remains as the new PF Master and there
7552  *	is no need to issue a new HELLO command, etc.
7553  *
7554  *	We do this in two ways:
7555  *
7556  *	 1. If we're dealing with newer firmware we'll simply want to take
7557  *	    the chip's microprocessor out of RESET.  This will cause the
7558  *	    firmware to start up from its start vector.  And then we'll loop
7559  *	    until the firmware indicates it's started again (PCIE_FW.HALT
7560  *	    reset to 0) or we timeout.
7561  *
7562  *	 2. If we're dealing with older firmware then we'll need to RESET
7563  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
7564  *	    flag and automatically RESET itself on startup.
7565  */
t4_fw_restart(struct adapter * adap,unsigned int mbox,int reset)7566 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7567 {
7568 	if (reset) {
7569 		/*
7570 		 * Since we're directing the RESET instead of the firmware
7571 		 * doing it automatically, we need to clear the PCIE_FW.HALT
7572 		 * bit.
7573 		 */
7574 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
7575 
7576 		/*
7577 		 * If we've been given a valid mailbox, first try to get the
7578 		 * firmware to do the RESET.  If that works, great and we can
7579 		 * return success.  Otherwise, if we haven't been given a
7580 		 * valid mailbox or the RESET command failed, fall back to
7581 		 * hitting the chip with a hammer.
7582 		 */
7583 		if (mbox <= M_PCIE_FW_MASTER) {
7584 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7585 			msleep(100);
7586 			if (t4_fw_reset(adap, mbox,
7587 					F_PIORST | F_PIORSTMODE) == 0)
7588 				return 0;
7589 		}
7590 
7591 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7592 		msleep(2000);
7593 	} else {
7594 		int ms;
7595 
7596 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7597 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7598 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7599 				return FW_SUCCESS;
7600 			msleep(100);
7601 			ms += 100;
7602 		}
7603 		return -ETIMEDOUT;
7604 	}
7605 	return 0;
7606 }
7607 
7608 /**
7609  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7610  *	@adap: the adapter
7611  *	@mbox: mailbox to use for the FW RESET command (if desired)
7612  *	@fw_data: the firmware image to write
7613  *	@size: image size
7614  *	@force: force upgrade even if firmware doesn't cooperate
7615  *
7616  *	Perform all of the steps necessary for upgrading an adapter's
7617  *	firmware image.  Normally this requires the cooperation of the
7618  *	existing firmware in order to halt all existing activities
7619  *	but if an invalid mailbox token is passed in we skip that step
7620  *	(though we'll still put the adapter microprocessor into RESET in
7621  *	that case).
7622  *
7623  *	On successful return the new firmware will have been loaded and
7624  *	the adapter will have been fully RESET losing all previous setup
7625  *	state.  On unsuccessful return the adapter may be completely hosed ...
7626  *	positive errno indicates that the adapter is ~probably~ intact, a
7627  *	negative errno indicates that things are looking bad ...
7628  */
t4_fw_upgrade(struct adapter * adap,unsigned int mbox,const u8 * fw_data,unsigned int size,int force)7629 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7630 		  const u8 *fw_data, unsigned int size, int force)
7631 {
7632 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7633 	unsigned int bootstrap =
7634 	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7635 	int reset, ret;
7636 
7637 	if (!t4_fw_matches_chip(adap, fw_hdr))
7638 		return -EINVAL;
7639 
7640 	/* Disable FW_OK flags so that mbox commands with FW_OK flags check
7641 	 * wont be send when we are flashing FW.
7642 	 */
7643 	adap->flags &= ~FW_OK;
7644 
7645 	if (!bootstrap) {
7646 		ret = t4_fw_halt(adap, mbox, force);
7647 		if (ret < 0 && !force)
7648 			goto out;
7649 	}
7650 
7651 	ret = t4_load_fw(adap, fw_data, size, bootstrap);
7652 	if (ret < 0 || bootstrap)
7653 		goto out;
7654 
7655 	/*
7656 	 * If there was a Firmware Configuration File staored in FLASH,
7657 	 * there's a good chance that it won't be compatible with the new
7658 	 * Firmware.  In order to prevent difficult to diagnose adapter
7659 	 * initialization issues, we clear out the Firmware Configuration File
7660 	 * portion of the FLASH .  The user will need to re-FLASH a new
7661 	 * Firmware Configuration File which is compatible with the new
7662 	 * Firmware if that's desired.
7663 	 */
7664 	(void)t4_load_cfg(adap, NULL, 0);
7665 
7666 	/*
7667 	 * Older versions of the firmware don't understand the new
7668 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7669 	 * restart.  So for newly loaded older firmware we'll have to do the
7670 	 * RESET for it so it starts up on a clean slate.  We can tell if
7671 	 * the newly loaded firmware will handle this right by checking
7672 	 * its header flags to see if it advertises the capability.
7673 	 */
7674 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7675 	ret = t4_fw_restart(adap, mbox, reset);
7676 
7677 	/* Grab potentially new Firmware Device Log parameters so we can see
7678 	 * how helthy the new Firmware is.  It's okay to contact the new
7679 	 * Firmware for these parameters even though, as far as it's
7680 	 * concerned, we've never said "HELLO" to it ...
7681 	 */
7682 	(void)t4_init_devlog_params(adap, 1);
7683 
7684 out:
7685 	adap->flags |= FW_OK;
7686 	return ret;
7687 }
7688 
7689 /**
7690  *	t4_fl_pkt_align - return the fl packet alignment
7691  *	@adap: the adapter
7692  *	is_packed: True when the driver uses packed FLM mode
7693  *
7694  *	T4 has a single field to specify the packing and padding boundary.
7695  *	T5 onwards has separate fields for this and hence the alignment for
7696  *	next packet offset is maximum of these two.
7697  *
7698  */
t4_fl_pkt_align(struct adapter * adap,bool is_packed)7699 int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
7700 {
7701 	u32 sge_control, sge_control2;
7702 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7703 
7704 	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
7705 
7706 	/* T4 uses a single control field to specify both the PCIe Padding and
7707 	 * Packing Boundary.  T5 introduced the ability to specify these
7708 	 * separately.  The actual Ingress Packet Data alignment boundary
7709 	 * within Packed Buffer Mode is the maximum of these two
7710 	 * specifications.  (Note that it makes no real practical sense to
7711 	 * have the Pading Boudary be larger than the Packing Boundary but you
7712 	 * could set the chip up that way and, in fact, legacy T4 code would
7713 	 * end doing this because it would initialize the Padding Boundary and
7714 	 * leave the Packing Boundary initialized to 0 (16 bytes).)
7715 	 * Padding Boundary values in T6 starts from 8B,
7716 	 * where as it is 32B for T4 and T5.
7717 	 */
7718 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7719 		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
7720 	else
7721 		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
7722 
7723 	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
7724 
7725 	fl_align = ingpadboundary;
7726 	if (!is_t4(adap->params.chip) && is_packed) {
7727 		/* T5 has a weird interpretation of one of the PCIe Packing
7728 		 * Boundary values.  No idea why ...
7729 		 */
7730 		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
7731 		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
7732 		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
7733 			ingpackboundary = 16;
7734 		else
7735 			ingpackboundary = 1 << (ingpackboundary +
7736 						X_INGPACKBOUNDARY_SHIFT);
7737 
7738 		fl_align = max(ingpadboundary, ingpackboundary);
7739 	}
7740 	return fl_align;
7741 }
7742 
7743 /**
7744  *	t4_fixup_host_params_compat - fix up host-dependent parameters
7745  *	@adap: the adapter
7746  *	@page_size: the host's Base Page Size
7747  *	@cache_line_size: the host's Cache Line Size
7748  *	@chip_compat: maintain compatibility with designated chip
7749  *
7750  *	Various registers in the chip contain values which are dependent on the
7751  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7752  *	those registers with the appropriate values as passed in ...
7753  *
7754  *	@chip_compat is used to limit the set of changes that are made
7755  *	to be compatible with the indicated chip release.  This is used by
7756  *	drivers to maintain compatibility with chip register settings when
7757  *	the drivers haven't [yet] been updated with new chip support.
7758  */
t4_fixup_host_params_compat(struct adapter * adap,unsigned int page_size,unsigned int cache_line_size,enum chip_type chip_compat)7759 int t4_fixup_host_params_compat(struct adapter *adap,
7760 				unsigned int page_size,
7761 				unsigned int cache_line_size,
7762 				enum chip_type chip_compat)
7763 {
7764 	unsigned int page_shift = fls(page_size) - 1;
7765 	unsigned int sge_hps = page_shift - 10;
7766 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7767 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7768 	unsigned int fl_align_log = fls(fl_align) - 1;
7769 
7770 	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
7771 		     V_HOSTPAGESIZEPF0(sge_hps) |
7772 		     V_HOSTPAGESIZEPF1(sge_hps) |
7773 		     V_HOSTPAGESIZEPF2(sge_hps) |
7774 		     V_HOSTPAGESIZEPF3(sge_hps) |
7775 		     V_HOSTPAGESIZEPF4(sge_hps) |
7776 		     V_HOSTPAGESIZEPF5(sge_hps) |
7777 		     V_HOSTPAGESIZEPF6(sge_hps) |
7778 		     V_HOSTPAGESIZEPF7(sge_hps));
7779 
7780 	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
7781 		t4_set_reg_field(adap, A_SGE_CONTROL,
7782 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7783 				 F_EGRSTATUSPAGESIZE,
7784 				 V_INGPADBOUNDARY(fl_align_log -
7785 						  X_INGPADBOUNDARY_SHIFT) |
7786 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7787 	} else {
7788 		unsigned int pack_align;
7789 		unsigned int ingpad, ingpack;
7790 		unsigned int pcie_cap;
7791 
7792 		/* T5 introduced the separation of the Free List Padding and
7793 		 * Packing Boundaries.  Thus, we can select a smaller Padding
7794 		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7795 		 * Bandwidth, and use a Packing Boundary which is large enough
7796 		 * to avoid false sharing between CPUs, etc.
7797 		 *
7798 		 * For the PCI Link, the smaller the Padding Boundary the
7799 		 * better.  For the Memory Controller, a smaller Padding
7800 		 * Boundary is better until we cross under the Memory Line
7801 		 * Size (the minimum unit of transfer to/from Memory).  If we
7802 		 * have a Padding Boundary which is smaller than the Memory
7803 		 * Line Size, that'll involve a Read-Modify-Write cycle on the
7804 		 * Memory Controller which is never good.
7805 		 */
7806 
7807 		/* We want the Packing Boundary to be based on the Cache Line
7808 		 * Size in order to help avoid False Sharing performance
7809 		 * issues between CPUs, etc.  We also want the Packing
7810 		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
7811 		 * get best performance when the Packing Boundary is a
7812 		 * multiple of the Maximum Payload Size.
7813 		 */
7814 		pack_align = fl_align;
7815 		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
7816 		if (pcie_cap) {
7817 			unsigned int mps, mps_log;
7818 			u16 devctl;
7819 
7820 			/*
7821 			 * The PCIe Device Control Maximum Payload Size field
7822 			 * [bits 7:5] encodes sizes as powers of 2 starting at
7823 			 * 128 bytes.
7824 			 */
7825 			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
7826 					    &devctl);
7827 			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7828 			mps = 1 << mps_log;
7829 			if (mps > pack_align)
7830 				pack_align = mps;
7831 		}
7832 
7833 		/* N.B. T5/T6 have a crazy special interpretation of the "0"
7834 		 * value for the Packing Boundary.  This corresponds to 16
7835 		 * bytes instead of the expected 32 bytes.  So if we want 32
7836 		 * bytes, the best we can really do is 64 bytes ...
7837 		 */
7838 		if (pack_align <= 16) {
7839 			ingpack = X_INGPACKBOUNDARY_16B;
7840 			fl_align = 16;
7841 		} else if (pack_align == 32) {
7842 			ingpack = X_INGPACKBOUNDARY_64B;
7843 			fl_align = 64;
7844 		} else {
7845 			unsigned int pack_align_log = fls(pack_align) - 1;
7846 			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
7847 			fl_align = pack_align;
7848 		}
7849 
7850 		/* Use the smallest Ingress Padding which isn't smaller than
7851 		 * the Memory Controller Read/Write Size.  We'll take that as
7852 		 * being 8 bytes since we don't know of any system with a
7853 		 * wider Memory Controller Bus Width.
7854 		 */
7855 		if (is_t5(adap->params.chip))
7856 			ingpad = X_INGPADBOUNDARY_32B;
7857 		else
7858 			ingpad = X_T6_INGPADBOUNDARY_8B;
7859 
7860 		t4_set_reg_field(adap, A_SGE_CONTROL,
7861 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7862 				 F_EGRSTATUSPAGESIZE,
7863 				 V_INGPADBOUNDARY(ingpad) |
7864 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7865 		t4_set_reg_field(adap, A_SGE_CONTROL2,
7866 				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
7867 				 V_INGPACKBOUNDARY(ingpack));
7868 	}
7869 	/*
7870 	 * Adjust various SGE Free List Host Buffer Sizes.
7871 	 *
7872 	 * This is something of a crock since we're using fixed indices into
7873 	 * the array which are also known by the sge.c code and the T4
7874 	 * Firmware Configuration File.  We need to come up with a much better
7875 	 * approach to managing this array.  For now, the first four entries
7876 	 * are:
7877 	 *
7878 	 *   0: Host Page Size
7879 	 *   1: 64KB
7880 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7881 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7882 	 *
7883 	 * For the single-MTU buffers in unpacked mode we need to include
7884 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7885 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7886 	 * Padding boundary.  All of these are accommodated in the Factory
7887 	 * Default Firmware Configuration File but we need to adjust it for
7888 	 * this host's cache line size.
7889 	 */
7890 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
7891 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
7892 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
7893 		     & ~(fl_align-1));
7894 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
7895 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
7896 		     & ~(fl_align-1));
7897 
7898 	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
7899 
7900 	return 0;
7901 }
7902 
7903 /**
7904  *	t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7905  *	@adap: the adapter
7906  *	@page_size: the host's Base Page Size
7907  *	@cache_line_size: the host's Cache Line Size
7908  *
7909  *	Various registers in T4 contain values which are dependent on the
7910  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7911  *	those registers with the appropriate values as passed in ...
7912  *
7913  *	This routine makes changes which are compatible with T4 chips.
7914  */
t4_fixup_host_params(struct adapter * adap,unsigned int page_size,unsigned int cache_line_size)7915 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7916 			 unsigned int cache_line_size)
7917 {
7918 	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7919 					   T4_LAST_REV);
7920 }
7921 
7922 /**
7923  *	t4_fw_initialize - ask FW to initialize the device
7924  *	@adap: the adapter
7925  *	@mbox: mailbox to use for the FW command
7926  *
7927  *	Issues a command to FW to partially initialize the device.  This
7928  *	performs initialization that generally doesn't depend on user input.
7929  */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)7930 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7931 {
7932 	struct fw_initialize_cmd c;
7933 
7934 	memset(&c, 0, sizeof(c));
7935 	INIT_CMD(c, INITIALIZE, WRITE);
7936 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7937 }
7938 
7939 /**
7940  *	t4_query_params_rw - query FW or device parameters
7941  *	@adap: the adapter
7942  *	@mbox: mailbox to use for the FW command
7943  *	@pf: the PF
7944  *	@vf: the VF
7945  *	@nparams: the number of parameters
7946  *	@params: the parameter names
7947  *	@val: the parameter values
7948  *	@rw: Write and read flag
7949  *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
7950  *
7951  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7952  *	queried at once.
7953  */
t4_query_params_rw(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val,int rw,bool sleep_ok)7954 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7955 		       unsigned int vf, unsigned int nparams, const u32 *params,
7956 		       u32 *val, int rw, bool sleep_ok)
7957 {
7958 	int i, ret;
7959 	struct fw_params_cmd c;
7960 	__be32 *p = &c.param[0].mnem;
7961 
7962 	if (nparams > 7)
7963 		return -EINVAL;
7964 
7965 	memset(&c, 0, sizeof(c));
7966 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7967 				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
7968 				  V_FW_PARAMS_CMD_PFN(pf) |
7969 				  V_FW_PARAMS_CMD_VFN(vf));
7970 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7971 
7972 	for (i = 0; i < nparams; i++) {
7973 		*p++ = cpu_to_be32(*params++);
7974 		if (rw)
7975 			*p = cpu_to_be32(*(val + i));
7976 		p++;
7977 	}
7978 
7979 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7980 
7981 	/*
7982 	 * We always copy back the reults, even if there's an error.  We'll
7983 	 * get an error if any of the parameters was unknown to the Firmware,
7984 	 * but there will be results for the others ...  (Older Firmware
7985 	 * stopped at the first unknown parameter; newer Firmware processes
7986 	 * them all and flags the unknown parameters with a return value of
7987 	 * ~0UL.)
7988 	 */
7989 	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7990 		*val++ = be32_to_cpu(*p);
7991 
7992 	return ret;
7993 }
7994 
/* Query FW/device parameters, sleeping while awaiting completion. */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params,
				  val, 0, true);
}
8002 
/* Non-sleeping variant of t4_query_params() for atomic contexts. */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params,
				  val, 0, false);
}
8010 
8011 /**
8012  *      t4_set_params_timeout - sets FW or device parameters
8013  *      @adap: the adapter
8014  *      @mbox: mailbox to use for the FW command
8015  *      @pf: the PF
8016  *      @vf: the VF
8017  *      @nparams: the number of parameters
8018  *      @params: the parameter names
8019  *      @val: the parameter values
8020  *      @timeout: the timeout time
8021  *
8022  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
8023  *      specified at once.
8024  */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)8025 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
8026 			  unsigned int pf, unsigned int vf,
8027 			  unsigned int nparams, const u32 *params,
8028 			  const u32 *val, int timeout)
8029 {
8030 	struct fw_params_cmd c;
8031 	__be32 *p = &c.param[0].mnem;
8032 
8033 	if (nparams > 7)
8034 		return -EINVAL;
8035 
8036 	memset(&c, 0, sizeof(c));
8037 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
8038 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8039 				  V_FW_PARAMS_CMD_PFN(pf) |
8040 				  V_FW_PARAMS_CMD_VFN(vf));
8041 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8042 
8043 	while (nparams--) {
8044 		*p++ = cpu_to_be32(*params++);
8045 		*p++ = cpu_to_be32(*val++);
8046 	}
8047 
8048 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
8049 }
8050 
8051 /**
8052  *	t4_set_params - sets FW or device parameters
8053  *	@adap: the adapter
8054  *	@mbox: mailbox to use for the FW command
8055  *	@pf: the PF
8056  *	@vf: the VF
8057  *	@nparams: the number of parameters
8058  *	@params: the parameter names
8059  *	@val: the parameter values
8060  *
8061  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
8062  *	specified at once.
8063  */
t4_set_params(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val)8064 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
8065 		  unsigned int vf, unsigned int nparams, const u32 *params,
8066 		  const u32 *val)
8067 {
8068 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
8069 				     FW_CMD_MAX_TIMEOUT);
8070 }
8071 
8072 /**
8073  *	t4_cfg_pfvf - configure PF/VF resource limits
8074  *	@adap: the adapter
8075  *	@mbox: mailbox to use for the FW command
8076  *	@pf: the PF being configured
8077  *	@vf: the VF being configured
8078  *	@txq: the max number of egress queues
8079  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
8080  *	@rxqi: the max number of interrupt-capable ingress queues
8081  *	@rxq: the max number of interruptless ingress queues
8082  *	@tc: the PCI traffic class
8083  *	@vi: the max number of virtual interfaces
8084  *	@cmask: the channel access rights mask for the PF/VF
8085  *	@pmask: the port access rights mask for the PF/VF
8086  *	@nexact: the maximum number of exact MPS filters
8087  *	@rcaps: read capabilities
8088  *	@wxcaps: write/execute capabilities
8089  *
8090  *	Configures resource limits and capabilities for a physical or virtual
8091  *	function.
8092  */
t4_cfg_pfvf(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int txq,unsigned int txq_eth_ctrl,unsigned int rxqi,unsigned int rxq,unsigned int tc,unsigned int vi,unsigned int cmask,unsigned int pmask,unsigned int nexact,unsigned int rcaps,unsigned int wxcaps)8093 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
8094 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
8095 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
8096 		unsigned int vi, unsigned int cmask, unsigned int pmask,
8097 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
8098 {
8099 	struct fw_pfvf_cmd c;
8100 
8101 	memset(&c, 0, sizeof(c));
8102 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
8103 				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
8104 				  V_FW_PFVF_CMD_VFN(vf));
8105 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8106 	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
8107 				     V_FW_PFVF_CMD_NIQ(rxq));
8108 	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
8109 				    V_FW_PFVF_CMD_PMASK(pmask) |
8110 				    V_FW_PFVF_CMD_NEQ(txq));
8111 	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
8112 				      V_FW_PFVF_CMD_NVI(vi) |
8113 				      V_FW_PFVF_CMD_NEXACTF(nexact));
8114 	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
8115 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
8116 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
8117 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8118 }
8119 
8120 /**
8121  *	t4_alloc_vi_func - allocate a virtual interface
8122  *	@adap: the adapter
8123  *	@mbox: mailbox to use for the FW command
8124  *	@port: physical port associated with the VI
8125  *	@pf: the PF owning the VI
8126  *	@vf: the VF owning the VI
8127  *	@nmac: number of MAC addresses needed (1 to 5)
8128  *	@mac: the MAC addresses of the VI
8129  *	@rss_size: size of RSS table slice associated with this VI
8130  *	@portfunc: which Port Application Function MAC Address is desired
8131  *	@idstype: Intrusion Detection Type
8132  *
8133  *	Allocates a virtual interface for the given physical port.  If @mac is
8134  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
8135  *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
8136  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
8137  *	stored consecutively so the space needed is @nmac * 6 bytes.
8138  *	Returns a negative error number or the non-negative VI id.
8139  */
t4_alloc_vi_func(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,unsigned int * rss_size,u8 * vivld,u8 * vin,unsigned int portfunc,unsigned int idstype)8140 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
8141 		     unsigned int port, unsigned int pf, unsigned int vf,
8142 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
8143 		     u8 *vivld, u8 *vin,
8144 		     unsigned int portfunc, unsigned int idstype)
8145 {
8146 	int ret;
8147 	struct fw_vi_cmd c;
8148 
8149 	memset(&c, 0, sizeof(c));
8150 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
8151 				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
8152 				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
8153 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
8154 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
8155 				     V_FW_VI_CMD_FUNC(portfunc));
8156 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
8157 	c.nmac = nmac - 1;
8158 	if(!rss_size)
8159 		c.norss_rsssize = F_FW_VI_CMD_NORSS;
8160 
8161 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8162 	if (ret)
8163 		return ret;
8164 
8165 	if (mac) {
8166 		memcpy(mac, c.mac, sizeof(c.mac));
8167 		switch (nmac) {
8168 		case 5:
8169 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
8170 			/* FALLTHRU */
8171 		case 4:
8172 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
8173 			/* FALLTHRU */
8174 		case 3:
8175 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
8176 			/* FALLTHRU */
8177 		case 2:
8178 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
8179 		}
8180 	}
8181 	if (rss_size)
8182 		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
8183 
8184 	if (vivld)
8185 		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
8186 
8187 	if (vin)
8188 		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
8189 
8190 	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
8191 }
8192 
8193 /**
8194  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
8195  *      @adap: the adapter
8196  *      @mbox: mailbox to use for the FW command
8197  *      @port: physical port associated with the VI
8198  *      @pf: the PF owning the VI
8199  *      @vf: the VF owning the VI
8200  *      @nmac: number of MAC addresses needed (1 to 5)
8201  *      @mac: the MAC addresses of the VI
8202  *      @rss_size: size of RSS table slice associated with this VI
8203  *
8204  *	backwards compatible and convieniance routine to allocate a Virtual
8205  *	Interface with a Ethernet Port Application Function and Intrustion
8206  *	Detection System disabled.
8207  */
t4_alloc_vi(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,unsigned int * rss_size,u8 * vivld,u8 * vin)8208 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
8209 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
8210 		unsigned int *rss_size, u8 *vivld, u8 *vin)
8211 {
8212 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
8213 				vivld, vin, FW_VI_FUNC_ETH, 0);
8214 }
8215 
8216 
8217 /**
8218  * 	t4_free_vi - free a virtual interface
8219  * 	@adap: the adapter
8220  * 	@mbox: mailbox to use for the FW command
8221  * 	@pf: the PF owning the VI
8222  * 	@vf: the VF owning the VI
8223  * 	@viid: virtual interface identifiler
8224  *
8225  * 	Free a previously allocated virtual interface.
8226  */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)8227 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8228 	       unsigned int vf, unsigned int viid)
8229 {
8230 	struct fw_vi_cmd c;
8231 
8232 	memset(&c, 0, sizeof(c));
8233 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8234 				  F_FW_CMD_REQUEST |
8235 				  F_FW_CMD_EXEC |
8236 				  V_FW_VI_CMD_PFN(pf) |
8237 				  V_FW_VI_CMD_VFN(vf));
8238 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8239 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8240 
8241 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8242 }
8243 
8244 /**
8245  *	t4_set_rxmode - set Rx properties of a virtual interface
8246  *	@adap: the adapter
8247  *	@mbox: mailbox to use for the FW command
8248  *	@viid: the VI id
8249  *	@mtu: the new MTU or -1
8250  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8251  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8252  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8253  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8254  *	@sleep_ok: if true we may sleep while awaiting command completion
8255  *
8256  *	Sets Rx properties of a virtual interface.
8257  */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)8258 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8259 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
8260 		  bool sleep_ok)
8261 {
8262 	struct fw_vi_rxmode_cmd c;
8263 
8264 	/* convert to FW values */
8265 	if (mtu < 0)
8266 		mtu = M_FW_VI_RXMODE_CMD_MTU;
8267 	if (promisc < 0)
8268 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8269 	if (all_multi < 0)
8270 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8271 	if (bcast < 0)
8272 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8273 	if (vlanex < 0)
8274 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8275 
8276 	memset(&c, 0, sizeof(c));
8277 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8278 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8279 				   V_FW_VI_RXMODE_CMD_VIID(viid));
8280 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8281 	c.mtu_to_vlanexen =
8282 		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8283 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8284 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8285 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8286 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8287 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8288 }
8289 
/**
 *	t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@vni: the VNI id for the tunnel protocol
 *	@vni_mask: mask for the VNI id
 *	@dip_hit: to enable DIP match for the MPS entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an MPS entry with specified MAC address and VNI value.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	/* One exact-match-with-VNI entry follows the command header. */
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	/* FW_VI_MAC_ADD_MAC lets the firmware choose a free TCAM index. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	/* On success, the reply echoes the index the firmware assigned. */
	if (ret == 0)
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}
8338 
/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@port_id: the port index
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	/* One raw entry follows the command header. */
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Requested MPS TCAM index for this entry. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/*
	 * Copy the address and the mask.  The 2-byte offset skips the
	 * leading pad of the 8-byte data1/data1m words -- presumably the
	 * hardware expects the 6-byte MAC right-justified; confirm against
	 * the firmware interface definition.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* The firmware must honor the exact index we asked for. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}
8394 
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * Addresses are submitted in chunks of at most ARRAY_SIZE(c.u.exact)
	 * exact-match entries per mailbox command.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* FW_VI_MAC_ADD_MAC lets the firmware pick each index. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/*
		 * An out-of-range reply index means the firmware could not
		 * store that address; fall back to the hash filter if the
		 * caller supplied one.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >=  max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first chunk may carry the "free existing" flag. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* -FW_ENOMEM is a partial success: report what we did allocate. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
8487 
/**
 *	t4_free_encap_mac_filt - frees MPS entry at given index
 *	@adap: the adapter
 *	@viid: the VI id
 *	@idx: index of MPS entry to be freed
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the MPS entry at supplied index
 *
 *	Returns a negative error number or zero on success
 */
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			   int idx, bool sleep_ok)
{
	struct fw_vi_mac_exact *p;
	struct fw_vi_mac_cmd c;
	/* The entry is selected by @idx; the address written is all-zero. */
	u8 addr[] = {0,0,0,0,0,0};
	int ret = 0;
	u32 exact;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST |
				   F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  exact |
					  V_FW_CMD_LEN16(1));
	p = c.u.exact;
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	return ret;
}
8526 
/**
 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index of the entry in mps tcam
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Removes the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  raw |
					  V_FW_CMD_LEN16(1));

	/* ID-based free: the firmware releases the entry at @idx. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/*
	 * Copy the address and the mask, right-justified in the 8-byte
	 * data1/data1m words (2-byte leading pad), matching the layout
	 * used when the entry was allocated.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
}
8576 
/**
 *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to free filters for
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, unsigned int naddr,
		      const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	/* T4 has a smaller MPS CLS SRAM than T5 and later chips. */
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * Addresses are submitted in chunks of at most ARRAY_SIZE(c.u.exact)
	 * exact-match entries per mailbox command.
	 */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_CMD_EXEC(0) |
				     V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 =
				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					    V_FW_CMD_LEN16(len16));

		/* FW_VI_MAC_MAC_BASED_FREE: free each entry by address. */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* An in-range reply index confirms the entry was freed. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}
8651 
/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: if non-NULL, where to return the SMT index of the entry
 *		  -- presumably; verify against t4_add_mac()
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
	/* This will add this mac address to the destination TCAM region */
	return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0);
}
8680 
/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): this builds a FW_VI_MAC_CMD but encodes the VI id
	 * with V_FW_VI_ENABLE_CMD_VIID rather than V_FW_VI_MAC_CMD_VIID.
	 * Presumably the two field layouts coincide -- confirm against
	 * t4fw_interface.h.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
8708 
8709 /**
8710  *      t4_enable_vi_params - enable/disable a virtual interface
8711  *      @adap: the adapter
8712  *      @mbox: mailbox to use for the FW command
8713  *      @viid: the VI id
8714  *      @rx_en: 1=enable Rx, 0=disable Rx
8715  *      @tx_en: 1=enable Tx, 0=disable Tx
8716  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8717  *
8718  *      Enables/disables a virtual interface.  Note that setting DCB Enable
8719  *      only makes sense when enabling a Virtual Interface ...
8720  */
t4_enable_vi_params(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en,bool dcb_en)8721 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8722 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8723 {
8724 	struct fw_vi_enable_cmd c;
8725 
8726 	memset(&c, 0, sizeof(c));
8727 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8728 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8729 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8730 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8731 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8732 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8733 				     FW_LEN16(c));
8734 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8735 }
8736 
8737 /**
8738  *	t4_enable_vi - enable/disable a virtual interface
8739  *	@adap: the adapter
8740  *	@mbox: mailbox to use for the FW command
8741  *	@viid: the VI id
8742  *	@rx_en: 1=enable Rx, 0=disable Rx
8743  *	@tx_en: 1=enable Tx, 0=disable Tx
8744  *
8745  *	Enables/disables a virtual interface.  Note that setting DCB Enable
8746  *	only makes sense when enabling a Virtual Interface ...
8747  */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)8748 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8749 		 bool rx_en, bool tx_en)
8750 {
8751 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8752 }
8753 
8754 /**
8755  *	t4_enable_pi_params - enable/disable a Port's Virtual Interface
8756  *      @adap: the adapter
8757  *      @mbox: mailbox to use for the FW command
8758  *      @pi: the Port Information structure
8759  *      @rx_en: 1=enable Rx, 0=disable Rx
8760  *      @tx_en: 1=enable Tx, 0=disable Tx
8761  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8762  *
8763  *      Enables/disables a Port's Virtual Interface.  Note that setting DCB
8764  *	Enable only makes sense when enabling a Virtual Interface ...
8765  *	If the Virtual Interface enable/disable operation is successful,
8766  *	we notify the OS-specific code of a potential Link Status change
8767  *	via the OS Contract API t4_os_link_changed().
8768  */
t4_enable_pi_params(struct adapter * adap,unsigned int mbox,struct port_info * pi,bool rx_en,bool tx_en,bool dcb_en)8769 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8770 			struct port_info *pi,
8771 			bool rx_en, bool tx_en, bool dcb_en)
8772 {
8773 	int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8774 				      rx_en, tx_en, dcb_en);
8775 	if (ret)
8776 		return ret;
8777 	t4_os_link_changed(adap, pi->port_id,
8778 			   rx_en && tx_en && pi->link_cfg.link_ok);
8779 	return 0;
8780 }
8781 
8782 /**
8783  *	t4_identify_port - identify a VI's port by blinking its LED
8784  *	@adap: the adapter
8785  *	@mbox: mailbox to use for the FW command
8786  *	@viid: the VI id
8787  *	@nblinks: how many times to blink LED at 2.5 Hz
8788  *
8789  *	Identifies a VI's port by blinking its LED.
8790  */
t4_identify_port(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int nblinks)8791 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8792 		     unsigned int nblinks)
8793 {
8794 	struct fw_vi_enable_cmd c;
8795 
8796 	memset(&c, 0, sizeof(c));
8797 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8798 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8799 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8800 	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8801 	c.blinkdur = cpu_to_be16(nblinks);
8802 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8803 }
8804 
8805 /**
8806  *	t4_iq_stop - stop an ingress queue and its FLs
8807  *	@adap: the adapter
8808  *	@mbox: mailbox to use for the FW command
8809  *	@pf: the PF owning the queues
8810  *	@vf: the VF owning the queues
8811  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8812  *	@iqid: ingress queue id
8813  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8814  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8815  *
8816  *	Stops an ingress queue and its associated FLs, if any.  This causes
8817  *	any current or future data/messages destined for these queues to be
8818  *	tossed.
8819  */
t4_iq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8820 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8821 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8822 	       unsigned int fl0id, unsigned int fl1id)
8823 {
8824 	struct fw_iq_cmd c;
8825 
8826 	memset(&c, 0, sizeof(c));
8827 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8828 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8829 				  V_FW_IQ_CMD_VFN(vf));
8830 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8831 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8832 	c.iqid = cpu_to_be16(iqid);
8833 	c.fl0id = cpu_to_be16(fl0id);
8834 	c.fl1id = cpu_to_be16(fl1id);
8835 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8836 }
8837 
8838 /**
8839  *	t4_iq_free - free an ingress queue and its FLs
8840  *	@adap: the adapter
8841  *	@mbox: mailbox to use for the FW command
8842  *	@pf: the PF owning the queues
8843  *	@vf: the VF owning the queues
8844  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8845  *	@iqid: ingress queue id
8846  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8847  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8848  *
8849  *	Frees an ingress queue and its associated FLs, if any.
8850  */
t4_iq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8851 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8852 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8853 	       unsigned int fl0id, unsigned int fl1id)
8854 {
8855 	struct fw_iq_cmd c;
8856 
8857 	memset(&c, 0, sizeof(c));
8858 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8859 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8860 				  V_FW_IQ_CMD_VFN(vf));
8861 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8862 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8863 	c.iqid = cpu_to_be16(iqid);
8864 	c.fl0id = cpu_to_be16(fl0id);
8865 	c.fl1id = cpu_to_be16(fl1id);
8866 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8867 }
8868 
8869 /**
8870  *	t4_eth_eq_free - free an Ethernet egress queue
8871  *	@adap: the adapter
8872  *	@mbox: mailbox to use for the FW command
8873  *	@pf: the PF owning the queue
8874  *	@vf: the VF owning the queue
8875  *	@eqid: egress queue id
8876  *
8877  *	Frees an Ethernet egress queue.
8878  */
t4_eth_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8879 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8880 		   unsigned int vf, unsigned int eqid)
8881 {
8882 	struct fw_eq_eth_cmd c;
8883 
8884 	memset(&c, 0, sizeof(c));
8885 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8886 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8887 				  V_FW_EQ_ETH_CMD_PFN(pf) |
8888 				  V_FW_EQ_ETH_CMD_VFN(vf));
8889 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8890 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8891 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8892 }
8893 
8894 /**
8895  *	t4_ctrl_eq_free - free a control egress queue
8896  *	@adap: the adapter
8897  *	@mbox: mailbox to use for the FW command
8898  *	@pf: the PF owning the queue
8899  *	@vf: the VF owning the queue
8900  *	@eqid: egress queue id
8901  *
8902  *	Frees a control egress queue.
8903  */
t4_ctrl_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8904 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8905 		    unsigned int vf, unsigned int eqid)
8906 {
8907 	struct fw_eq_ctrl_cmd c;
8908 
8909 	memset(&c, 0, sizeof(c));
8910 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8911 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8912 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
8913 				  V_FW_EQ_CTRL_CMD_VFN(vf));
8914 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8915 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8916 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8917 }
8918 
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8943 
8944 /**
8945  * Return the highest speed set in the port capabilities, in Mb/s.
8946  */
t4_link_fwcap_to_speed(fw_port_cap32_t caps)8947 unsigned int t4_link_fwcap_to_speed(fw_port_cap32_t caps)
8948 {
8949 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
8950 		do { \
8951 			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8952 				return __speed; \
8953 		} while (0)
8954 
8955 	TEST_SPEED_RETURN(400G, 400000);
8956 	TEST_SPEED_RETURN(200G, 200000);
8957 	TEST_SPEED_RETURN(100G, 100000);
8958 	TEST_SPEED_RETURN(50G,   50000);
8959 	TEST_SPEED_RETURN(40G,   40000);
8960 	TEST_SPEED_RETURN(25G,   25000);
8961 	TEST_SPEED_RETURN(10G,   10000);
8962 	TEST_SPEED_RETURN(1G,     1000);
8963 	TEST_SPEED_RETURN(100M,    100);
8964 
8965 	#undef TEST_SPEED_RETURN
8966 
8967 	return 0;
8968 }
8969 
8970 /**
8971  *	t4_link_fwcap_to_fwspeed - return highest speed in Port Capabilities
8972  *	@acaps: advertised Port Capabilities
8973  *
8974  *	Get the highest speed for the port from the advertised Port
8975  *	Capabilities.  It will be either the highest speed from the list of
8976  *	speeds or whatever user has set using ethtool.
8977  */
t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps)8978 fw_port_cap32_t t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps)
8979 {
8980 	#define TEST_SPEED_RETURN(__caps_speed) \
8981 		do { \
8982 			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8983 				return FW_PORT_CAP32_SPEED_##__caps_speed; \
8984 		} while (0)
8985 
8986 	TEST_SPEED_RETURN(400G);
8987 	TEST_SPEED_RETURN(200G);
8988 	TEST_SPEED_RETURN(100G);
8989 	TEST_SPEED_RETURN(50G);
8990 	TEST_SPEED_RETURN(40G);
8991 	TEST_SPEED_RETURN(25G);
8992 	TEST_SPEED_RETURN(10G);
8993 	TEST_SPEED_RETURN(1G);
8994 	TEST_SPEED_RETURN(100M);
8995 
8996 	#undef TEST_SPEED_RETURN
8997 
8998 	return 0;
8999 }
9000 
9001 /**
9002  *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
9003  *	@caps16: a 16-bit Port Capabilities value
9004  *
9005  *	Returns the equivalent 32-bit Port Capabilities value.
9006  */
fwcaps16_to_caps32(fw_port_cap16_t caps16)9007 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
9008 {
9009 	fw_port_cap32_t caps32 = 0;
9010 
9011 	#define CAP16_TO_CAP32(__cap) \
9012 		do { \
9013 			if (caps16 & FW_PORT_CAP_##__cap) \
9014 				caps32 |= FW_PORT_CAP32_##__cap; \
9015 		} while (0)
9016 
9017 	CAP16_TO_CAP32(SPEED_100M);
9018 	CAP16_TO_CAP32(SPEED_1G);
9019 	CAP16_TO_CAP32(SPEED_25G);
9020 	CAP16_TO_CAP32(SPEED_10G);
9021 	CAP16_TO_CAP32(SPEED_40G);
9022 	CAP16_TO_CAP32(SPEED_100G);
9023 	CAP16_TO_CAP32(FC_RX);
9024 	CAP16_TO_CAP32(FC_TX);
9025 	CAP16_TO_CAP32(ANEG);
9026 	CAP16_TO_CAP32(FORCE_PAUSE);
9027 	CAP16_TO_CAP32(MDIAUTO);
9028 	CAP16_TO_CAP32(MDISTRAIGHT);
9029 	CAP16_TO_CAP32(FEC_RS);
9030 	CAP16_TO_CAP32(FEC_BASER_RS);
9031 	CAP16_TO_CAP32(802_3_PAUSE);
9032 	CAP16_TO_CAP32(802_3_ASM_DIR);
9033 
9034 	#undef CAP16_TO_CAP32
9035 
9036 	return caps32;
9037 }
9038 
9039 /**
9040  *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
9041  *	@caps32: a 32-bit Port Capabilities value
9042  *
9043  *	Returns the equivalent 16-bit Port Capabilities value.  Note that
9044  *	not all 32-bit Port Capabilities can be represented in the 16-bit
9045  *	Port Capabilities and some fields/values may not make it.
9046  */
fwcaps32_to_caps16(fw_port_cap32_t caps32)9047 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
9048 {
9049 	fw_port_cap16_t caps16 = 0;
9050 
9051 	#define CAP32_TO_CAP16(__cap) \
9052 		do { \
9053 			if (caps32 & FW_PORT_CAP32_##__cap) \
9054 				caps16 |= FW_PORT_CAP_##__cap; \
9055 		} while (0)
9056 
9057 	CAP32_TO_CAP16(SPEED_100M);
9058 	CAP32_TO_CAP16(SPEED_1G);
9059 	CAP32_TO_CAP16(SPEED_10G);
9060 	CAP32_TO_CAP16(SPEED_25G);
9061 	CAP32_TO_CAP16(SPEED_40G);
9062 	CAP32_TO_CAP16(SPEED_100G);
9063 	CAP32_TO_CAP16(FC_RX);
9064 	CAP32_TO_CAP16(FC_TX);
9065 	CAP32_TO_CAP16(802_3_PAUSE);
9066 	CAP32_TO_CAP16(802_3_ASM_DIR);
9067 	CAP32_TO_CAP16(ANEG);
9068 	CAP32_TO_CAP16(FORCE_PAUSE);
9069 	CAP32_TO_CAP16(MDIAUTO);
9070 	CAP32_TO_CAP16(MDISTRAIGHT);
9071 	CAP32_TO_CAP16(FEC_RS);
9072 	CAP32_TO_CAP16(FEC_BASER_RS);
9073 
9074 	#undef CAP32_TO_CAP16
9075 
9076 	return caps16;
9077 }
9078 
/*
 * Fold the requested autonegotiation setting into *new_caps.  Fails with
 * -ENOTSUP when autoneg is requested but the port does not support it.
 * Always re-derives the MDI selection from the port capabilities.
 */
int t4_link_set_autoneg(struct port_info *pi, u8 autoneg,
			fw_port_cap32_t *new_caps)
{
	const fw_port_cap32_t pcaps = pi->link_cfg.pcaps;
	fw_port_cap32_t want = *new_caps;

	if (autoneg && !(pcaps & FW_PORT_CAP32_ANEG))
		return -ENOTSUP;

	if (autoneg)
		want |= FW_PORT_CAP32_ANEG;
	else
		want &= ~FW_PORT_CAP32_ANEG;

	/* Clear the MDI field, then re-enable auto-MDI if the port has it. */
	want &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
	if (pcaps & FW_PORT_CAP32_MDIAUTO)
		want |= FW_PORT_CAP32_MDIAUTO;

	*new_caps = want;
	return 0;
}
9101 
/*
 * Fold the requested pause (flow-control) configuration into *new_caps,
 * setting both the FC_TX/FC_RX bits and the equivalent 802.3 PAUSE /
 * ASM_DIR bits where the port supports them.  Always returns 0.
 */
int t4_link_set_pause(struct port_info *pi, cc_pause_t pause,
		      fw_port_cap32_t *new_caps)
{
	const fw_port_cap32_t pcaps = pi->link_cfg.pcaps;
	const bool tx = (pause & PAUSE_TX) != 0;
	const bool rx = (pause & PAUSE_RX) != 0;
	fw_port_cap32_t want = *new_caps;

	/* Start from a clean slate for both pause encodings. */
	want &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	want &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);

	if (tx && rx) {
		want |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
		if (pcaps & FW_PORT_CAP32_802_3_PAUSE)
			want |= FW_PORT_CAP32_802_3_PAUSE;
	} else if (tx) {
		want |= FW_PORT_CAP32_FC_TX;
		if (pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			want |= FW_PORT_CAP32_802_3_ASM_DIR;
	} else if (rx) {
		want |= FW_PORT_CAP32_FC_RX;
		if (pcaps & FW_PORT_CAP32_802_3_PAUSE)
			want |= FW_PORT_CAP32_802_3_PAUSE;
		if (pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			want |= FW_PORT_CAP32_802_3_ASM_DIR;
	}

	/* Without autonegotiated pause, force the settings on the wire. */
	if (!(pause & PAUSE_AUTONEG))
		want |= FW_PORT_CAP32_FORCE_PAUSE;

	*new_caps = want;
	return 0;
}
9133 
9134 #define T4_LINK_FEC_MASK V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)
9135 
t4_link_supported_speed_to_fec(u32 speed)9136 static fw_port_cap32_t t4_link_supported_speed_to_fec(u32 speed)
9137 {
9138 	fw_port_cap32_t caps = 0;
9139 
9140 	switch (speed) {
9141 	case 100000:
9142 		caps |= FW_PORT_CAP32_FEC_RS;
9143 		break;
9144 	case 50000:
9145 		caps |= FW_PORT_CAP32_FEC_BASER_RS;
9146 		break;
9147 	case 25000:
9148 		caps |= FW_PORT_CAP32_FEC_RS |
9149 			FW_PORT_CAP32_FEC_BASER_RS;
9150 		break;
9151 	default:
9152 		break;
9153 	}
9154 
9155 	caps |= FW_PORT_CAP32_FEC_NO_FEC;
9156 	return caps;
9157 }
9158 
/*
 * Rewrite the FEC bits in @new_caps according to the requested @fec
 * flags and the link speed @max_speed (Mb/s).  Requests that don't
 * apply to the speed are logged and dropped; if nothing valid remains,
 * fall back to the speed's default (AUTO) FEC set.
 */
static void t4_link_update_fec(struct port_info *pi, u32 max_speed,
			       cc_fec_t fec, fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps & ~T4_LINK_FEC_MASK;

	/* RS FEC is only defined for 25G and 100G links. */
	if (fec & FEC_RS) {
		if (max_speed == 100000 || max_speed == 25000)
			caps |= FW_PORT_CAP32_FEC_RS;
		else
			CH_ERR(pi->adapter,
			       "Ignoring unsupported RS FEC for speed %u\n",
			       max_speed);
	}

	/* BASE-R FEC is only defined for 25G and 50G links. */
	if (fec & FEC_BASER_RS) {
		if (max_speed == 50000 || max_speed == 25000)
			caps |= FW_PORT_CAP32_FEC_BASER_RS;
		else
			CH_ERR(pi->adapter,
			       "Ignoring unsupported BASER FEC for speed %u\n",
			       max_speed);
	}

	if (fec & FEC_NONE)
		caps |= FW_PORT_CAP32_FEC_NO_FEC;

	if (!(caps & T4_LINK_FEC_MASK)) {
		/* No explicit encoding is requested.
		 * So, default back to AUTO.
		 */
		caps |= t4_link_supported_speed_to_fec(max_speed);
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
	}

	if (fec & FEC_FORCE)
		caps |= FW_PORT_CAP32_FORCE_FEC;

	*new_caps = caps;
}
9209 
/*
 * Apply the requested FEC setting @fec to the capability set at
 * @new_caps.  Returns -ENOTSUP if the hardware has no FEC support at
 * all; 0 on success.
 */
int t4_link_set_fec(struct port_info *pi, cc_fec_t fec,
		    fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	u32 speed;

	if (!(lc->pcaps & T4_LINK_FEC_MASK))
		return -ENOTSUP;

	/*
	 * Use the speed of the established link; if the link is down,
	 * fall back to the highest advertised speed instead.
	 */
	speed = t4_link_fwcap_to_speed(lc->link_caps);
	if (speed == 0)
		speed = t4_link_fwcap_to_speed(lc->acaps);

	t4_link_update_fec(pi, speed, fec, new_caps);
	return 0;
}
9229 
9230 #define T4_LINK_SPEED_MASK V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED)
9231 
/*
 * Enable (@en != 0) or disable the speed bits in @speed within the
 * capability set at @new_caps.  Returns -ENOTSUP if any requested speed
 * is not supported by the hardware, -EINVAL if disabling would leave no
 * usable speed, 0 on success.
 */
int t4_link_set_speed(struct port_info *pi, fw_port_cap32_t speed, u8 en,
		      fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t caps = *new_caps;
	fw_port_cap32_t lower;

	/* Every requested speed bit must be within the hardware's set. */
	if ((speed & ~(lc->pcaps & T4_LINK_SPEED_MASK)) != 0)
		return -ENOTSUP;

	if (en)
		caps |= speed;
	else
		caps &= ~speed;

	/*
	 * If disabling emptied the speed set, substitute the fastest
	 * supported speed strictly below the one just removed.
	 */
	if (!(caps & T4_LINK_SPEED_MASK)) {
		lower = CAP32_SPEED(lc->pcaps) & ~speed & (speed - 1);
		if (lower == 0)
			return -EINVAL;

		caps |= t4_link_fwcap_to_fwspeed(lower);
	}

	*new_caps = caps;
	return 0;
}
9260 
/*
 * With autonegotiation disabled only one speed can be driven, so
 * collapse the advertised speed set in @new_caps to its single highest
 * member.  (@lc is currently unused; kept for symmetry with the other
 * sanitize helpers.)
 */
static void t4_link_sanitize_speed_caps(struct link_config *lc,
					fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;
	fw_port_cap32_t speeds;

	if (!(caps & FW_PORT_CAP32_ANEG)) {
		speeds = CAP32_SPEED(caps);
		caps &= ~T4_LINK_SPEED_MASK;
		caps |= t4_link_fwcap_to_fwspeed(speeds);
	}

	*new_caps = caps;
}
9275 
/*
 * Make the FEC bits in @new_caps legal with respect to the Port's
 * Physical Capabilities (@lc->pcaps) and the link speed implied by the
 * capability set itself.  Updates @new_caps in place.
 */
static void t4_link_sanitize_fec_caps(struct link_config *lc,
				      fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;
	u32 max_speed;

	/* Sanitize FECs when supported */
	if (CAP32_FEC(lc->pcaps)) {
		/* Start from the FEC set the current speed supports. */
		max_speed = t4_link_fwcap_to_speed(caps);
		tcaps = t4_link_supported_speed_to_fec(max_speed);
		if (caps & FW_PORT_CAP32_FORCE_FEC) {
			/* If the current chosen FEC params are
			 * completely invalid, then disable FEC.
			 * Else, pick only the FECs requested
			 * by user or the defaults supported by
			 * the speed.
			 */
			if (!(tcaps & CAP32_FEC(caps)))
				tcaps = FW_PORT_CAP32_FEC_NO_FEC;
			else
				tcaps &= CAP32_FEC(caps);
		}
	} else {
		/* Always force NO_FEC when FECs are not supported */
		tcaps = FW_PORT_CAP32_FEC_NO_FEC;
	}

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
		tcaps |= FW_PORT_CAP32_FORCE_FEC;
	} else {
		/* Older firmware doesn't allow driver to send request
		 * to try multiple FECs for FEC_AUTO case. So, clear
		 * the FEC caps for FEC_AUTO case because the older
		 * firmware will try all supported FECs on its own.
		 */
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
		/* More than one FEC bit set means "auto": clear them all
		 * and let the firmware choose.
		 */
		if (tcaps & (tcaps - 1))
			tcaps = 0;
	}

	/* Install the sanitized FEC bits. */
	caps &= ~T4_LINK_FEC_MASK;
	caps |= tcaps;

	*new_caps = caps;
}
9321 
/*
 * Run all capability sanitizers over @new_caps (speeds, then FECs) and
 * finally strip any bits the hardware doesn't support.
 */
static void t4_link_sanitize_caps(struct link_config *lc,
				  fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;

	t4_link_sanitize_speed_caps(lc, &caps);
	t4_link_sanitize_fec_caps(lc, &caps);

	/* Remove all unsupported caps */
	caps &= lc->pcaps;

	*new_caps = caps;
}
9336 
9337 /**
9338  *	t4_link_l1cfg_core - apply link configuration to MAC/PHY
9339  *	@adapter: the adapter
9340  *	@mbox: the Firmware Mailbox to use
9341  *	@port: the Port ID
9342  *	@lc: the Port's Link Configuration
9343  *	@rcap: new link configuration
9344  *	@sleep_ok: if true we may sleep while awaiting command completion
9345  *	@timeout: time to wait for command to finish before timing out
9346  *		(negative implies @sleep_ok=false)
9347  *
9348  *	Set up a port's MAC and PHY according to a desired link configuration.
9349  *	- If the PHY can auto-negotiate first decide what to advertise, then
9350  *	  enable/disable auto-negotiation as desired, and reset.
9351  *	- If the PHY does not auto-negotiate just reset it.
9352  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
9353  *	  otherwise do it later based on the outcome of auto-negotiation.
9354  */
int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
		       unsigned int port, struct link_config *lc,
		       fw_port_cap32_t rcap, bool sleep_ok, int timeout)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	int ret;

	/* Make the requested capabilities legal w.r.t. speed/FEC/pcaps. */
	t4_link_sanitize_caps(lc, &rcap);

	/* Build an L1 Configure command in whichever capability format
	 * (16- or 32-bit) the running firmware supports.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
				      sleep_ok, timeout);

	/* Unfortunately, even if the Requested Port Capabilities "fit" within
	 * the Physical Port Capabilities, some combinations of features may
	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
	 * Error Correction.  So if the Firmware rejects the L1 Configure
	 * request, flag that here.
	 */
	if (ret) {
		CH_ERR(adapter,
		       "Requested Port Capabilities 0x%x rejected, error %d\n",
		       rcap, -ret);
		return ret;
	}

	return 0;
}
9396 
9397 /**
9398  *	t4_restart_aneg - restart autonegotiation
9399  *	@adap: the adapter
9400  *	@mbox: mbox to use for the FW command
9401  *	@port: the port id
9402  *
9403  *	Restarts autonegotiation for the selected port.
9404  */
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)9405 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
9406 {
9407 	unsigned int fw_caps = adap->params.fw_caps_support;
9408 	struct fw_port_cmd c;
9409 
9410 	memset(&c, 0, sizeof(c));
9411 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9412 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9413 				     V_FW_PORT_CMD_PORTID(port));
9414 	c.action_to_len16 =
9415 		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9416 						 ? FW_PORT_ACTION_L1_CFG
9417 						 : FW_PORT_ACTION_L1_CFG32) |
9418 			    FW_LEN16(c));
9419 	if (fw_caps == FW_CAPS16)
9420 		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
9421 	else
9422 		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
9423 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9424 }
9425 
9426 /**
9427  *	t4_init_link_config - initialize a link's SW state
9428  *	@pi: the port info
9429  *	@pcaps: link Port Capabilities
9430  *	@acaps: link current Advertised Port Capabilities
9431  *
9432  *	Initializes the SW state maintained for each link, including the link's
9433  *	capabilities and default speed/flow-control/autonegotiation settings.
9434  */
static void t4_init_link_config(struct port_info *pi, fw_port_cap32_t pcaps,
				fw_port_cap32_t acaps)
{
	struct link_config *lc = &pi->link_cfg;
	bool fresh = (lc->admin_caps == 0 || lc->pcaps != pcaps);

	/*
	 * On first-time initialization, or when the transceiver module
	 * changed, rebuild the administrative capabilities from the
	 * module's advertised caps with FEC defaulted to automatic.
	 */
	if (fresh) {
		fw_port_cap32_t new_caps = acaps;

		t4_link_update_fec(pi, t4_link_fwcap_to_speed(acaps),
				   FEC_AUTO, &new_caps);
		lc->admin_caps = new_caps;
	}

	lc->pcaps = pcaps;
	lc->acaps = acaps;
	lc->lpacaps = 0;
	lc->link_caps = 0;
}
9456 
9457 /**
9458  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
9459  *	@link_down_rc: Link Down Reason Code
9460  *
9461  *	Returns a string representation of the Link Down Reason Code.
9462  */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	return (link_down_rc < ARRAY_SIZE(reason)) ? reason[link_down_rc]
						   : "Bad Reason Code";
}
9481 
9482 /**
9483  *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
9484  *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
9485  *
9486  *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
9487  *	32-bit Port Capabilities value.
9488  */
lstatus_to_fwcap(u32 lstatus)9489 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
9490 {
9491 	fw_port_cap32_t linkattr = 0;
9492 
9493 	/*
9494 	 * Unfortunately the format of the Link Status in the old
9495 	 * 16-bit Port Information message isn't the same as the
9496 	 * 16-bit Port Capabilities bitfield used everywhere else ...
9497 	 */
9498 	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
9499 		linkattr |= FW_PORT_CAP32_FC_RX;
9500 	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
9501 		linkattr |= FW_PORT_CAP32_FC_TX;
9502 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
9503 		linkattr |= FW_PORT_CAP32_SPEED_100M;
9504 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
9505 		linkattr |= FW_PORT_CAP32_SPEED_1G;
9506 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
9507 		linkattr |= FW_PORT_CAP32_SPEED_10G;
9508 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
9509 		linkattr |= FW_PORT_CAP32_SPEED_25G;
9510 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
9511 		linkattr |= FW_PORT_CAP32_SPEED_40G;
9512 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
9513 		linkattr |= FW_PORT_CAP32_SPEED_100G;
9514 
9515 	return linkattr;
9516 }
9517 
9518 /**
9519  *	t4_handle_get_port_info - process a FW reply message
9520  *	@pi: the port info
9521  *	@rpl: start of the FW message
9522  *
9523  *	Processes a GET_PORT_INFO FW reply message.
9524  */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
	const struct fw_port_cmd *cmd = (const void *)rpl;
	int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int link_ok, linkdnrc;
	enum fw_port_type port_type;
	enum fw_port_module_type mod_type;
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;

	/*
	 * Extract the various fields from the Port Information message.
	 * The 16-bit and 32-bit message formats carry the same logical
	 * information in different layouts.
	 */
	switch (action) {
	case FW_PORT_ACTION_GET_PORT_INFO: {
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
		/* 16-bit capabilities must be widened to the 32-bit format. */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
		linkattr = lstatus_to_fwcap(lstatus);
		break;
	}

	case FW_PORT_ACTION_GET_PORT_INFO32: {
		u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);

		link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
		break;
	}

	default:
		CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n",
		       be32_to_cpu(cmd->action_to_len16));
		return;
	}

	/*
	 * Reset state for communicating new Transceiver Module status and
	 * whether the OS-dependent layer wants us to redo the current
	 * "sticky" L1 Configure Link Parameters.
	 */
	lc->new_module = false;
	lc->redo_l1cfg = false;

	if (mod_type != pi->mod_type) {
		/*
		 * Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlaying Port Type reported to the Host Drivers.  As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier.  Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		/*
		 * Record new Module Type information.
		 */
		pi->mod_type = mod_type;

		/*
		 * Let the OS-dependent layer know if we have a new
		 * Transceiver Module inserted.
		 */
		lc->new_module = t4_is_inserted_mod_type(mod_type);

		if (lc->new_module)
			t4_init_link_config(pi, pcaps, acaps);
		t4_os_portmod_changed(adapter, pi->port_id);
	}

	if (link_ok != lc->link_ok || acaps != lc->acaps ||
	    lpacaps != lc->lpacaps || linkattr != lc->link_caps) {
		/* something changed */
		if (!link_ok && lc->link_ok) {
			/* Link just went down: record and report why. */
			lc->link_down_rc = linkdnrc;
			CH_WARN_RATELIMIT(adapter,
				"Port %d link down, reason: %s\n",
				pi->tx_chan, t4_link_down_rc_str(linkdnrc));
		}

		lc->link_ok = link_ok;
		lc->acaps = acaps;
		lc->lpacaps = lpacaps;
		lc->link_caps = linkattr;

		t4_os_link_changed(adapter, pi->port_id, link_ok);
	}

	/*
	 * If we have a new Transceiver Module and the OS-dependent code has
	 * told us that it wants us to redo whatever "sticky" L1 Configuration
	 * Link Parameters are set, do that now.
	 */
	if (lc->new_module && lc->redo_l1cfg) {
		int ret;

		/*
		 * Save the current L1 Configuration and restore it if an
		 * error occurs.  We probably should fix the l1_cfg*()
		 * routines not to change the link_config when an error
		 * occurs ...
		 */
		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc,
				       lc->admin_caps);
		if (ret) {
			CH_WARN(adapter,
				"Attempt to update new Transceiver Module settings failed\n");
		}
	}
	lc->new_module = false;
	lc->redo_l1cfg = false;
}
9654 
9655 /**
9656  *	t4_update_port_info - retrieve and update port information if changed
9657  *	@pi: the port_info
9658  *
9659  *	We issue a Get Port Information Command to the Firmware and, if
9660  *	successful, we check to see if anything is different from what we
9661  *	last recorded and update things accordingly.
9662  */
t4_update_port_info(struct port_info * pi)9663 int t4_update_port_info(struct port_info *pi)
9664 {
9665 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9666 	struct fw_port_cmd port_cmd;
9667 	int ret;
9668 
9669 	memset(&port_cmd, 0, sizeof port_cmd);
9670 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9671 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9672 					    V_FW_PORT_CMD_PORTID(pi->lport));
9673 	port_cmd.action_to_len16 = cpu_to_be32(
9674 		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9675 				     ? FW_PORT_ACTION_GET_PORT_INFO
9676 				     : FW_PORT_ACTION_GET_PORT_INFO32) |
9677 		FW_LEN16(port_cmd));
9678 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9679 			 &port_cmd, sizeof(port_cmd), &port_cmd);
9680 	if (ret)
9681 		return ret;
9682 
9683 	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
9684 	return 0;
9685 }
9686 
9687 /**
9688  *	t4_get_link_params - retrieve basic link parameters for given port
9689  *	@pi: the port
9690  *	@link_okp: value return pointer for link up/down
9691  *	@speedp: value return pointer for speed (Mb/s)
9692  *	@mtup: value return pointer for mtu
9693  *
9694  *	Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
9695  *	and MTU for a specified port.  A negative error is returned on
9696  *	failure; 0 on success.
9697  */
t4_get_link_params(struct port_info * pi,unsigned int * link_okp,unsigned int * speedp,unsigned int * mtup)9698 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
9699 		       unsigned int *speedp, unsigned int *mtup)
9700 {
9701 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9702 	struct fw_port_cmd port_cmd;
9703 	unsigned int action, link_ok, mtu;
9704 	fw_port_cap32_t linkattr;
9705 	int ret;
9706 
9707 	memset(&port_cmd, 0, sizeof port_cmd);
9708 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9709 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9710 					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
9711 	action = (fw_caps == FW_CAPS16
9712 		  ? FW_PORT_ACTION_GET_PORT_INFO
9713 		  : FW_PORT_ACTION_GET_PORT_INFO32);
9714 	port_cmd.action_to_len16 = cpu_to_be32(
9715 		V_FW_PORT_CMD_ACTION(action) |
9716 		FW_LEN16(port_cmd));
9717 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9718 			 &port_cmd, sizeof(port_cmd), &port_cmd);
9719 	if (ret)
9720 		return ret;
9721 
9722 	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
9723 		u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
9724 
9725 		link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS);
9726 		linkattr = lstatus_to_fwcap(lstatus);
9727 		mtu = be16_to_cpu(port_cmd.u.info.mtu);;
9728 	} else {
9729 		u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
9730 
9731 		link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32);
9732 		linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
9733 		mtu = G_FW_PORT_CMD_MTU32(
9734 			be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
9735 	}
9736 
9737 	*link_okp = link_ok;
9738 	*speedp = t4_link_fwcap_to_speed(linkattr);
9739 	*mtup = mtu;
9740 
9741 	return 0;
9742 }
9743 
9744 /**
9745  *      t4_handle_fw_rpl - process a FW reply message
9746  *      @adap: the adapter
9747  *      @rpl: start of the FW message
9748  *
9749  *      Processes a FW message, such as link state change messages.
9750  */
t4_handle_fw_rpl(struct adapter * adap,const __be64 * rpl)9751 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
9752 {
9753 	u8 opcode = *(const u8 *)rpl;
9754 
9755 	/*
9756 	 * This might be a port command ... this simplifies the following
9757 	 * conditionals ...  We can get away with pre-dereferencing
9758 	 * action_to_len16 because it's in the first 16 bytes and all messages
9759 	 * will be at least that long.
9760 	 */
9761 	const struct fw_port_cmd *p = (const void *)rpl;
9762 	unsigned int action =
9763 		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
9764 
9765 	if (opcode == FW_PORT_CMD &&
9766 	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
9767 	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
9768 		int i;
9769 		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
9770 		struct port_info *pi = NULL;
9771 
9772 		for_each_port(adap, i) {
9773 			pi = adap2pinfo(adap, i);
9774 			if (pi->lport == chan)
9775 				break;
9776 		}
9777 
9778 		t4_handle_get_port_info(pi, rpl);
9779 	} else {
9780 		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
9781 		return -EINVAL;
9782 	}
9783 	return 0;
9784 }
9785 
9786 /**
9787  *	get_pci_mode - determine a card's PCI mode
9788  *	@adapter: the adapter
9789  *	@p: where to store the PCI settings
9790  *
9791  *	Determines a card's PCI mode and associated parameters, such as speed
9792  *	and width.
9793  */
get_pci_mode(struct adapter * adapter,struct pci_params * p)9794 static void get_pci_mode(struct adapter *adapter,
9795 				   struct pci_params *p)
9796 {
9797 	u16 val;
9798 	u32 pcie_cap;
9799 
9800 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9801 	if (pcie_cap) {
9802 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9803 		p->speed = val & PCI_EXP_LNKSTA_CLS;
9804 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9805 	}
9806 }
9807 
9808 /**
9809  *	t4_wait_dev_ready - wait till to reads of registers work
9810  *
9811  *	Right after the device is RESET is can take a small amount of time
9812  *	for it to respond to register reads.  Until then, all reads will
9813  *	return either 0xff...ff or 0xee...ee.  Return an error if reads
9814  *	don't work within a reasonable time frame.
9815  */
t4_wait_dev_ready(struct adapter * adapter)9816 int t4_wait_dev_ready(struct adapter *adapter)
9817 {
9818 	u32 whoami;
9819 
9820 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9821 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9822 		return 0;
9823 
9824 	msleep(500);
9825 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9826 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9827 		return 0;
9828 
9829 	CH_ERR(adapter, "Device didn't become ready for access, "
9830 	       "whoami = %#x\n", whoami);
9831 	return -EIO;
9832 }
9833 
/*
 * Describes a non-standard Flash part by its JEDEC vendor/model ID and
 * total size.  NOTE(review): despite the name, size_mb appears to hold
 * the size in bytes — t4_get_flash_params stores "4 << 20" for a 4MB
 * part.  Confirm the units before relying on the field name.
 */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};
9838 
/*
 * Identify the adapter's serial Flash part and record its size and
 * sector count in adapter->params.  Returns 0 on success or a negative
 * error from the SF read primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			/* NOTE(review): size_mb holds bytes here (4 << 20),
			 * not megabytes — confirm against flash_desc users.
			 */
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	case 0xef: { /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
9981 
/*
 * Program the PCIe Completion Timeout Range (low 4 bits of Device
 * Control 2) to @range.  Silently does nothing if no PCIe capability
 * is found.
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
						  u8 range)
{
	u32 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	u16 devctl2;

	if (pcie_cap == 0)
		return;

	t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &devctl2);
	devctl2 = (devctl2 & 0xfff0) | range;
	t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, devctl2);
}
9996 
9997 /**
9998  *	t4_get_chip_type - Determine chip type from device ID
9999  *	@adap: the adapter
10000  *	@ver: adapter version
10001  */
enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
{
	enum chip_type chip = 0;
	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));

	/* Retrieve adapter's device ID.  FPGA variants set the FPGA flag
	 * and then fall through to tag the same chip generation.
	 * NOTE(review): failures return -EINVAL through an enum chip_type;
	 * callers must treat negative values as errors — confirm the enum
	 * is signed-int compatible on all supported compilers.
	 */
	switch (ver) {
		case CHELSIO_T4_FPGA:
			chip |= CHELSIO_CHIP_FPGA;
			/*FALLTHROUGH*/
		case CHELSIO_T4:
			chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
			break;
		case CHELSIO_T5_FPGA:
			chip |= CHELSIO_CHIP_FPGA;
			/*FALLTHROUGH*/
		case CHELSIO_T5:
			chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
			break;
		case CHELSIO_T6_FPGA:
			chip |= CHELSIO_CHIP_FPGA;
			/*FALLTHROUGH*/
		case CHELSIO_T6:
			chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
			break;
		default:
			CH_ERR(adap, "Device %d is not supported\n",
			       adap->params.pci.device_id);
			return -EINVAL;
	}

	/* T4A1 chip is no longer supported */
	if (chip == T4_A1) {
		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
		return -EINVAL;
	}
	return chip;
}
10040 
/**
 *	t4_prep_pf - prepare SW and HW for PF operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables on each PF.  Returns 0 on success
 *	or a negative errno on failure.
 */
int t4_prep_pf(struct adapter *adapter)
{
	int ret, ver;

	/* Make sure the device responds before touching config space. */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);


	/* Retrieve the adapter's device and vendor IDs from PCI config
	 * space; the device ID encodes the chip version.
	 */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);

	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = t4_get_chip_type(adapter, ver);
	/*
	 * NOTE(review): t4_get_chip_type() may return -EINVAL; presumably
	 * that value fails all of the is_t4/is_t5/is_t6 checks below and
	 * lands in the final "not supported" branch -- verify against
	 * those macros.
	 */
	if (is_t4(adapter->params.chip)) {
		/* T4 per-architecture constants. */
		adapter->params.arch.sge_fl_db = F_DBPRIO;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t5(adapter->params.chip)) {
		/* T5 per-architecture constants. */
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t6(adapter->params.chip)) {
		/* T6 per-architecture constants. */
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
	} else {
		CH_ERR(adapter, "Device %d is not supported\n",
			adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* FPGAs have twice the CIM Logic Analyzer capture size. */
	if (is_fpga(adapter->params.chip)) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
10129 
/**
 *	t4_prep_master_pf - prepare SW for master PF operations
 *	@adapter: the adapter
 *
 *	Runs the common per-PF preparation and additionally reads the
 *	Flash parameters, which are needed by the master PF.  Returns 0 on
 *	success or a negative error from the failing step.
 */
int t4_prep_master_pf(struct adapter *adapter)
{
	int rc = t4_prep_pf(adapter);

	if (rc < 0)
		return rc;

	rc = t4_get_flash_params(adapter);
	if (rc < 0)
		CH_ERR(adapter,
		       "Unable to retrieve Flash parameters ret = %d\n", -rc);

	return (rc < 0) ? rc : 0;
}
10152 
10153 /**
10154  *      t4_prep_adapter - prepare SW and HW for operation
10155  *      @adapter: the adapter
10156  *      @reset: if true perform a HW reset
10157  *
10158  *      Initialize adapter SW state for the various HW modules, set initial
10159  *      values for some adapter tunables.
10160  */
t4_prep_adapter(struct adapter * adapter,bool reset)10161 int t4_prep_adapter(struct adapter *adapter, bool reset)
10162 {
10163 	return t4_prep_master_pf(adapter);
10164 }
10165 
10166 /**
10167  *	t4_shutdown_adapter - shut down adapter, host & wire
10168  *	@adapter: the adapter
10169  *
10170  *	Perform an emergency shutdown of the adapter and stop it from
10171  *	continuing any further communication on the ports or DMA to the
10172  *	host.  This is typically used when the adapter and/or firmware
10173  *	have crashed and we want to prevent any further accidental
10174  *	communication with the rest of the world.  This will also force
10175  *	the port Link Status to go down -- if register writes work --
10176  *	which should help our peers figure out that we're down.
10177  */
t4_shutdown_adapter(struct adapter * adapter)10178 int t4_shutdown_adapter(struct adapter *adapter)
10179 {
10180 	int port;
10181 
10182 	t4_intr_disable(adapter);
10183 	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
10184 	for_each_port(adapter, port) {
10185 		u32 a_port_cfg = is_t4(adapter->params.chip) ?
10186 				 PORT_REG(port, A_XGMAC_PORT_CFG) :
10187 				 T5_PORT_REG(port, A_MAC_PORT_CFG);
10188 
10189 		t4_write_reg(adapter, a_port_cfg,
10190 			     t4_read_reg(adapter, a_port_cfg)
10191 			     & ~V_SIGNAL_DET(1));
10192 	}
10193 	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
10194 
10195 	return 0;
10196 }
10197 
10198 /**
10199  *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
10200  *	@adapter: the adapter
10201  *	@qid: the Queue ID
10202  *	@qtype: the Ingress or Egress type for @qid
10203  *	@user: true if this request is for a user mode queue
10204  *	@pbar2_qoffset: BAR2 Queue Offset
10205  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
10206  *
10207  *	Returns the BAR2 SGE Queue Registers information associated with the
10208  *	indicated Absolute Queue ID.  These are passed back in return value
10209  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
10210  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
10211  *
10212  *	This may return an error which indicates that BAR2 SGE Queue
10213  *	registers aren't available.  If an error is not returned, then the
10214  *	following values are returned:
10215  *
10216  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
10217  *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
10218  *
10219  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
10220  *	require the "Inferred Queue ID" ability may be used.  E.g. the
10221  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
10222  *	then these "Inferred Queue ID" register may not be used.
10223  */
t4_bar2_sge_qregs(struct adapter * adapter,unsigned int qid,enum t4_bar2_qtype qtype,int user,u64 * pbar2_qoffset,unsigned int * pbar2_qid)10224 int t4_bar2_sge_qregs(struct adapter *adapter,
10225 		      unsigned int qid,
10226 		      enum t4_bar2_qtype qtype,
10227 		      int user,
10228 		      u64 *pbar2_qoffset,
10229 		      unsigned int *pbar2_qid)
10230 {
10231 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
10232 	u64 bar2_page_offset, bar2_qoffset;
10233 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
10234 
10235 	/* T4 doesn't support BAR2 SGE Queue registers for kernel
10236 	 * mode queues.
10237 	 */
10238 	if (!user && is_t4(adapter->params.chip))
10239 		return -EINVAL;
10240 
10241 	/* Get our SGE Page Size parameters.
10242 	 */
10243 	page_shift = adapter->params.sge.hps + 10;
10244 	page_size = 1 << page_shift;
10245 
10246 	/* Get the right Queues per Page parameters for our Queue.
10247 	 */
10248 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
10249 		     ? adapter->params.sge.eq_qpp
10250 		     : adapter->params.sge.iq_qpp);
10251 	qpp_mask = (1 << qpp_shift) - 1;
10252 
10253 	/* Calculate the basics of the BAR2 SGE Queue register area:
10254 	 *  o The BAR2 page the Queue registers will be in.
10255 	 *  o The BAR2 Queue ID.
10256 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
10257 	 */
10258 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
10259 	bar2_qid = qid & qpp_mask;
10260 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
10261 
10262 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
10263 	 * hardware will infer the Absolute Queue ID simply from the writes to
10264 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
10265 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
10266 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
10267 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
10268 	 * from the BAR2 Page and BAR2 Queue ID.
10269 	 *
10270 	 * One important censequence of this is that some BAR2 SGE registers
10271 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
10272 	 * there.  But other registers synthesize the SGE Queue ID purely
10273 	 * from the writes to the registers -- the Write Combined Doorbell
10274 	 * Buffer is a good example.  These BAR2 SGE Registers are only
10275 	 * available for those BAR2 SGE Register areas where the SGE Absolute
10276 	 * Queue ID can be inferred from simple writes.
10277 	 */
10278 	bar2_qoffset = bar2_page_offset;
10279 	bar2_qinferred = (bar2_qid_offset < page_size);
10280 	if (bar2_qinferred) {
10281 		bar2_qoffset += bar2_qid_offset;
10282 		bar2_qid = 0;
10283 	}
10284 
10285 	*pbar2_qoffset = bar2_qoffset;
10286 	*pbar2_qid = bar2_qid;
10287 	return 0;
10288 }
10289 
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.  Returns 0 on success or an error otherwise
 *	(-ENXIO when the register path is unavailable and @fw_attach is
 *	false, or the mailbox command's error).
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		/* Unpack memory type and 16-byte-aligned start address. */
		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The register encodes the entry count in units of 128. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	/* Unpack the firmware's reply into the devlog parameters. */
	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
10356 
10357 /**
10358  *	t4_init_sge_params - initialize adap->params.sge
10359  *	@adapter: the adapter
10360  *
10361  *	Initialize various fields of the adapter's SGE Parameters structure.
10362  */
t4_init_sge_params(struct adapter * adapter)10363 int t4_init_sge_params(struct adapter *adapter)
10364 {
10365 	struct sge_params *sge_params = &adapter->params.sge;
10366 	u32 hps, qpp;
10367 	unsigned int s_hps, s_qpp;
10368 
10369 	/* Extract the SGE Page Size for our PF.
10370 	 */
10371 	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
10372 	s_hps = (S_HOSTPAGESIZEPF0 +
10373 		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
10374 	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
10375 
10376 	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
10377 	 */
10378 	s_qpp = (S_QUEUESPERPAGEPF0 +
10379 		(S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
10380 	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
10381 	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10382 	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
10383 	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10384 
10385 	return 0;
10386 }
10387 
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Initialize various fields of the adapter's TP Parameters structure:
 *	timer resolutions, TX modulation queue map, the Compressed Filter
 *	Mode/Mask, and the per-field shift positions within the Compressed
 *	Filter Tuple.  Always returns 0.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	u32 param, val, v;
	int chan, ret;

	/* Cache the TP timer and delayed-ACK resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
	 * Configuration.
	 */

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));

	/* Ask the firmware for the current filter mode/mask. */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret == 0) {
		CH_INFO(adap,
			 "Current filter mode/mask 0x%x:0x%x\n",
			 G_FW_PARAMS_PARAM_FILTER_MODE(val),
			 G_FW_PARAMS_PARAM_FILTER_MASK(val));
		adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
		adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
	} else {
		CH_WARN(adap,
			 "Reading filter mode/mask not supported via fw api, "
			 "falling back to older indirect-reg-read \n");

		/* In case of older-fw (which doesn't expose the api
		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
		 * the fw api) combination, fall back to the older method of
		 * reading the filter mode from the indirect register.
		 */
		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
			       A_TP_VLAN_PRI_MAP, sleep_ok);

		/* With the older-fw and newer-driver combination we might run
		 * into an issue when user wants to use hash filter region but
		 * the filter_mask is zero, in this case filter_mask validation
		 * is tough. To avoid that we set the filter_mask same as filter
		 * mode, which will behave exactly as the older way of ignoring
		 * the filter mask validation.
		 */
		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
	}

	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       A_TP_INGRESS_CONFIG, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 * (t4_filter_field_shift() returns -1 for a field not present in the
	 * cached filter mode.)
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								F_ETHERTYPE);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
								F_MACMATCH);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								F_MPSHITTYPE);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   F_FRAGMENTATION);
	return 0;
}
10480 
10481 /**
10482  *      t4_filter_field_shift - calculate filter field shift
10483  *      @adap: the adapter
10484  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
10485  *
10486  *      Return the shift position of a filter field within the Compressed
10487  *      Filter Tuple.  The filter field is specified via its selection bit
10488  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
10489  */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)10490 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
10491 {
10492 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
10493 	unsigned int sel;
10494 	int field_shift;
10495 
10496 	if ((filter_mode & filter_sel) == 0)
10497 		return -1;
10498 
10499 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10500 		switch (filter_mode & sel) {
10501 		case F_FCOE:
10502 			field_shift += W_FT_FCOE;
10503 			break;
10504 		case F_PORT:
10505 			field_shift += W_FT_PORT;
10506 			break;
10507 		case F_VNIC_ID:
10508 			field_shift += W_FT_VNIC_ID;
10509 			break;
10510 		case F_VLAN:
10511 			field_shift += W_FT_VLAN;
10512 			break;
10513 		case F_TOS:
10514 			field_shift += W_FT_TOS;
10515 			break;
10516 		case F_PROTOCOL:
10517 			field_shift += W_FT_PROTOCOL;
10518 			break;
10519 		case F_ETHERTYPE:
10520 			field_shift += W_FT_ETHERTYPE;
10521 			break;
10522 		case F_MACMATCH:
10523 			field_shift += W_FT_MACMATCH;
10524 			break;
10525 		case F_MPSHITTYPE:
10526 			field_shift += W_FT_MPSHITTYPE;
10527 			break;
10528 		case F_FRAGMENTATION:
10529 			field_shift += W_FT_FRAGMENTATION;
10530 			break;
10531 		}
10532 	}
10533 	return field_shift;
10534 }
10535 
10536 /**
10537  *	t4_create_filter_info - return Compressed Filter Value/Mask tuple
10538  *	@adapter: the adapter
10539  *	@filter_value: Filter Value return value pointer
10540  *	@filter_mask: Filter Mask return value pointer
10541  *	@fcoe: FCoE filter selection
10542  *	@port: physical port filter selection
10543  *	@vnic: Virtual NIC ID filter selection
10544  *	@vlan: VLAN ID filter selection
10545  *	@vlan_pcp: VLAN Priority Code Point
10546  *	@vlan_dei: VLAN Drop Eligibility Indicator
10547  *	@tos: Type Of Server filter selection
10548  *	@protocol: IP Protocol filter selection
10549  *	@ethertype: Ethernet Type filter selection
10550  *	@macmatch: MPS MAC Index filter selection
10551  *	@matchtype: MPS Hit Type filter selection
10552  *	@frag: IP Fragmentation filter selection
10553  *
10554  *	Construct a Compressed Filter Value/Mask tuple based on a set of
10555  *	"filter selection" values.  For each passed filter selection value
10556  *	which is greater than or equal to 0, we put that value into the
10557  *	constructed Filter Value and the appropriate mask into the Filter
10558  *	Mask.  If a filter selections is specified which is not currently
10559  *	configured into the hardware, an error will be returned.  Otherwise
10560  *	the constructed FIlter Value/Mask tuple will be returned via the
10561  *	specified return value pointers and success will be returned.
10562  *
10563  *	All filter selection values and the returned Filter Value/Mask values
10564  *	are in Host-Endian format.
10565  */
t4_create_filter_info(const struct adapter * adapter,u64 * filter_value,u64 * filter_mask,int fcoe,int port,int vnic,int vlan,int vlan_pcp,int vlan_dei,int tos,int protocol,int ethertype,int macmatch,int matchtype,int frag)10566 int t4_create_filter_info(const struct adapter *adapter,
10567 			  u64 *filter_value, u64 *filter_mask,
10568 			  int fcoe, int port, int vnic,
10569 			  int vlan, int vlan_pcp, int vlan_dei,
10570 			  int tos, int protocol, int ethertype,
10571 			  int macmatch, int matchtype, int frag)
10572 {
10573 	const struct tp_params *tp = &adapter->params.tp;
10574 	u64 v, m;
10575 
10576 	/*
10577 	 * If any selected filter field isn't enabled, return an error.
10578 	 */
10579 	#define BAD_FILTER(__field) \
10580 		((__field) >= 0 && tp->__field##_shift < 0)
10581 	if (BAD_FILTER(fcoe)       ||
10582 	    BAD_FILTER(port)       ||
10583 	    BAD_FILTER(vnic)       ||
10584 	    BAD_FILTER(vlan)       ||
10585 	    BAD_FILTER(tos)        ||
10586 	    BAD_FILTER(protocol)   ||
10587 	    BAD_FILTER(ethertype)  ||
10588 	    BAD_FILTER(macmatch)   ||
10589 	    BAD_FILTER(matchtype) ||
10590 	    BAD_FILTER(frag))
10591 		return -EINVAL;
10592 	#undef BAD_FILTER
10593 
10594 	/*
10595 	 * We have to have VLAN ID selected if we want to also select on
10596 	 * either the Priority Code Point or Drop Eligibility Indicator
10597 	 * fields.
10598 	 */
10599 	if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
10600 		return -EINVAL;
10601 
10602 	/*
10603 	 * Construct Filter Value and Mask.
10604 	 */
10605 	v = m = 0;
10606 	#define SET_FILTER_FIELD(__field, __width) \
10607 	do { \
10608 		if ((__field) >= 0) { \
10609 			const int shift = tp->__field##_shift; \
10610 			\
10611 			v |= (__field) << shift; \
10612 			m |= ((1ULL << (__width)) - 1) << shift; \
10613 		} \
10614 	} while (0)
10615 	SET_FILTER_FIELD(fcoe,      W_FT_FCOE);
10616 	SET_FILTER_FIELD(port,      W_FT_PORT);
10617 	SET_FILTER_FIELD(tos,       W_FT_TOS);
10618 	SET_FILTER_FIELD(protocol,  W_FT_PROTOCOL);
10619 	SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
10620 	SET_FILTER_FIELD(macmatch,  W_FT_MACMATCH);
10621 	SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
10622 	SET_FILTER_FIELD(frag,      W_FT_FRAGMENTATION);
10623 	#undef SET_FILTER_FIELD
10624 
10625 	/*
10626 	 * We handle VNIC ID and VLANs separately because they're slightly
10627 	 * different than the rest of the fields.  Both require that a
10628 	 * corresponding "valid" bit be set in the Filter Value and Mask.
10629 	 * These bits are in the top bit of the field.  Additionally, we can
10630 	 * select the Priority Code Point and Drop Eligibility Indicator
10631 	 * fields for VLANs as an option.  Remember that the format of a VLAN
10632 	 * Tag is:
10633 	 *
10634 	 * bits: 3  1      12
10635 	 *     +---+-+------------+
10636 	 *     |PCP|D|   VLAN ID  |
10637 	 *     +---+-+------------+
10638 	 */
10639 	if (vnic >= 0) {
10640 		v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
10641 		m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
10642 	}
10643 	if (vlan >= 0) {
10644 		v |= ((1ULL << (W_FT_VLAN-1)) | vlan)  << tp->vlan_shift;
10645 		m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
10646 
10647 		if (vlan_dei >= 0) {
10648 			v |= vlan_dei << (tp->vlan_shift + 12);
10649 			m |= 0x7      << (tp->vlan_shift + 12);
10650 		}
10651 		if (vlan_pcp >= 0) {
10652 			v |= vlan_pcp << (tp->vlan_shift + 13);
10653 			m |= 0x7      << (tp->vlan_shift + 13);
10654 		}
10655 	}
10656 
10657 	/*
10658 	 * Pass back computed Filter Value and Mask; return success.
10659 	 */
10660 	*filter_value = v;
10661 	*filter_mask = m;
10662 	return 0;
10663 }
10664 
/*
 * t4_init_rss_mode - read and cache each port's RSS configuration
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RSS_VI_CONFIG commands
 *
 * Issues a read FW_RSS_VI_CONFIG_CMD for every port's VI and stores the
 * returned basicvirtual configuration word in the port_info's rss_mode
 * field.  Returns 0 on success or the mailbox command's error.
 */
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);
		/* Build a READ request for this port's VI. */
		rvc.op_to_viid =
			cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_READ |
				    V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}
10686 
/*
 * t4_init_portmirror - allocate a traffic-mirror VI for a port
 * @pi: the port_info of the port being mirrored
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 *
 * Allocates a virtual interface to serve as the port's traffic mirror and
 * records its VIID/VIVLD/VIN in the port_info.  Returns < 0 on error.
 */
static int t4_init_portmirror(struct port_info *pi, int mbox,
		       int port, int pf, int vf)
{
	struct adapter *adapter = pi->adapter;
	int ret;
	u8 vivld = 0, vin = 0;

	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
			  &vivld, &vin);
	if (ret < 0)
		return ret;

	/* The non-negative return value is stored as the mirror VIID. */
	pi->viid_mirror = ret;

	/* If fw supports returning the VIN as part of FW_VI_CMD,
	 * save the returned values.
	 */
	if (adapter->params.viid_smt_extn_support) {
		pi->vivld_mirror = vivld;
		pi->vin_mirror = vin;
	} else {
		/* Retrieve the values from VIID */
		pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror);
		pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror);
	}

	/*
	 * NOTE(review): the "VF = %u" field below prints pi->vin_mirror,
	 * not the @vf argument -- confirm the message is intended that way.
	 */
	CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
		port, pf, pi->vin_mirror);
	return 0;
}
10717 
/*
 * t4_mirror_init - create traffic-mirror VIs for the adapter's ports
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF owning the mirror VIs
 * @vf: the VF owning the mirror VIs
 * @enable_ringbb: ring-backbone configuration; mirror only the first port
 *
 * Walks the adapter's ports, mapping each to the next physical port set in
 * params.portvec, and allocates a mirror VI for each via
 * t4_init_portmirror().  Returns 0 on success or the first error.
 */
int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf,
		   bool enable_ringbb)
{
	int ret, i, j = 0;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		/* We want mirroring only on Port0 for ringbackbone
		 * configuration.
		 */
		if (enable_ringbb && i)
			break;
		/* Skip to the next physical port present in portvec. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
		if (ret)
			return ret;
		j++;
	}
	return 0;
}
10741 
/**
 *	t4_init_portinfo_viid - allocate a virtual interface and initialize
 *	port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *	@alloc_vi: Indicator to alloc VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
 */
int t4_init_portinfo_viid(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[], bool alloc_vi)
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/*
	 * If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		/* If the SET_PARAMS succeeds, firmware speaks 32-bit caps. */
		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Ask firmware for this port's information (16- or 32-bit form). */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 = cpu_to_be32(
		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/*
	 * Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
			     ? G_FW_PORT_CMD_MDIOADDR(lstatus)
			     : -1);
		/* 16-bit capabilities are widened to the 32-bit format. */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
			     ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	if (alloc_vi) {
		u8 vivld = 0, vin = 0;

		ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
				  &rss_size, &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->rss_size = rss_size;

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adapter->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin = G_FW_VIID_VIN(pi->viid);
		}
	}

	/* Record channel/port assignments and the extracted port info. */
	pi->tx_chan = port;
	pi->lport = port;
	pi->rx_chan = port;
	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	t4_init_link_config(pi, pcaps, acaps);
	return 0;
}
10860 
10861 /**
10862  *	t4_init_portinfo - allocate a virtual interface and initialize port_info
10863  *	@pi: the port_info
10864  *	@mbox: mailbox to use for the FW command
10865  *	@port: physical port associated with the VI
10866  *	@pf: the PF owning the VI
10867  *	@vf: the VF owning the VI
10868  *	@mac: the MAC address of the VI
10869  *
10870  *	Allocates a virtual interface for the given physical port.  If @mac is
10871  *	not %NULL it contains the MAC address of the VI as assigned by FW.
10872  *	@mac should be large enough to hold an Ethernet address.
10873  *	Returns < 0 on error.
10874  */
t4_init_portinfo(struct port_info * pi,int mbox,int port,int pf,int vf,u8 mac[])10875 int t4_init_portinfo(struct port_info *pi, int mbox,
10876 		     int port, int pf, int vf, u8 mac[])
10877 {
10878 	return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true);
10879 }
10880 
t4_port_init(struct adapter * adap,int mbox,int pf,int vf)10881 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
10882 {
10883 	u8 addr[6];
10884 	int ret, i, j = 0;
10885 
10886 	for_each_port(adap, i) {
10887 		struct port_info *pi = adap2pinfo(adap, i);
10888 
10889 		while ((adap->params.portvec & (1 << j)) == 0)
10890 			j++;
10891 
10892 		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
10893 		if (ret)
10894 			return ret;
10895 
10896 		t4_os_set_hw_addr(adap, i, addr);
10897 		j++;
10898 	}
10899 	return 0;
10900 }
10901 
10902 /**
10903  *	t4_read_cimq_cfg - read CIM queue configuration
10904  *	@adap: the adapter
10905  *	@base: holds the queue base addresses in bytes
10906  *	@size: holds the queue sizes in bytes
10907  *	@thres: holds the queue full thresholds in bytes
10908  *
10909  *	Returns the current configuration of the CIM queues, starting with
10910  *	the IBQs, then the OBQs.
10911  */
t4_read_cimq_cfg(struct adapter * adap,u16 * base,u16 * size,u16 * thres)10912 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
10913 {
10914 	unsigned int i, v;
10915 	int cim_num_obq = is_t4(adap->params.chip) ?
10916 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10917 
10918 	for (i = 0; i < CIM_NUM_IBQ; i++) {
10919 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
10920 			     V_QUENUMSELECT(i));
10921 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10922 		/* value is in 256-byte units */
10923 		*base++ = G_CIMQBASE(v) * 256;
10924 		*size++ = G_CIMQSIZE(v) * 256;
10925 		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
10926 	}
10927 	for (i = 0; i < cim_num_obq; i++) {
10928 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10929 			     V_QUENUMSELECT(i));
10930 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10931 		/* value is in 256-byte units */
10932 		*base++ = G_CIMQBASE(v) * 256;
10933 		*size++ = G_CIMQSIZE(v) * 256;
10934 	}
10935 }
10936 
10937 /**
10938  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
10939  *	@adap: the adapter
10940  *	@qid: the queue index
10941  *	@data: where to store the queue contents
10942  *	@n: capacity of @data in 32-bit words
10943  *
10944  *	Reads the contents of the selected CIM queue starting at address 0 up
10945  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
10946  *	error and the number of 32-bit words actually read on success.
10947  */
t4_read_cim_ibq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)10948 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10949 {
10950 	int i, err, attempts;
10951 	unsigned int addr;
10952 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
10953 
10954 	if (qid > 5 || (n & 3))
10955 		return -EINVAL;
10956 
10957 	addr = qid * nwords;
10958 	if (n > nwords)
10959 		n = nwords;
10960 
10961 	/* It might take 3-10ms before the IBQ debug read access is allowed.
10962 	 * Wait for 1 Sec with a delay of 1 usec.
10963 	 */
10964 	attempts = 1000000;
10965 
10966 	for (i = 0; i < n; i++, addr++) {
10967 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
10968 			     F_IBQDBGEN);
10969 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
10970 				      attempts, 1);
10971 		if (err)
10972 			return err;
10973 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
10974 	}
10975 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
10976 	return i;
10977 }
10978 
10979 /**
10980  *	t4_read_cim_obq - read the contents of a CIM outbound queue
10981  *	@adap: the adapter
10982  *	@qid: the queue index
10983  *	@data: where to store the queue contents
10984  *	@n: capacity of @data in 32-bit words
10985  *
10986  *	Reads the contents of the selected CIM queue starting at address 0 up
10987  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
10988  *	error and the number of 32-bit words actually read on success.
10989  */
t4_read_cim_obq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)10990 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10991 {
10992 	int i, err;
10993 	unsigned int addr, v, nwords;
10994 	int cim_num_obq = is_t4(adap->params.chip) ?
10995 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10996 
10997 	if ((qid > (cim_num_obq - 1)) || (n & 3))
10998 		return -EINVAL;
10999 
11000 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
11001 		     V_QUENUMSELECT(qid));
11002 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
11003 
11004 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
11005 	nwords = G_CIMQSIZE(v) * 64;  /* same */
11006 	if (n > nwords)
11007 		n = nwords;
11008 
11009 	for (i = 0; i < n; i++, addr++) {
11010 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
11011 			     F_OBQDBGEN);
11012 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
11013 				      2, 1);
11014 		if (err)
11015 			return err;
11016 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
11017 	}
11018 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
11019 	return i;
11020 }
11021 
11022 /**
11023  *	t4_cim_read - read a block from CIM internal address space
11024  *	@adap: the adapter
11025  *	@addr: the start address within the CIM address space
11026  *	@n: number of words to read
11027  *	@valp: where to store the result
11028  *
11029  *	Reads a block of 4-byte words from the CIM intenal address space.
11030  */
t4_cim_read(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)11031 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
11032 		unsigned int *valp)
11033 {
11034 	int ret = 0;
11035 
11036 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
11037 		return -EBUSY;
11038 
11039 	for ( ; !ret && n--; addr += 4) {
11040 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
11041 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
11042 				      0, 5, 2);
11043 		if (!ret)
11044 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
11045 	}
11046 	return ret;
11047 }
11048 
11049 /**
11050  *	t4_cim_write - write a block into CIM internal address space
11051  *	@adap: the adapter
11052  *	@addr: the start address within the CIM address space
11053  *	@n: number of words to write
11054  *	@valp: set of values to write
11055  *
11056  *	Writes a block of 4-byte words into the CIM intenal address space.
11057  */
t4_cim_write(struct adapter * adap,unsigned int addr,unsigned int n,const unsigned int * valp)11058 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
11059 		 const unsigned int *valp)
11060 {
11061 	int ret = 0;
11062 
11063 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
11064 		return -EBUSY;
11065 
11066 	for ( ; !ret && n--; addr += 4) {
11067 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
11068 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
11069 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
11070 				      0, 5, 2);
11071 	}
11072 	return ret;
11073 }
11074 
/* Write a single word into the CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int word = val;

	return t4_cim_write(adap, addr, 1, &word);
}
11080 
/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 *
 *	Returns 0 on success or a negative error (-ETIMEDOUT if a read
 *	request never completed).  @la_buf must hold at least
 *	adap->params.cim_la_size words.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA config so we can restore its run state at exit. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the hardware write pointer so the oldest entry
	 * comes out first.
	 */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Issue a read at the current pointer, then poll RDEN for
		 * completion before fetching the data word.
		 */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			/* HW never cleared the read-enable: request stalled. */
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= M_UPDBGLARDPTR;
	}
restart:
	/* Re-enable the LA if it was running when we arrived; preserve the
	 * first error seen, otherwise report the restart status.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
11149 
/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data (TPLA_SIZE 64-bit entries)
 *	@wrptr: the HW write pointer within the capture buffer (may be NULL)
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the low config bits; if the LA is running, freeze it by
	 * toggling the enable bit off while keeping the LA mask applied.
	 */
	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In capture modes >= 2 an entry is written in two halves; if the
	 * second half flag is clear the entry at the write pointer is only
	 * partially filled in.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep only the config half-word, clear the read-pointer field and
	 * re-apply the LA mask before stepping through the buffer.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
11196 
11197 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
11198  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
11199  * state for more than the Warning Threshold then we'll issue a warning about
11200  * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
11201  * appears to be hung every Warning Repeat second till the situation clears.
11202  * If the situation clears, we'll note that as well.
11203  */
11204 #define SGE_IDMA_WARN_THRESH 1
11205 #define SGE_IDMA_WARN_REPEAT 300
11206 
11207 /**
11208  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
11209  *	@adapter: the adapter
11210  *	@idma: the adapter IDMA Monitor state
11211  *
11212  *	Initialize the state of an SGE Ingress DMA Monitor.
11213  */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)11214 void t4_idma_monitor_init(struct adapter *adapter,
11215 			  struct sge_idma_monitor_state *idma)
11216 {
11217 	/* Initialize the state variables for detecting an SGE Ingress DMA
11218 	 * hang.  The SGE has internal counters which count up on each clock
11219 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
11220 	 * same state they were on the previous clock tick.  The clock used is
11221 	 * the Core Clock so we have a limit on the maximum "time" they can
11222 	 * record; typically a very small number of seconds.  For instance,
11223 	 * with a 600MHz Core Clock, we can only count up to a bit more than
11224 	 * 7s.  So we'll synthesize a larger counter in order to not run the
11225 	 * risk of having the "timers" overflow and give us the flexibility to
11226 	 * maintain a Hung SGE State Machine of our own which operates across
11227 	 * a longer time frame.
11228 	 */
11229 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
11230 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
11231 }
11232 
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Maintains a per-channel synthesized stall timer from the SGE's
 *	hardware same-state counters and warns (via CH_WARN) when a channel
 *	appears stuck, repeating every SGE_IDMA_WARN_REPEAT seconds.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* One IDMA channel per loop iteration. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			/* First tick past 1s: start the synthesized timer. */
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
11322 
11323 /**
11324  *     t4_set_vf_mac - Set MAC address for the specified VF
11325  *     @adapter: The adapter
11326  *     @vf: one of the VFs instantiated by the specified PF
11327  *     @naddr: the number of MAC addresses
11328  *     @addr: the MAC address(es) to be set to the specified VF
11329  */
t4_set_vf_mac_acl(struct adapter * adapter,unsigned int vf,unsigned int naddr,u8 * addr)11330 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
11331 		      unsigned int naddr, u8 *addr)
11332 {
11333 	struct fw_acl_mac_cmd cmd;
11334 
11335 	memset(&cmd, 0, sizeof(cmd));
11336 	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
11337 				    F_FW_CMD_REQUEST |
11338 				    F_FW_CMD_WRITE |
11339 				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
11340 				    V_FW_ACL_MAC_CMD_VFN(vf));
11341 
11342 	/* Note: Do not enable the ACL */
11343 	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
11344 	cmd.nmac = naddr;
11345 
11346 	switch (adapter->pf) {
11347 	case 3:
11348 		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
11349 		break;
11350 	case 2:
11351 		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
11352 		break;
11353 	case 1:
11354 		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
11355 		break;
11356 	case 0:
11357 		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
11358 		break;
11359 	}
11360 
11361 	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
11362 }
11363 
11364 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
11365  * functions
11366  */
11367 
11368 /**
11369  *	t4_read_pace_tbl - read the pace table
11370  *	@adap: the adapter
11371  *	@pace_vals: holds the returned values
11372  *
11373  *	Returns the values of TP's pace table in microseconds.
11374  */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])11375 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
11376 {
11377 	unsigned int i, v;
11378 
11379 	for (i = 0; i < NTX_SCHED; i++) {
11380 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
11381 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
11382 		pace_vals[i] = dack_ticks_to_usec(adap, v);
11383 	}
11384 }
11385 
11386 /**
11387  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
11388  *	@adap: the adapter
11389  *	@sched: the scheduler index
11390  *	@kbps: the byte rate in Kbps
11391  *	@ipg: the interpacket delay in tenths of nanoseconds
11392  * 	@sleep_ok: if true we may sleep while awaiting command completion
11393  *
11394  *	Return the current configuration of a HW Tx scheduler.
11395  */
t4_get_tx_sched(struct adapter * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg,bool sleep_ok)11396 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
11397 		     unsigned int *ipg, bool sleep_ok)
11398 {
11399 	unsigned int v, addr, bpt, cpt;
11400 
11401 	if (kbps) {
11402 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
11403 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11404 		if (sched & 1)
11405 			v >>= 16;
11406 		bpt = (v >> 8) & 0xff;
11407 		cpt = v & 0xff;
11408 		if (!cpt)
11409 			*kbps = 0;	/* scheduler disabled */
11410 		else {
11411 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
11412 			*kbps = (v * bpt) / 125;
11413 		}
11414 	}
11415 	if (ipg) {
11416 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
11417 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11418 		if (sched & 1)
11419 			v >>= 16;
11420 		v &= 0xffff;
11421 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
11422 	}
11423 }
11424 
11425 /**
11426  *	t4_load_cfg - download config file
11427  *	@adap: the adapter
11428  *	@cfg_data: the cfg text file to write
11429  *	@size: text file size
11430  *
11431  *	Write the supplied config text file to the card's serial flash.
11432  */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11433 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
11434 {
11435 	int ret, i, n, cfg_addr;
11436 	unsigned int addr;
11437 	unsigned int flash_cfg_start_sec;
11438 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11439 
11440 	cfg_addr = t4_flash_cfg_addr(adap);
11441 	if (cfg_addr < 0)
11442 		return cfg_addr;
11443 
11444 	addr = cfg_addr;
11445 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
11446 
11447 	if (size > FLASH_CFG_MAX_SIZE) {
11448 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
11449 		       FLASH_CFG_MAX_SIZE);
11450 		return -EFBIG;
11451 	}
11452 
11453 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
11454 			 sf_sec_size);
11455 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11456 				     flash_cfg_start_sec + i - 1);
11457 	/*
11458 	 * If size == 0 then we're simply erasing the FLASH sectors associated
11459 	 * with the on-adapter Firmware Configuration File.
11460 	 */
11461 	if (ret || size == 0)
11462 		goto out;
11463 
11464 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
11465 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11466 		if ( (size - i) <  SF_PAGE_SIZE)
11467 			n = size - i;
11468 		else
11469 			n = SF_PAGE_SIZE;
11470 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
11471 		if (ret)
11472 			goto out;
11473 
11474 		addr += SF_PAGE_SIZE;
11475 		cfg_data += SF_PAGE_SIZE;
11476 	}
11477 
11478 out:
11479 	if (ret)
11480 		CH_ERR(adap, "config file %s failed %d\n",
11481 		       (size == 0 ? "clear" : "download"), ret);
11482 	return ret;
11483 }
11484 
11485 /**
11486  *	t5_fw_init_extern_mem - initialize the external memory
11487  *	@adap: the adapter
11488  *
11489  *	Initializes the external memory on T5.
11490  */
t5_fw_init_extern_mem(struct adapter * adap)11491 int t5_fw_init_extern_mem(struct adapter *adap)
11492 {
11493 	u32 params[1], val[1];
11494 	int ret;
11495 
11496 	if (!is_t5(adap->params.chip))
11497 		return 0;
11498 
11499 	val[0] = 0xff; /* Initialize all MCs */
11500 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11501 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
11502 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
11503 			FW_CMD_MAX_TIMEOUT);
11504 
11505 	return ret;
11506 }
11507 
/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
11514 
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
11524 
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
11543 
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */
11567 
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 chunks * 512B = 512KB max */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" as a little-endian u32 */
};
11578 
/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.  Walks every
 *	chained image in the ROM, patching the PCIR device ID of Legacy and
 *	EFI images and recomputing the legacy image checksum.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;	/* byte offset of the current image */

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		/* NOTE(review): pcir_offset is read via a raw u16 load and
		 * device_id written via a raw u16 store; this assumes a
		 * little-endian host and tolerable alignment — confirm for
		 * any new port of this code.
		 */
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Sum every byte of this image (size512 * 512 bytes)
			 * with the checksum field zeroed.
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 * (byte 7 is the cksum field of the legacy header).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 * (EFI images carry no legacy checksum to fix up).
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
11657 
#ifdef CHELSIO_T4_DIAGS
/*
 *	t4_erase_sf - Erase entire serial Flash region
 *	@adap: the adapter
 *
 *	Clears every serial flash sector up to and including the FW region,
 *	capped at the number of sectors the part actually has (the flash
 *	layout is described in t4_hw.h).
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int nsec = FLASH_END_SEC;
	int rc;

	if (nsec > adap->params.sf_nsec)
		nsec = adap->params.sf_nsec;

	rc = t4_flash_erase_sectors(adap, 0, nsec - 1);
	if (rc != 0)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", rc);
	return rc;
}
#endif
11682 
/*
 *	t4_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash (in KB units) to write boot_data
 *	@size: image size in bytes; 0 means "erase the option ROM region only"
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 *
 *	Returns 0 on success or a negative errno.  On any failure after the
 *	erase step, the option ROM region may be left erased/partially
 *	written; the header page is deliberately written last so an
 *	incomplete image is never bootable.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset ;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	/* boot_addr is given in KB; convert to a byte offset into flash. */
	unsigned int boot_sector = (boot_addr * 1024 );
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 * (">> 16" converts a byte offset to a 64KB-sector number).
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	/*
	 * pcir_offset is stored little-endian in the ROM header; it locates
	 * the PCIR data structure within boot_data.
	 * NOTE(review): *(u16 *) casts below assume boot_data is at least
	 * 2-byte aligned — TODO confirm callers guarantee this.
	 */
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 * NOTE(review): this size check runs after the erase above, so a
	 * too-small/large image still clears the region — presumably
	 * intentional (matches upstream ordering), but worth confirming.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 * NOTE(review): the loop assumes size >= SF_PAGE_SIZE and a multiple
	 * of SF_PAGE_SIZE (size is unsigned, so a non-multiple would wrap) —
	 * presumably guaranteed by BOOT_MIN_SIZE and image padding; confirm.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Finally commit the header page, making the image valid. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
11814 
11815 /*
11816  *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
11817  *	@adapter: the adapter
11818  *
11819  *	Return the address within the flash where the OptionROM Configuration
11820  *	is stored, or an error if the device FLASH is too small to contain
11821  *	a OptionROM Configuration.
11822  */
t4_flash_bootcfg_addr(struct adapter * adapter)11823 static int t4_flash_bootcfg_addr(struct adapter *adapter)
11824 {
11825 	/*
11826 	 * If the device FLASH isn't large enough to hold a Firmware
11827 	 * Configuration File, return an error.
11828 	 */
11829 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
11830 		return -ENOSPC;
11831 
11832 	return FLASH_BOOTCFG_START;
11833 }
11834 
/*
 * t4_load_bootcfg - write (or clear) the OptionROM Configuration in FLASH
 * @adap: the adapter
 * @cfg_data: the configuration data to write
 * @size: data size in bytes; 0 means "erase the region only"
 *
 * Erases the OptionROM Configuration region and, when @size is non-zero,
 * writes @cfg_data into it in SF_PAGE_SIZE chunks.  Returns 0 on success
 * or a negative errno.
 */
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	/*
	 * NOTE(review): the start sector is derived with the SF_SEC_SIZE
	 * constant while the sector count below uses the per-device
	 * sf_sec_size — these agree only when the device sector size equals
	 * SF_SEC_SIZE; confirm for all supported parts.
	 */
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
			FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Always erase the full region, regardless of @size. */
	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
					flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		/* Last chunk may be shorter than a full page. */
		if ( (size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
				(size == 0 ? "clear" : "download"), ret);
	return ret;
}
11887 
/**
 * t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH
 * @adap: the adapter
 * @cfg_data: where to store the read OptionROM configuration data
 * @size: number of bytes to read (must not exceed FLASH_BOOTCFG_MAX_SIZE)
 *
 * Read the current OptionROM configuration from FLASH and write to the
 * buffer @cfg_data supplied.  Returns 0 on success or a negative errno.
 */
int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size)
{
	u32 *ptr = (u32 *)cfg_data;
	int i, n, cfg_addr;
	int ret = 0;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too big, max is %u bytes\n",
			FLASH_BOOTCFG_MAX_SIZE);
		return -EINVAL;
	}

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	/*
	 * Convert @size from bytes to 32-bit words; t4_read_flash() takes a
	 * word count.
	 * NOTE(review): the loop below strides by SF_PAGE_SIZE over a WORD
	 * count while n is passed as words ("n*4" bytes of flash advance) —
	 * this mixes units the same way the upstream code does; in practice
	 * any size <= SF_PAGE_SIZE words is read in one pass.  Verify for
	 * larger sizes.
	 */
	size = size / sizeof (u32);
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ( (size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;

		/* Read n words starting at cfg_addr into the caller buffer. */
		ret = t4_read_flash(adap, cfg_addr, n, ptr, 0);
		if (ret)
			goto out;

		cfg_addr += (n*4);
		ptr += n;
	}

out:
	return ret;
}
11930 
11931 /**
11932  *	t4_set_filter_mode - configure the optional components of filter tuples
11933  *	@adap: the adapter
11934  *	@mode_map: a bitmap selcting which optional filter components to enable
11935  * 	@sleep_ok: if true we may sleep while awaiting command completion
11936  *
11937  *	Sets the filter mode by selecting the optional components to enable
11938  *	in filter tuples.  Returns 0 on success and a negative error if the
11939  *	requested mode needs more bits than are available for optional
11940  *	components.
11941  */
t4_set_filter_mode(struct adapter * adap,unsigned int mode_map,bool sleep_ok)11942 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
11943 		       bool sleep_ok)
11944 {
11945 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
11946 
11947 	int i, nbits = 0;
11948 
11949 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
11950 		if (mode_map & (1 << i))
11951 			nbits += width[i];
11952 	if (nbits > FILTER_OPT_LEN)
11953 		return -EINVAL;
11954 
11955 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
11956 
11957 	return 0;
11958 }
11959 
11960 /**
11961  *	t4_clr_port_stats - clear port statistics
11962  *	@adap: the adapter
11963  *	@idx: the port index
11964  *
11965  *	Clear HW statistics for the given port.
11966  */
t4_clr_port_stats(struct adapter * adap,int idx)11967 void t4_clr_port_stats(struct adapter *adap, int idx)
11968 {
11969 	unsigned int i;
11970 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
11971 	u32 port_base_addr;
11972 
11973 	if (is_t4(adap->params.chip))
11974 		port_base_addr = PORT_BASE(idx);
11975 	else
11976 		port_base_addr = T5_PORT_BASE(idx);
11977 
11978 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
11979 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
11980 		t4_write_reg(adap, port_base_addr + i, 0);
11981 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
11982 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
11983 		t4_write_reg(adap, port_base_addr + i, 0);
11984 	for (i = 0; i < 4; i++)
11985 		if (bgmap & (1 << i)) {
11986 			t4_write_reg(adap,
11987 			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
11988 			t4_write_reg(adap,
11989 			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
11990 		}
11991 }
11992 
/**
 *	t4_i2c_io - read/write I2C data from adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer in which to return I2C data for read
 *	      buffer which holds the I2C data for write
 *	@write: if true, do a write; else do a read
 *	Reads/Writes the I2C data from/to the indicated device and location
 *	via FW_LDST_CMD mailbox commands, chunked to the command's I2C data
 *	payload capacity.  Returns 0 on success or a negative errno.
 */
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf, bool write)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* Maximum I2C payload a single FW_LDST_CMD can carry. */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Don't allow transfers that span multiple I2C pages. */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Build the common part of the command once; per-chunk fields below. */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	/* 0xff marks a device that is not associated with a specific port. */
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst_cmd.u.i2c.did = devid;

	/* Transfer in chunks of at most i2c_max bytes. */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		if (write)
			memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);

		/* Reads need the reply buffer; writes do not. */
		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 write ? NULL : &ldst_rpl);
		if (ret)
			break;

		if (!write)
			memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}
12054 
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	/* Convenience wrapper: an I2C transfer with direction fixed to read. */
	const bool do_write = false;

	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, do_write);
}
12062 
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	/* Convenience wrapper: an I2C transfer with direction fixed to write. */
	const bool do_write = true;

	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, do_write);
}
12070 
12071 /**
12072  * 	t4_sge_ctxt_rd - read an SGE context through FW
12073  * 	@adap: the adapter
12074  * 	@mbox: mailbox to use for the FW command
12075  * 	@cid: the context id
12076  * 	@ctype: the context type
12077  * 	@data: where to store the context data
12078  *
12079  * 	Issues a FW command through the given mailbox to read an SGE context.
12080  */
t4_sge_ctxt_rd(struct adapter * adap,unsigned int mbox,unsigned int cid,enum ctxt_type ctype,u32 * data)12081 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
12082 		   enum ctxt_type ctype, u32 *data)
12083 {
12084 	int ret;
12085 	struct fw_ldst_cmd c;
12086 
12087 	if (ctype == CTXT_EGRESS)
12088 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
12089 	else if (ctype == CTXT_INGRESS)
12090 		ret = FW_LDST_ADDRSPC_SGE_INGC;
12091 	else if (ctype == CTXT_FLM)
12092 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
12093 	else
12094 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
12095 
12096 	memset(&c, 0, sizeof(c));
12097 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
12098 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
12099 					V_FW_LDST_CMD_ADDRSPACE(ret));
12100 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
12101 	c.u.idctxt.physid = cpu_to_be32(cid);
12102 
12103 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12104 	if (ret == 0) {
12105 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
12106 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
12107 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
12108 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
12109 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
12110 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
12111 	}
12112 	return ret;
12113 }
12114 
12115 /**
12116  * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
12117  * 	@adap: the adapter
12118  * 	@cid: the context id
12119  * 	@ctype: the context type
12120  * 	@data: where to store the context data
12121  *
12122  * 	Reads an SGE context directly, bypassing FW.  This is only for
12123  * 	debugging when FW is unavailable.
12124  */
t4_sge_ctxt_rd_bd(struct adapter * adap,unsigned int cid,enum ctxt_type ctype,u32 * data)12125 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
12126 		      u32 *data)
12127 {
12128 	int i, ret;
12129 
12130 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
12131 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
12132 	if (!ret)
12133 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
12134 			*data++ = t4_read_reg(adap, i);
12135 	return ret;
12136 }
12137 
t4_sched_config(struct adapter * adapter,int type,int minmaxen)12138 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
12139 {
12140 	struct fw_sched_cmd cmd;
12141 
12142 	memset(&cmd, 0, sizeof(cmd));
12143 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12144 				      F_FW_CMD_REQUEST |
12145 				      F_FW_CMD_WRITE);
12146 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12147 
12148 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
12149 	cmd.u.config.type = type;
12150 	cmd.u.config.minmaxen = minmaxen;
12151 
12152 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12153 			       NULL, 1);
12154 }
12155 
/*
 * t4_sched_params - program one TX scheduling class via FW_SCHED_CMD
 * @adapter: the adapter
 * @channel: TX channel the class belongs to
 * @cls: scheduling class index
 * @level, @mode, @type: class hierarchy level, mode and rate type
 * @rateunit, @ratemode: unit and mode for the rate parameters
 * @minrate, @maxrate: minimum/maximum rate
 * @weight: scheduling weight
 * @pktsize, @burstsize: packet and burst sizes
 *
 * Marshals the parameters into a FW_SCHED_CMD PARAMS sub-command and
 * issues it synchronously.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): 13 parameters — a params struct would be cleaner, but the
 * signature is kept for caller compatibility.
 */
int t4_sched_params(struct adapter *adapter,
		    int channel, int cls,
		    int level, int mode, int type,
		    int rateunit, int ratemode,
		    int minrate, int maxrate, int weight,
		    int pktsize, int burstsize)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Single-byte fields need no endianness conversion. */
	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cls;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	/* Multi-byte fields are carried big-endian on the wire. */
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);
	cmd.u.params.burstsize = cpu_to_be16(burstsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}
12188 
t4_read_sched_params(struct adapter * adapter,int channel,int cls,int * level,int * mode,int * type,int * rateunit,int * ratemode,int * minrate,int * maxrate,int * weight,int * pktsize,int * burstsize)12189 int t4_read_sched_params(struct adapter *adapter,
12190 		    int channel, int cls,
12191 		    int *level, int *mode, int *type,
12192 		    int *rateunit, int *ratemode,
12193 		    int *minrate, int *maxrate, int *weight,
12194 		    int *pktsize, int *burstsize)
12195 {
12196 	struct fw_sched_cmd cmd;
12197 	int ret = 0;
12198 
12199 	memset(&cmd, 0, sizeof(cmd));
12200 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12201 				      F_FW_CMD_REQUEST |
12202 				      F_FW_CMD_READ);
12203 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12204 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12205 	cmd.u.params.ch = channel;
12206 	cmd.u.params.cl = cls;
12207 
12208 	ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
12209 			      &cmd, 1);
12210 	if (ret)
12211 		goto out;
12212 
12213 	*level = cmd.u.params.level;
12214 	*mode = cmd.u.params.mode;
12215 	*type = cmd.u.params.type;
12216 	*rateunit = cmd.u.params.unit;
12217 	*ratemode = cmd.u.params.rate;
12218 	*minrate = be32_to_cpu(cmd.u.params.min);
12219 	*maxrate = be32_to_cpu(cmd.u.params.max);
12220 	*weight = be16_to_cpu(cmd.u.params.weight);
12221 	*pktsize = be16_to_cpu(cmd.u.params.pktsize);
12222 	*burstsize = be16_to_cpu(cmd.u.params.burstsize);
12223 
12224 out:
12225 	return ret;
12226 }
12227 
/*
 *	t4_config_watchdog - configure (enable/disable) a watchdog timer
 *	@adapter: the adapter
 * 	@mbox: mailbox to use for the FW command
 * 	@pf: the PF owning the queue
 * 	@vf: the VF owning the queue
 *	@timeout: watchdog timeout in ms
 *	@action: watchdog timer / action
 *
 *	There are separate watchdog timers for each possible watchdog
 *	action.  Configure one of the watchdog timers by setting a non-zero
 *	timeout.  Disable a watchdog timer by using a timeout of zero.
 *
 *	Returns 0 on success or a negative errno.
 */
int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
		       unsigned int pf, unsigned int vf,
		       unsigned int timeout, unsigned int action)
{
	struct fw_watchdog_cmd wdog;
	unsigned int ticks;

	/*
	 * The watchdog command expects a timeout in units of 10ms so we need
	 * to convert it here (via rounding) and force a minimum of one 10ms
	 * "tick" if the timeout is non-zero but the conversion results in 0
	 * ticks.
	 */
	ticks = (timeout + 5)/10;
	if (timeout && !ticks)
		ticks = 1;

	memset(&wdog, 0, sizeof wdog);
	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_PARAMS_CMD_PFN(pf) |
				     V_FW_PARAMS_CMD_VFN(vf));
	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
	wdog.timeout = cpu_to_be32(ticks);
	wdog.action = cpu_to_be32(action);

	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
}
12270 
t4_get_devlog_level(struct adapter * adapter,unsigned int * level)12271 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
12272 {
12273 	struct fw_devlog_cmd devlog_cmd;
12274 	int ret;
12275 
12276 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12277 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12278 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
12279 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12280 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12281 			 sizeof(devlog_cmd), &devlog_cmd);
12282 	if (ret)
12283 		return ret;
12284 
12285 	*level = devlog_cmd.level;
12286 	return 0;
12287 }
12288 
t4_set_devlog_level(struct adapter * adapter,unsigned int level)12289 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
12290 {
12291 	struct fw_devlog_cmd devlog_cmd;
12292 
12293 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12294 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12295 					     F_FW_CMD_REQUEST |
12296 					     F_FW_CMD_WRITE);
12297 	devlog_cmd.level = level;
12298 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12299 	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12300 			  sizeof(devlog_cmd), &devlog_cmd);
12301 }
12302 
t4_configure_add_smac(struct adapter * adap)12303 int t4_configure_add_smac(struct adapter *adap)
12304 {
12305 	unsigned int param, val;
12306 	int ret = 0;
12307 
12308 	adap->params.smac_add_support = 0;
12309 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12310 		  V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
12311 	/* Query FW to check if FW supports adding source mac address
12312 	 * to TCAM feature or not.
12313 	 * If FW returns 1, driver can use this feature and driver need to send
12314 	 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
12315 	 * enable adding smac to TCAM.
12316 	 */
12317 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12318 	if (ret)
12319 		return ret;
12320 
12321 	if (val == 1) {
12322 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
12323 				    &param, &val);
12324 		if (!ret)
12325 			/* Firmware allows adding explicit TCAM entries.
12326 			 * Save this internally.
12327 			 */
12328 			adap->params.smac_add_support = 1;
12329 	}
12330 
12331 	return ret;
12332 }
12333 
t4_configure_ringbb(struct adapter * adap)12334 int t4_configure_ringbb(struct adapter *adap)
12335 {
12336 	unsigned int param, val;
12337 	int ret = 0;
12338 
12339 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12340 		  V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
12341 	/* Query FW to check if FW supports ring switch feature or not.
12342 	 * If FW returns 1, driver can use this feature and driver need to send
12343 	 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
12344 	 * enable the ring backbone configuration.
12345 	 */
12346 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12347 	if (ret < 0) {
12348 		CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
12349 			ret);
12350 		goto out;
12351 	}
12352 
12353 	if (val != 1) {
12354 		CH_ERR(adap, "FW doesnot support ringbackbone features\n");
12355 		goto out;
12356 	}
12357 
12358 	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12359 	if (ret < 0) {
12360 		CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
12361 			ret);
12362 		goto out;
12363 	}
12364 
12365 out:
12366 	return ret;
12367 }
12368 
12369 /*
12370  *	t4_set_vlan_acl - Set a VLAN id for the specified VF
12371  *	@adapter: the adapter
12372  *	@mbox: mailbox to use for the FW command
12373  *	@vf: one of the VFs instantiated by the specified PF
12374  *	@vlan: The vlanid to be set
12375  *
12376  */
t4_set_vlan_acl(struct adapter * adap,unsigned int mbox,unsigned int vf,u16 vlan)12377 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
12378 		    u16 vlan)
12379 {
12380 	struct fw_acl_vlan_cmd vlan_cmd;
12381 	unsigned int enable;
12382 
12383 	enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
12384 	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
12385 	vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
12386 					 F_FW_CMD_REQUEST |
12387 					 F_FW_CMD_WRITE |
12388 					 F_FW_CMD_EXEC |
12389 					 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
12390 					 V_FW_ACL_VLAN_CMD_VFN(vf));
12391 	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
12392 	/* Drop all packets that donot match vlan id */
12393 	vlan_cmd.dropnovlan_fm = (enable
12394 				  ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
12395 				     F_FW_ACL_VLAN_CMD_FM)
12396 				  : 0);
12397 	if (enable != 0) {
12398 		vlan_cmd.nvlan = 1;
12399 		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
12400 	}
12401 
12402 	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
12403 }
12404 
/**
 *	t4_del_mac - Removes the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@addr: the MAC address value
 *	@smac: if true, delete from only the smac region of MPS
 *
 *	Issues a FW_VI_MAC_CMD with FW_VI_MAC_MAC_BASED_FREE so the firmware
 *	frees the exact-match TCAM entry matching @addr on the given VI.
 *
 *	Returns a negative error number on failure; see the NOTE below about
 *	the success-path return value.
 */
int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       const u8 *addr, bool smac)
{
	int ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(
					V_FW_CMD_LEN16(1) |
					(smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));

	/* MAC-based free: the firmware locates the entry by address. */
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	p->valid_to_idx = cpu_to_be16(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/*
		 * NOTE(review): this comparison is the opposite direction of
		 * the ">= max_mac_addr" check in t4_add_mac, so a valid
		 * returned index maps to -ENOMEM here.  It matches the
		 * upstream shared Chelsio code, but the intent should be
		 * verified against the FW_VI_MAC_CMD free semantics.
		 */
		if (ret < max_mac_addr)
			return -ENOMEM;
	}

	return ret;
}
12450 
/**
 *	t4_add_mac - Adds an exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: if non-NULL, also add the address to the HW SMT and return
 *		  the SMT index here
 *	@smac: if true, update only the smac region of MPS
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Requesting an SMT index implies updating both SMT and MPS TCAM. */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(
				V_FW_CMD_LEN16(1) |
				(smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The firmware echoes back the TCAM index actually used. */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* An out-of-range index means the exact-match table is full. */
		if (ret >= max_mac_addr)
			return -ENOMEM;
		if (smt_idx) {
			/* Does fw supports returning smt_idx? */
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				/* In T4/T5, SMT contains 256 SMAC entries
				 * organized in 128 rows of 2 entries each.
				 * In T6, SMT contains 256 SMAC entries in
				 * 256 rows.
				 */
				if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
					*smt_idx = ((viid & M_FW_VIID_VIN) << 1);
				else
					*smt_idx = (viid & M_FW_VIID_VIN);
			}
		}
	}

	return ret;
}
12518 
12519