1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 #include "opt_inet.h"
30
31 #include <sys/param.h>
32 #include <sys/eventhandler.h>
33
34 #include "common.h"
35 #include "t4_regs.h"
36 #include "t4_regs_values.h"
37 #include "firmware/t4fw_interface.h"
38
#undef msleep
/*
 * msleep(x) sleeps for roughly @x milliseconds.  During early boot (cold)
 * the scheduler is unavailable so we busy-wait with DELAY(); otherwise we
 * yield the CPU with pause().
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
46
47 /**
48 * t4_wait_op_done_val - wait until an operation is completed
49 * @adapter: the adapter performing the operation
50 * @reg: the register to check for completion
51 * @mask: a single-bit field within @reg that indicates completion
52 * @polarity: the value of the field when the operation is completed
53 * @attempts: number of check iterations
54 * @delay: delay in usecs between iterations
55 * @valp: where to store the value of the register at completion time
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. If @valp is not NULL the value of the register
59 * at the time it indicated completion is stored there. Returns 0 if the
60 * operation completes and -EAGAIN otherwise.
61 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	for (;;) {
		u32 v = t4_read_reg(adapter, reg);

		/* Done when the masked field matches the requested polarity. */
		if (!!(v & mask) == polarity) {
			if (valp != NULL)
				*valp = v;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay != 0)
			udelay(delay);
	}
}
79
/* Same as t4_wait_op_done_val() but discards the final register value. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
86
87 /**
88 * t7_wait_sram_done - wait until an operation is completed
89 * @adapter: the adapter performing the operation
90 * @reg: the register to check for completion
91 * @result_reg: register that holds the result value
92 * @attempts: number of check iterations
93 * @delay: delay in usecs between iterations
94 * @valp: where to store the value of the result register at completion time
95 *
96 * Waits until a specific bit in @reg is cleared, checking up to
* @attempts times. Once the bit is cleared, reads from @result_reg
98 * and stores the value in @valp if it is not NULL. Returns 0 if the
99 * operation completes successfully and -EAGAIN if it times out.
100 */
static int t7_wait_sram_done(struct adapter *adap, int reg, int result_reg,
			     int attempts, int delay, u32 *valp)
{
	const u32 sram_start = 1U << 19;	/* SramStart bit in @reg */

	for (;;) {
		u32 v = t4_read_reg(adap, reg);

		/* The operation is complete once SramStart drops. */
		if ((v & sram_start) == 0) {
			if (valp != NULL)
				*valp = t4_read_reg(adap, result_reg);
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay != 0)
			udelay(delay);
	}
}
121
122 /**
123 * t4_set_reg_field - set a register field to a value
124 * @adapter: the adapter to program
125 * @addr: the register address
126 * @mask: specifies the portion of the register to modify
127 * @val: the new value for the register field
128 *
129 * Sets a register field specified by the supplied mask to the
130 * given value.
131 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 old, new;

	/* Read-modify-write: clear the masked field, then OR in @val. */
	old = t4_read_reg(adapter, addr);
	new = (old & ~mask) | val;
	t4_write_reg(adapter, addr, new);
	(void)t4_read_reg(adapter, addr);	/* read back to flush */
}
140
141 /**
142 * t4_read_indirect - read indirectly addressed registers
143 * @adap: the adapter
144 * @addr_reg: register holding the indirect address
145 * @data_reg: register holding the value of the indirect register
146 * @vals: where the read register values are stored
147 * @nregs: how many indirect registers to read
148 * @start_idx: index of first indirect register to read
149 *
150 * Reads registers that are accessed indirectly through an address/data
151 * register pair.
152 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	/* Select each indirect register, then read its value. */
	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		vals[i] = t4_read_reg(adap, data_reg);
	}
}
163
164 /**
165 * t4_write_indirect - write indirectly addressed registers
166 * @adap: the adapter
167 * @addr_reg: register holding the indirect addresses
168 * @data_reg: register holding the value for the indirect registers
169 * @vals: values to write
170 * @nregs: how many indirect registers to write
171 * @start_idx: address of first indirect register to write
172 *
173 * Writes a sequential block of registers that are accessed indirectly
174 * through an address/data register pair.
175 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	/* Select each indirect register, then write its new value. */
	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		t4_write_reg(adap, data_reg, vals[i]);
	}
}
185
186 /*
187 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
188 * mechanism. This guarantees that we get the real value even if we're
189 * operating within a Virtual Machine and the Hypervisor is trapping our
190 * Configuration Space accesses.
191 *
192 * N.B. This routine should only be used as a last resort: the firmware uses
193 * the backdoor registers on a regular basis and we can end up
* conflicting with its uses!
195 */
t4_hw_pci_read_cfg4(adapter_t * adap,int reg)196 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
197 {
198 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
199 u32 val;
200
201 if (chip_id(adap) <= CHELSIO_T5)
202 req |= F_ENABLE;
203 else
204 req |= F_T6_ENABLE;
205
206 if (is_t4(adap))
207 req |= F_LOCALCFG;
208
209 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
210 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
211
212 /*
213 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
214 * Configuration Space read. (None of the other fields matter when
215 * F_ENABLE is 0 so a simple register write is easier than a
216 * read-modify-write via t4_set_reg_field().)
217 */
218 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
219
220 return val;
221 }
222
223 /*
224 * t4_report_fw_error - report firmware error
225 * @adap: the adapter
226 *
227 * The adapter firmware can indicate error conditions to the host.
228 * If the firmware has indicated an error, print out the reason for
229 * the firmware error.
230 */
t4_report_fw_error(struct adapter * adap)231 void t4_report_fw_error(struct adapter *adap)
232 {
233 static const char *const reason[] = {
234 "Crash", /* PCIE_FW_EVAL_CRASH */
235 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
236 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
237 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
238 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
239 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
240 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
241 "Reserved", /* reserved */
242 };
243 u32 pcie_fw;
244
245 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
246 if (pcie_fw & F_PCIE_FW_ERR) {
247 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
248 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
249 }
250 }
251
252 /*
253 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
254 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	int i;

	/* Copy @nflit 64-bit flits out of the mailbox, big-endian. */
	for (i = 0; i < nflit; i++)
		rpl[i] = cpu_to_be64(t4_read_reg64(adap, mbox_addr + 8 * i));
}
261
262 /*
263 * Handle a FW assertion reported in a mailbox.
264 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * Log the firmware's source file (first 8 chars only here), line
	 * number, and the two values it attached to the assertion.
	 */
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
274
/* Per-channel snapshot of MPS port counters used to detect stalled TX. */
struct port_tx_state {
	uint64_t rx_pause;	/* A_MPS_PORT_STAT_RX_PORT_PAUSE_L counter */
	uint64_t tx_frames;	/* A_MPS_PORT_STAT_TX_PORT_FRAMES_L counter */
};
279
280 u32
t4_port_reg(struct adapter * adap,u8 port,u32 reg)281 t4_port_reg(struct adapter *adap, u8 port, u32 reg)
282 {
283 if (chip_id(adap) > CHELSIO_T6)
284 return T7_PORT_REG(port, reg);
285 if (chip_id(adap) > CHELSIO_T4)
286 return T5_PORT_REG(port, reg);
287 return PORT_REG(port, reg);
288 }
289
290 static void
read_tx_state_one(struct adapter * sc,int i,struct port_tx_state * tx_state)291 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
292 {
293 uint32_t rx_pause_reg, tx_frames_reg;
294
295 rx_pause_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
296 tx_frames_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
297
298 tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
299 tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
300 }
301
302 static void
read_tx_state(struct adapter * sc,struct port_tx_state * tx_state)303 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
304 {
305 int i;
306
307 for (i = 0; i < MAX_NCHAN; i++) {
308 if (sc->chan_map[i] != 0xff)
309 read_tx_state_one(sc, i, &tx_state[i]);
310 }
311 }
312
/*
 * Compare each channel's counters against the previous snapshot taken by
 * read_tx_state() and refresh the snapshot.  If TX is enabled on a port but
 * new pause frames arrived while no frames at all went out, toggle PORTTXEN
 * off and back on — presumably to recover a wedged TX path (NOTE(review):
 * confirm the exact hang this works around against the hardware errata).
 */
static void
check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	uint32_t port_ctl_reg;
	uint64_t tx_frames, rx_pause;
	int i;

	for (i = 0; i < MAX_NCHAN; i++) {
		if (sc->chan_map[i] == 0xff)
			continue;	/* channel not in use */
		rx_pause = tx_state[i].rx_pause;
		tx_frames = tx_state[i].tx_frames;
		read_tx_state_one(sc, i, &tx_state[i]); /* update */

		port_ctl_reg = t4_port_reg(sc, i, A_MPS_PORT_CTL);
		if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
		    rx_pause != tx_state[i].rx_pause &&
		    tx_frames == tx_state[i].tx_frames) {
			/* Pause count moved, frame count didn't: bounce TX. */
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
			mdelay(1);
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
		}
	}
}
337
/* Value read back from the mailbox control register when it's inaccessible. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
339 /**
340 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
341 * @adap: the adapter
342 * @mbox: index of the mailbox to use
343 * @cmd: the command to write
344 * @size: command length in bytes
345 * @rpl: where to optionally store the reply
346 * @sleep_ok: if true we may sleep while awaiting command completion
347 * @timeout: time to wait for command to finish before timing out
348 * (negative implies @sleep_ok=false)
349 *
350 * Sends the given command to FW through the selected mailbox and waits
351 * for the FW to execute the command. If @rpl is not %NULL it is used to
352 * store the FW's reply to the command. The command and its optional
353 * reply are of the same length. Some FW commands like RESET and
354 * INITIALIZE can take a considerable amount of time to execute.
355 * @sleep_ok determines whether we may sleep while awaiting the response.
356 * If sleeping is allowed we use progressive backoff otherwise we spin.
357 * Note that passing in a negative @timeout is an alternate mechanism
358 * for specifying @sleep_ok=false. This is useful when a higher level
359 * interface allows for specification of @timeout but not @sleep_ok ...
360 *
361 * The return value is 0 on success or a negative errno on failure. A
362 * failure can happen either because we are not able to execute the
363 * command or FW executes it but signals an error. In the latter case
364 * the return value is the error code indicated by FW (negated).
365 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret, next_tx_check;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;
	struct port_tx_state tx_state[MAX_NPORTS];

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands are 16-byte multiples and must fit in one mailbox. */
	if (size <= 0 || (size & 15) || size > MBOX_LEN)
		return -EINVAL;

	if (adap->flags & IS_VF) {
		/* VFs use dedicated mailbox data/control registers. */
		if (chip_id(adap) >= CHELSIO_T6)
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	pcie_fw = 0;
	if (!(adap->flags & IS_VF)) {
		/* Bail out early if the firmware already reported an error. */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (pcie_fw & F_PCIE_FW_ERR)
			goto failed;
	}
	/* Poll the owner field a few times; it can change between reads. */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				goto failed;
		}
		/* FW owns it: busy; nobody owns it: timed out waiting. */
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	memset(cmd_rpl, 0, sizeof(cmd_rpl));
	memcpy(cmd_rpl, cmd, size);
	CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
	for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
		t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	/* Hand the mailbox (and the message in it) over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	read_tx_state(adap, &tx_state[0]);	/* also flushes the write_reg */
	next_tx_check = 1000;
	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}

		/* Roughly once a second, check for (and bounce) stuck TX. */
		if (i >= next_tx_check) {
			check_tx_state(adap, &tx_state[0]);
			next_tx_check = i + 1000;
		}

		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* Ownership came back without a reply. */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* FW assertion, not a reply to our command. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
	       *(const u8 *)cmd, mbox, pcie_fw);
	CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
	CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
failed:
	adap->flags &= ~FW_OK;
	ret = pcie_fw & F_PCIE_FW_ERR ? -ENXIO : -ETIMEDOUT;
	t4_fatal_err(adap, true);
	return ret;
}
547
/* t4_wr_mbox_meat_timeout() with the default firmware command timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
	    sleep_ok, FW_CMD_MAX_TIMEOUT);
}
554
/*
 * Log the ECC error address and the nine 64-bit BIST status words for
 * EDC @idx (MEM_EDC0 or MEM_EDC1).  T5+ only; always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		/* T4 has a different EDC register layout; not handled here. */
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		" edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Dump all nine consecutive 64-bit BIST status words. */
	CH_WARN(adap,
		" bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
591
592 /**
593 * t4_mc_read - read from MC through backdoor accesses
594 * @adap: the adapter
595 * @idx: which MC to access
596 * @addr: address of first byte requested
597 * @data: 64 bytes of data containing the requested address
598 * @ecc: where to store the corresponding 64-bit ECC word
599 *
600 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
* that covers the requested address @addr. If @ecc is not %NULL it
602 * is assigned the 64-bit ECC word for the read data.
603 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* Select the BIST register set for this chip generation. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else if (chip_id(adap) < CHELSIO_T7) {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, idx);
	} else {
		/* Need to figure out split mode and the rest. */
		return (-ENOTSUP);
	}

	/* Refuse to stomp on an already-running BIST operation. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the aligned address, then kick it off. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	/* Wait (up to 10 x 1us) for F_START_BIST to clear. */
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* The status words hold the line in reverse 32-bit word order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
647
648 /**
649 * t4_edc_read - read from EDC through backdoor accesses
650 * @adap: the adapter
651 * @idx: which EDC to access
652 * @addr: address of first byte requested
653 * @data: 64 bytes of data containing the requested address
654 * @ecc: where to store the corresponding 64-bit ECC word
655 *
656 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
* that covers the requested address @addr. If @ecc is not %NULL it
658 * is assigned the 64-bit ECC word for the read data.
659 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	/* T4 and T5+ use different EDC BIST register layouts. */
	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
		edc_bist_cmd_reg = EDC_T5_REG(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_T5_REG(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
	}

	/* Refuse to stomp on an already-running BIST operation. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the aligned address, then kick it off. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	/* Wait (up to 10 x 1us) for F_START_BIST to clear. */
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* The status words hold the line in reverse 32-bit word order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
704
705 /**
706 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
707 * @adap: the adapter
708 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
709 * @addr: address within indicated memory type
710 * @len: amount of memory to read
711 * @buf: host memory buffer
712 *
713 * Reads an [almost] arbitrary memory region in the firmware: the
714 * firmware memory address, length and host buffer must be aligned on
* 32-bit boundaries. The memory is returned as a raw byte sequence from
716 * the firmware's memory. If this memory contains data structures which
717 * contain multi-byte integers, it's the callers responsibility to
718 * perform appropriate byte order conversions.
719 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/* Both the address and the length must be 32-bit aligned. */
	if ((addr & 0x3) != 0 || (len & 0x3) != 0)
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines fetch 64 bytes at a time, so
	 * round the start down and the end up to full lines.  Copying out of
	 * the first line begins at word offset (addr - start)/4; subsequent
	 * lines are copied from their beginning.
	 */
	start = rounddown2(addr, 64);
	end = roundup2(addr + len, 64);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 line[16];

		/* Fetch one 64-byte line; bail on the first failure. */
		if (mtype == MEM_MC || mtype == MEM_MC1)
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, line, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, line, NULL);
		if (ret != 0)
			return ret;

		/* Copy the words the caller asked for into the buffer. */
		for (; offset < 16 && len > 0; offset++) {
			*buf++ = line[offset];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
765
766 /*
767 * Return the specified PCI-E Configuration Space register from our Physical
768 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
769 * since we prefer to let the firmware own all of these registers, but if that
770 * fails we go for it directly ourselves.
771 */
t4_read_pcie_cfg4(struct adapter * adap,int reg,int drv_fw_attach)772 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
773 {
774
775 /*
776 * If fw_attach != 0, construct and send the Firmware LDST Command to
777 * retrieve the specified PCI-E Configuration Space register.
778 */
779 if (drv_fw_attach != 0) {
780 struct fw_ldst_cmd ldst_cmd;
781 int ret;
782
783 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
784 ldst_cmd.op_to_addrspace =
785 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
786 F_FW_CMD_REQUEST |
787 F_FW_CMD_READ |
788 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
789 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
790 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
791 ldst_cmd.u.pcie.ctrl_to_fn =
792 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
793 ldst_cmd.u.pcie.r = reg;
794
795 /*
796 * If the LDST Command succeeds, return the result, otherwise
797 * fall through to reading it directly ourselves ...
798 */
799 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
800 &ldst_cmd);
801 if (ret == 0)
802 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
803
804 CH_WARN(adap, "Firmware failed to return "
805 "Configuration Space register %d, err = %d\n",
806 reg, -ret);
807 }
808
809 /*
810 * Read the desired Configuration Space register via the PCI-E
811 * Backdoor mechanism.
812 */
813 return t4_hw_pci_read_cfg4(adap, reg);
814 }
815
816 /**
817 * t4_get_regs_len - return the size of the chips register set
818 * @adapter: the adapter
819 *
820 * Returns the size of the chip's BAR0 register space.
821 */
t4_get_regs_len(struct adapter * adapter)822 unsigned int t4_get_regs_len(struct adapter *adapter)
823 {
824 unsigned int chip_version = chip_id(adapter);
825
826 switch (chip_version) {
827 case CHELSIO_T4:
828 if (adapter->flags & IS_VF)
829 return FW_T4VF_REGMAP_SIZE;
830 return T4_REGMAP_SIZE;
831
832 case CHELSIO_T5:
833 case CHELSIO_T6:
834 case CHELSIO_T7:
835 if (adapter->flags & IS_VF)
836 return FW_T4VF_REGMAP_SIZE;
837 return T5_REGMAP_SIZE;
838 }
839
840 CH_ERR(adapter,
841 "Unsupported chip version %d\n", chip_version);
842 return 0;
843 }
844
845 /**
846 * t4_get_regs - read chip registers into provided buffer
847 * @adap: the adapter
848 * @buf: register buffer
849 * @buf_size: size (in bytes) of register buffer
850 *
851 * If the provided register buffer isn't large enough for the chip's
852 * full register range, the register dump will be truncated to the
853 * register buffer's size.
854 */
t4_get_regs(struct adapter * adap,u8 * buf,size_t buf_size)855 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
856 {
857 static const unsigned int t4_reg_ranges[] = {
858 0x1008, 0x1108,
859 0x1180, 0x1184,
860 0x1190, 0x1194,
861 0x11a0, 0x11a4,
862 0x11b0, 0x11b4,
863 0x11fc, 0x123c,
864 0x1300, 0x173c,
865 0x1800, 0x18fc,
866 0x3000, 0x30d8,
867 0x30e0, 0x30e4,
868 0x30ec, 0x5910,
869 0x5920, 0x5924,
870 0x5960, 0x5960,
871 0x5968, 0x5968,
872 0x5970, 0x5970,
873 0x5978, 0x5978,
874 0x5980, 0x5980,
875 0x5988, 0x5988,
876 0x5990, 0x5990,
877 0x5998, 0x5998,
878 0x59a0, 0x59d4,
879 0x5a00, 0x5ae0,
880 0x5ae8, 0x5ae8,
881 0x5af0, 0x5af0,
882 0x5af8, 0x5af8,
883 0x6000, 0x6098,
884 0x6100, 0x6150,
885 0x6200, 0x6208,
886 0x6240, 0x6248,
887 0x6280, 0x62b0,
888 0x62c0, 0x6338,
889 0x6370, 0x638c,
890 0x6400, 0x643c,
891 0x6500, 0x6524,
892 0x6a00, 0x6a04,
893 0x6a14, 0x6a38,
894 0x6a60, 0x6a70,
895 0x6a78, 0x6a78,
896 0x6b00, 0x6b0c,
897 0x6b1c, 0x6b84,
898 0x6bf0, 0x6bf8,
899 0x6c00, 0x6c0c,
900 0x6c1c, 0x6c84,
901 0x6cf0, 0x6cf8,
902 0x6d00, 0x6d0c,
903 0x6d1c, 0x6d84,
904 0x6df0, 0x6df8,
905 0x6e00, 0x6e0c,
906 0x6e1c, 0x6e84,
907 0x6ef0, 0x6ef8,
908 0x6f00, 0x6f0c,
909 0x6f1c, 0x6f84,
910 0x6ff0, 0x6ff8,
911 0x7000, 0x700c,
912 0x701c, 0x7084,
913 0x70f0, 0x70f8,
914 0x7100, 0x710c,
915 0x711c, 0x7184,
916 0x71f0, 0x71f8,
917 0x7200, 0x720c,
918 0x721c, 0x7284,
919 0x72f0, 0x72f8,
920 0x7300, 0x730c,
921 0x731c, 0x7384,
922 0x73f0, 0x73f8,
923 0x7400, 0x7450,
924 0x7500, 0x7530,
925 0x7600, 0x760c,
926 0x7614, 0x761c,
927 0x7680, 0x76cc,
928 0x7700, 0x7798,
929 0x77c0, 0x77fc,
930 0x7900, 0x79fc,
931 0x7b00, 0x7b58,
932 0x7b60, 0x7b84,
933 0x7b8c, 0x7c38,
934 0x7d00, 0x7d38,
935 0x7d40, 0x7d80,
936 0x7d8c, 0x7ddc,
937 0x7de4, 0x7e04,
938 0x7e10, 0x7e1c,
939 0x7e24, 0x7e38,
940 0x7e40, 0x7e44,
941 0x7e4c, 0x7e78,
942 0x7e80, 0x7ea4,
943 0x7eac, 0x7edc,
944 0x7ee8, 0x7efc,
945 0x8dc0, 0x8e04,
946 0x8e10, 0x8e1c,
947 0x8e30, 0x8e78,
948 0x8ea0, 0x8eb8,
949 0x8ec0, 0x8f6c,
950 0x8fc0, 0x9008,
951 0x9010, 0x9058,
952 0x9060, 0x9060,
953 0x9068, 0x9074,
954 0x90fc, 0x90fc,
955 0x9400, 0x9408,
956 0x9410, 0x9458,
957 0x9600, 0x9600,
958 0x9608, 0x9638,
959 0x9640, 0x96bc,
960 0x9800, 0x9808,
961 0x9820, 0x983c,
962 0x9850, 0x9864,
963 0x9c00, 0x9c6c,
964 0x9c80, 0x9cec,
965 0x9d00, 0x9d6c,
966 0x9d80, 0x9dec,
967 0x9e00, 0x9e6c,
968 0x9e80, 0x9eec,
969 0x9f00, 0x9f6c,
970 0x9f80, 0x9fec,
971 0xd004, 0xd004,
972 0xd010, 0xd03c,
973 0xdfc0, 0xdfe0,
974 0xe000, 0xea7c,
975 0xf000, 0x11110,
976 0x11118, 0x11190,
977 0x19040, 0x1906c,
978 0x19078, 0x19080,
979 0x1908c, 0x190e4,
980 0x190f0, 0x190f8,
981 0x19100, 0x19110,
982 0x19120, 0x19124,
983 0x19150, 0x19194,
984 0x1919c, 0x191b0,
985 0x191d0, 0x191e8,
986 0x19238, 0x1924c,
987 0x193f8, 0x1943c,
988 0x1944c, 0x19474,
989 0x19490, 0x194e0,
990 0x194f0, 0x194f8,
991 0x19800, 0x19c08,
992 0x19c10, 0x19c90,
993 0x19ca0, 0x19ce4,
994 0x19cf0, 0x19d40,
995 0x19d50, 0x19d94,
996 0x19da0, 0x19de8,
997 0x19df0, 0x19e40,
998 0x19e50, 0x19e90,
999 0x19ea0, 0x19f4c,
1000 0x1a000, 0x1a004,
1001 0x1a010, 0x1a06c,
1002 0x1a0b0, 0x1a0e4,
1003 0x1a0ec, 0x1a0f4,
1004 0x1a100, 0x1a108,
1005 0x1a114, 0x1a120,
1006 0x1a128, 0x1a130,
1007 0x1a138, 0x1a138,
1008 0x1a190, 0x1a1c4,
1009 0x1a1fc, 0x1a1fc,
1010 0x1e040, 0x1e04c,
1011 0x1e284, 0x1e28c,
1012 0x1e2c0, 0x1e2c0,
1013 0x1e2e0, 0x1e2e0,
1014 0x1e300, 0x1e384,
1015 0x1e3c0, 0x1e3c8,
1016 0x1e440, 0x1e44c,
1017 0x1e684, 0x1e68c,
1018 0x1e6c0, 0x1e6c0,
1019 0x1e6e0, 0x1e6e0,
1020 0x1e700, 0x1e784,
1021 0x1e7c0, 0x1e7c8,
1022 0x1e840, 0x1e84c,
1023 0x1ea84, 0x1ea8c,
1024 0x1eac0, 0x1eac0,
1025 0x1eae0, 0x1eae0,
1026 0x1eb00, 0x1eb84,
1027 0x1ebc0, 0x1ebc8,
1028 0x1ec40, 0x1ec4c,
1029 0x1ee84, 0x1ee8c,
1030 0x1eec0, 0x1eec0,
1031 0x1eee0, 0x1eee0,
1032 0x1ef00, 0x1ef84,
1033 0x1efc0, 0x1efc8,
1034 0x1f040, 0x1f04c,
1035 0x1f284, 0x1f28c,
1036 0x1f2c0, 0x1f2c0,
1037 0x1f2e0, 0x1f2e0,
1038 0x1f300, 0x1f384,
1039 0x1f3c0, 0x1f3c8,
1040 0x1f440, 0x1f44c,
1041 0x1f684, 0x1f68c,
1042 0x1f6c0, 0x1f6c0,
1043 0x1f6e0, 0x1f6e0,
1044 0x1f700, 0x1f784,
1045 0x1f7c0, 0x1f7c8,
1046 0x1f840, 0x1f84c,
1047 0x1fa84, 0x1fa8c,
1048 0x1fac0, 0x1fac0,
1049 0x1fae0, 0x1fae0,
1050 0x1fb00, 0x1fb84,
1051 0x1fbc0, 0x1fbc8,
1052 0x1fc40, 0x1fc4c,
1053 0x1fe84, 0x1fe8c,
1054 0x1fec0, 0x1fec0,
1055 0x1fee0, 0x1fee0,
1056 0x1ff00, 0x1ff84,
1057 0x1ffc0, 0x1ffc8,
1058 0x20000, 0x2002c,
1059 0x20100, 0x2013c,
1060 0x20190, 0x201a0,
1061 0x201a8, 0x201b8,
1062 0x201c4, 0x201c8,
1063 0x20200, 0x20318,
1064 0x20400, 0x204b4,
1065 0x204c0, 0x20528,
1066 0x20540, 0x20614,
1067 0x21000, 0x21040,
1068 0x2104c, 0x21060,
1069 0x210c0, 0x210ec,
1070 0x21200, 0x21268,
1071 0x21270, 0x21284,
1072 0x212fc, 0x21388,
1073 0x21400, 0x21404,
1074 0x21500, 0x21500,
1075 0x21510, 0x21518,
1076 0x2152c, 0x21530,
1077 0x2153c, 0x2153c,
1078 0x21550, 0x21554,
1079 0x21600, 0x21600,
1080 0x21608, 0x2161c,
1081 0x21624, 0x21628,
1082 0x21630, 0x21634,
1083 0x2163c, 0x2163c,
1084 0x21700, 0x2171c,
1085 0x21780, 0x2178c,
1086 0x21800, 0x21818,
1087 0x21820, 0x21828,
1088 0x21830, 0x21848,
1089 0x21850, 0x21854,
1090 0x21860, 0x21868,
1091 0x21870, 0x21870,
1092 0x21878, 0x21898,
1093 0x218a0, 0x218a8,
1094 0x218b0, 0x218c8,
1095 0x218d0, 0x218d4,
1096 0x218e0, 0x218e8,
1097 0x218f0, 0x218f0,
1098 0x218f8, 0x21a18,
1099 0x21a20, 0x21a28,
1100 0x21a30, 0x21a48,
1101 0x21a50, 0x21a54,
1102 0x21a60, 0x21a68,
1103 0x21a70, 0x21a70,
1104 0x21a78, 0x21a98,
1105 0x21aa0, 0x21aa8,
1106 0x21ab0, 0x21ac8,
1107 0x21ad0, 0x21ad4,
1108 0x21ae0, 0x21ae8,
1109 0x21af0, 0x21af0,
1110 0x21af8, 0x21c18,
1111 0x21c20, 0x21c20,
1112 0x21c28, 0x21c30,
1113 0x21c38, 0x21c38,
1114 0x21c80, 0x21c98,
1115 0x21ca0, 0x21ca8,
1116 0x21cb0, 0x21cc8,
1117 0x21cd0, 0x21cd4,
1118 0x21ce0, 0x21ce8,
1119 0x21cf0, 0x21cf0,
1120 0x21cf8, 0x21d7c,
1121 0x21e00, 0x21e04,
1122 0x22000, 0x2202c,
1123 0x22100, 0x2213c,
1124 0x22190, 0x221a0,
1125 0x221a8, 0x221b8,
1126 0x221c4, 0x221c8,
1127 0x22200, 0x22318,
1128 0x22400, 0x224b4,
1129 0x224c0, 0x22528,
1130 0x22540, 0x22614,
1131 0x23000, 0x23040,
1132 0x2304c, 0x23060,
1133 0x230c0, 0x230ec,
1134 0x23200, 0x23268,
1135 0x23270, 0x23284,
1136 0x232fc, 0x23388,
1137 0x23400, 0x23404,
1138 0x23500, 0x23500,
1139 0x23510, 0x23518,
1140 0x2352c, 0x23530,
1141 0x2353c, 0x2353c,
1142 0x23550, 0x23554,
1143 0x23600, 0x23600,
1144 0x23608, 0x2361c,
1145 0x23624, 0x23628,
1146 0x23630, 0x23634,
1147 0x2363c, 0x2363c,
1148 0x23700, 0x2371c,
1149 0x23780, 0x2378c,
1150 0x23800, 0x23818,
1151 0x23820, 0x23828,
1152 0x23830, 0x23848,
1153 0x23850, 0x23854,
1154 0x23860, 0x23868,
1155 0x23870, 0x23870,
1156 0x23878, 0x23898,
1157 0x238a0, 0x238a8,
1158 0x238b0, 0x238c8,
1159 0x238d0, 0x238d4,
1160 0x238e0, 0x238e8,
1161 0x238f0, 0x238f0,
1162 0x238f8, 0x23a18,
1163 0x23a20, 0x23a28,
1164 0x23a30, 0x23a48,
1165 0x23a50, 0x23a54,
1166 0x23a60, 0x23a68,
1167 0x23a70, 0x23a70,
1168 0x23a78, 0x23a98,
1169 0x23aa0, 0x23aa8,
1170 0x23ab0, 0x23ac8,
1171 0x23ad0, 0x23ad4,
1172 0x23ae0, 0x23ae8,
1173 0x23af0, 0x23af0,
1174 0x23af8, 0x23c18,
1175 0x23c20, 0x23c20,
1176 0x23c28, 0x23c30,
1177 0x23c38, 0x23c38,
1178 0x23c80, 0x23c98,
1179 0x23ca0, 0x23ca8,
1180 0x23cb0, 0x23cc8,
1181 0x23cd0, 0x23cd4,
1182 0x23ce0, 0x23ce8,
1183 0x23cf0, 0x23cf0,
1184 0x23cf8, 0x23d7c,
1185 0x23e00, 0x23e04,
1186 0x24000, 0x2402c,
1187 0x24100, 0x2413c,
1188 0x24190, 0x241a0,
1189 0x241a8, 0x241b8,
1190 0x241c4, 0x241c8,
1191 0x24200, 0x24318,
1192 0x24400, 0x244b4,
1193 0x244c0, 0x24528,
1194 0x24540, 0x24614,
1195 0x25000, 0x25040,
1196 0x2504c, 0x25060,
1197 0x250c0, 0x250ec,
1198 0x25200, 0x25268,
1199 0x25270, 0x25284,
1200 0x252fc, 0x25388,
1201 0x25400, 0x25404,
1202 0x25500, 0x25500,
1203 0x25510, 0x25518,
1204 0x2552c, 0x25530,
1205 0x2553c, 0x2553c,
1206 0x25550, 0x25554,
1207 0x25600, 0x25600,
1208 0x25608, 0x2561c,
1209 0x25624, 0x25628,
1210 0x25630, 0x25634,
1211 0x2563c, 0x2563c,
1212 0x25700, 0x2571c,
1213 0x25780, 0x2578c,
1214 0x25800, 0x25818,
1215 0x25820, 0x25828,
1216 0x25830, 0x25848,
1217 0x25850, 0x25854,
1218 0x25860, 0x25868,
1219 0x25870, 0x25870,
1220 0x25878, 0x25898,
1221 0x258a0, 0x258a8,
1222 0x258b0, 0x258c8,
1223 0x258d0, 0x258d4,
1224 0x258e0, 0x258e8,
1225 0x258f0, 0x258f0,
1226 0x258f8, 0x25a18,
1227 0x25a20, 0x25a28,
1228 0x25a30, 0x25a48,
1229 0x25a50, 0x25a54,
1230 0x25a60, 0x25a68,
1231 0x25a70, 0x25a70,
1232 0x25a78, 0x25a98,
1233 0x25aa0, 0x25aa8,
1234 0x25ab0, 0x25ac8,
1235 0x25ad0, 0x25ad4,
1236 0x25ae0, 0x25ae8,
1237 0x25af0, 0x25af0,
1238 0x25af8, 0x25c18,
1239 0x25c20, 0x25c20,
1240 0x25c28, 0x25c30,
1241 0x25c38, 0x25c38,
1242 0x25c80, 0x25c98,
1243 0x25ca0, 0x25ca8,
1244 0x25cb0, 0x25cc8,
1245 0x25cd0, 0x25cd4,
1246 0x25ce0, 0x25ce8,
1247 0x25cf0, 0x25cf0,
1248 0x25cf8, 0x25d7c,
1249 0x25e00, 0x25e04,
1250 0x26000, 0x2602c,
1251 0x26100, 0x2613c,
1252 0x26190, 0x261a0,
1253 0x261a8, 0x261b8,
1254 0x261c4, 0x261c8,
1255 0x26200, 0x26318,
1256 0x26400, 0x264b4,
1257 0x264c0, 0x26528,
1258 0x26540, 0x26614,
1259 0x27000, 0x27040,
1260 0x2704c, 0x27060,
1261 0x270c0, 0x270ec,
1262 0x27200, 0x27268,
1263 0x27270, 0x27284,
1264 0x272fc, 0x27388,
1265 0x27400, 0x27404,
1266 0x27500, 0x27500,
1267 0x27510, 0x27518,
1268 0x2752c, 0x27530,
1269 0x2753c, 0x2753c,
1270 0x27550, 0x27554,
1271 0x27600, 0x27600,
1272 0x27608, 0x2761c,
1273 0x27624, 0x27628,
1274 0x27630, 0x27634,
1275 0x2763c, 0x2763c,
1276 0x27700, 0x2771c,
1277 0x27780, 0x2778c,
1278 0x27800, 0x27818,
1279 0x27820, 0x27828,
1280 0x27830, 0x27848,
1281 0x27850, 0x27854,
1282 0x27860, 0x27868,
1283 0x27870, 0x27870,
1284 0x27878, 0x27898,
1285 0x278a0, 0x278a8,
1286 0x278b0, 0x278c8,
1287 0x278d0, 0x278d4,
1288 0x278e0, 0x278e8,
1289 0x278f0, 0x278f0,
1290 0x278f8, 0x27a18,
1291 0x27a20, 0x27a28,
1292 0x27a30, 0x27a48,
1293 0x27a50, 0x27a54,
1294 0x27a60, 0x27a68,
1295 0x27a70, 0x27a70,
1296 0x27a78, 0x27a98,
1297 0x27aa0, 0x27aa8,
1298 0x27ab0, 0x27ac8,
1299 0x27ad0, 0x27ad4,
1300 0x27ae0, 0x27ae8,
1301 0x27af0, 0x27af0,
1302 0x27af8, 0x27c18,
1303 0x27c20, 0x27c20,
1304 0x27c28, 0x27c30,
1305 0x27c38, 0x27c38,
1306 0x27c80, 0x27c98,
1307 0x27ca0, 0x27ca8,
1308 0x27cb0, 0x27cc8,
1309 0x27cd0, 0x27cd4,
1310 0x27ce0, 0x27ce8,
1311 0x27cf0, 0x27cf0,
1312 0x27cf8, 0x27d7c,
1313 0x27e00, 0x27e04,
1314 };
1315
1316 static const unsigned int t4vf_reg_ranges[] = {
1317 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1318 VF_MPS_REG(A_MPS_VF_CTL),
1319 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1320 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1321 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1322 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1323 FW_T4VF_MBDATA_BASE_ADDR,
1324 FW_T4VF_MBDATA_BASE_ADDR +
1325 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1326 };
1327
1328 static const unsigned int t5_reg_ranges[] = {
1329 0x1008, 0x10c0,
1330 0x10cc, 0x10f8,
1331 0x1100, 0x1100,
1332 0x110c, 0x1148,
1333 0x1180, 0x1184,
1334 0x1190, 0x1194,
1335 0x11a0, 0x11a4,
1336 0x11b0, 0x11b4,
1337 0x11fc, 0x123c,
1338 0x1280, 0x173c,
1339 0x1800, 0x18fc,
1340 0x3000, 0x3028,
1341 0x3060, 0x30b0,
1342 0x30b8, 0x30d8,
1343 0x30e0, 0x30fc,
1344 0x3140, 0x357c,
1345 0x35a8, 0x35cc,
1346 0x35ec, 0x35ec,
1347 0x3600, 0x5624,
1348 0x56cc, 0x56ec,
1349 0x56f4, 0x5720,
1350 0x5728, 0x575c,
1351 0x580c, 0x5814,
1352 0x5890, 0x589c,
1353 0x58a4, 0x58ac,
1354 0x58b8, 0x58bc,
1355 0x5940, 0x59c8,
1356 0x59d0, 0x59dc,
1357 0x59fc, 0x5a18,
1358 0x5a60, 0x5a70,
1359 0x5a80, 0x5a9c,
1360 0x5b94, 0x5bfc,
1361 0x6000, 0x6020,
1362 0x6028, 0x6040,
1363 0x6058, 0x609c,
1364 0x60a8, 0x614c,
1365 0x7700, 0x7798,
1366 0x77c0, 0x78fc,
1367 0x7b00, 0x7b58,
1368 0x7b60, 0x7b84,
1369 0x7b8c, 0x7c54,
1370 0x7d00, 0x7d38,
1371 0x7d40, 0x7d80,
1372 0x7d8c, 0x7ddc,
1373 0x7de4, 0x7e04,
1374 0x7e10, 0x7e1c,
1375 0x7e24, 0x7e38,
1376 0x7e40, 0x7e44,
1377 0x7e4c, 0x7e78,
1378 0x7e80, 0x7edc,
1379 0x7ee8, 0x7efc,
1380 0x8dc0, 0x8de0,
1381 0x8df8, 0x8e04,
1382 0x8e10, 0x8e84,
1383 0x8ea0, 0x8f84,
1384 0x8fc0, 0x9058,
1385 0x9060, 0x9060,
1386 0x9068, 0x90f8,
1387 0x9400, 0x9408,
1388 0x9410, 0x9470,
1389 0x9600, 0x9600,
1390 0x9608, 0x9638,
1391 0x9640, 0x96f4,
1392 0x9800, 0x9808,
1393 0x9810, 0x9864,
1394 0x9c00, 0x9c6c,
1395 0x9c80, 0x9cec,
1396 0x9d00, 0x9d6c,
1397 0x9d80, 0x9dec,
1398 0x9e00, 0x9e6c,
1399 0x9e80, 0x9eec,
1400 0x9f00, 0x9f6c,
1401 0x9f80, 0xa020,
1402 0xd000, 0xd004,
1403 0xd010, 0xd03c,
1404 0xdfc0, 0xdfe0,
1405 0xe000, 0x1106c,
1406 0x11074, 0x11088,
1407 0x1109c, 0x11110,
1408 0x11118, 0x1117c,
1409 0x11190, 0x11204,
1410 0x19040, 0x1906c,
1411 0x19078, 0x19080,
1412 0x1908c, 0x190e8,
1413 0x190f0, 0x190f8,
1414 0x19100, 0x19110,
1415 0x19120, 0x19124,
1416 0x19150, 0x19194,
1417 0x1919c, 0x191b0,
1418 0x191d0, 0x191e8,
1419 0x19238, 0x19290,
1420 0x193f8, 0x19428,
1421 0x19430, 0x19444,
1422 0x1944c, 0x1946c,
1423 0x19474, 0x19474,
1424 0x19490, 0x194cc,
1425 0x194f0, 0x194f8,
1426 0x19c00, 0x19c08,
1427 0x19c10, 0x19c60,
1428 0x19c94, 0x19ce4,
1429 0x19cf0, 0x19d40,
1430 0x19d50, 0x19d94,
1431 0x19da0, 0x19de8,
1432 0x19df0, 0x19e10,
1433 0x19e50, 0x19e90,
1434 0x19ea0, 0x19f24,
1435 0x19f34, 0x19f34,
1436 0x19f40, 0x19f50,
1437 0x19f90, 0x19fb4,
1438 0x19fc4, 0x19fe4,
1439 0x1a000, 0x1a004,
1440 0x1a010, 0x1a06c,
1441 0x1a0b0, 0x1a0e4,
1442 0x1a0ec, 0x1a0f8,
1443 0x1a100, 0x1a108,
1444 0x1a114, 0x1a130,
1445 0x1a138, 0x1a1c4,
1446 0x1a1fc, 0x1a1fc,
1447 0x1e008, 0x1e00c,
1448 0x1e040, 0x1e044,
1449 0x1e04c, 0x1e04c,
1450 0x1e284, 0x1e290,
1451 0x1e2c0, 0x1e2c0,
1452 0x1e2e0, 0x1e2e0,
1453 0x1e300, 0x1e384,
1454 0x1e3c0, 0x1e3c8,
1455 0x1e408, 0x1e40c,
1456 0x1e440, 0x1e444,
1457 0x1e44c, 0x1e44c,
1458 0x1e684, 0x1e690,
1459 0x1e6c0, 0x1e6c0,
1460 0x1e6e0, 0x1e6e0,
1461 0x1e700, 0x1e784,
1462 0x1e7c0, 0x1e7c8,
1463 0x1e808, 0x1e80c,
1464 0x1e840, 0x1e844,
1465 0x1e84c, 0x1e84c,
1466 0x1ea84, 0x1ea90,
1467 0x1eac0, 0x1eac0,
1468 0x1eae0, 0x1eae0,
1469 0x1eb00, 0x1eb84,
1470 0x1ebc0, 0x1ebc8,
1471 0x1ec08, 0x1ec0c,
1472 0x1ec40, 0x1ec44,
1473 0x1ec4c, 0x1ec4c,
1474 0x1ee84, 0x1ee90,
1475 0x1eec0, 0x1eec0,
1476 0x1eee0, 0x1eee0,
1477 0x1ef00, 0x1ef84,
1478 0x1efc0, 0x1efc8,
1479 0x1f008, 0x1f00c,
1480 0x1f040, 0x1f044,
1481 0x1f04c, 0x1f04c,
1482 0x1f284, 0x1f290,
1483 0x1f2c0, 0x1f2c0,
1484 0x1f2e0, 0x1f2e0,
1485 0x1f300, 0x1f384,
1486 0x1f3c0, 0x1f3c8,
1487 0x1f408, 0x1f40c,
1488 0x1f440, 0x1f444,
1489 0x1f44c, 0x1f44c,
1490 0x1f684, 0x1f690,
1491 0x1f6c0, 0x1f6c0,
1492 0x1f6e0, 0x1f6e0,
1493 0x1f700, 0x1f784,
1494 0x1f7c0, 0x1f7c8,
1495 0x1f808, 0x1f80c,
1496 0x1f840, 0x1f844,
1497 0x1f84c, 0x1f84c,
1498 0x1fa84, 0x1fa90,
1499 0x1fac0, 0x1fac0,
1500 0x1fae0, 0x1fae0,
1501 0x1fb00, 0x1fb84,
1502 0x1fbc0, 0x1fbc8,
1503 0x1fc08, 0x1fc0c,
1504 0x1fc40, 0x1fc44,
1505 0x1fc4c, 0x1fc4c,
1506 0x1fe84, 0x1fe90,
1507 0x1fec0, 0x1fec0,
1508 0x1fee0, 0x1fee0,
1509 0x1ff00, 0x1ff84,
1510 0x1ffc0, 0x1ffc8,
1511 0x30000, 0x30030,
1512 0x30100, 0x30144,
1513 0x30190, 0x301a0,
1514 0x301a8, 0x301b8,
1515 0x301c4, 0x301c8,
1516 0x301d0, 0x301d0,
1517 0x30200, 0x30318,
1518 0x30400, 0x304b4,
1519 0x304c0, 0x3052c,
1520 0x30540, 0x3061c,
1521 0x30800, 0x30828,
1522 0x30834, 0x30834,
1523 0x308c0, 0x30908,
1524 0x30910, 0x309ac,
1525 0x30a00, 0x30a14,
1526 0x30a1c, 0x30a2c,
1527 0x30a44, 0x30a50,
1528 0x30a74, 0x30a74,
1529 0x30a7c, 0x30afc,
1530 0x30b08, 0x30c24,
1531 0x30d00, 0x30d00,
1532 0x30d08, 0x30d14,
1533 0x30d1c, 0x30d20,
1534 0x30d3c, 0x30d3c,
1535 0x30d48, 0x30d50,
1536 0x31200, 0x3120c,
1537 0x31220, 0x31220,
1538 0x31240, 0x31240,
1539 0x31600, 0x3160c,
1540 0x31a00, 0x31a1c,
1541 0x31e00, 0x31e20,
1542 0x31e38, 0x31e3c,
1543 0x31e80, 0x31e80,
1544 0x31e88, 0x31ea8,
1545 0x31eb0, 0x31eb4,
1546 0x31ec8, 0x31ed4,
1547 0x31fb8, 0x32004,
1548 0x32200, 0x32200,
1549 0x32208, 0x32240,
1550 0x32248, 0x32280,
1551 0x32288, 0x322c0,
1552 0x322c8, 0x322fc,
1553 0x32600, 0x32630,
1554 0x32a00, 0x32abc,
1555 0x32b00, 0x32b10,
1556 0x32b20, 0x32b30,
1557 0x32b40, 0x32b50,
1558 0x32b60, 0x32b70,
1559 0x33000, 0x33028,
1560 0x33030, 0x33048,
1561 0x33060, 0x33068,
1562 0x33070, 0x3309c,
1563 0x330f0, 0x33128,
1564 0x33130, 0x33148,
1565 0x33160, 0x33168,
1566 0x33170, 0x3319c,
1567 0x331f0, 0x33238,
1568 0x33240, 0x33240,
1569 0x33248, 0x33250,
1570 0x3325c, 0x33264,
1571 0x33270, 0x332b8,
1572 0x332c0, 0x332e4,
1573 0x332f8, 0x33338,
1574 0x33340, 0x33340,
1575 0x33348, 0x33350,
1576 0x3335c, 0x33364,
1577 0x33370, 0x333b8,
1578 0x333c0, 0x333e4,
1579 0x333f8, 0x33428,
1580 0x33430, 0x33448,
1581 0x33460, 0x33468,
1582 0x33470, 0x3349c,
1583 0x334f0, 0x33528,
1584 0x33530, 0x33548,
1585 0x33560, 0x33568,
1586 0x33570, 0x3359c,
1587 0x335f0, 0x33638,
1588 0x33640, 0x33640,
1589 0x33648, 0x33650,
1590 0x3365c, 0x33664,
1591 0x33670, 0x336b8,
1592 0x336c0, 0x336e4,
1593 0x336f8, 0x33738,
1594 0x33740, 0x33740,
1595 0x33748, 0x33750,
1596 0x3375c, 0x33764,
1597 0x33770, 0x337b8,
1598 0x337c0, 0x337e4,
1599 0x337f8, 0x337fc,
1600 0x33814, 0x33814,
1601 0x3382c, 0x3382c,
1602 0x33880, 0x3388c,
1603 0x338e8, 0x338ec,
1604 0x33900, 0x33928,
1605 0x33930, 0x33948,
1606 0x33960, 0x33968,
1607 0x33970, 0x3399c,
1608 0x339f0, 0x33a38,
1609 0x33a40, 0x33a40,
1610 0x33a48, 0x33a50,
1611 0x33a5c, 0x33a64,
1612 0x33a70, 0x33ab8,
1613 0x33ac0, 0x33ae4,
1614 0x33af8, 0x33b10,
1615 0x33b28, 0x33b28,
1616 0x33b3c, 0x33b50,
1617 0x33bf0, 0x33c10,
1618 0x33c28, 0x33c28,
1619 0x33c3c, 0x33c50,
1620 0x33cf0, 0x33cfc,
1621 0x34000, 0x34030,
1622 0x34100, 0x34144,
1623 0x34190, 0x341a0,
1624 0x341a8, 0x341b8,
1625 0x341c4, 0x341c8,
1626 0x341d0, 0x341d0,
1627 0x34200, 0x34318,
1628 0x34400, 0x344b4,
1629 0x344c0, 0x3452c,
1630 0x34540, 0x3461c,
1631 0x34800, 0x34828,
1632 0x34834, 0x34834,
1633 0x348c0, 0x34908,
1634 0x34910, 0x349ac,
1635 0x34a00, 0x34a14,
1636 0x34a1c, 0x34a2c,
1637 0x34a44, 0x34a50,
1638 0x34a74, 0x34a74,
1639 0x34a7c, 0x34afc,
1640 0x34b08, 0x34c24,
1641 0x34d00, 0x34d00,
1642 0x34d08, 0x34d14,
1643 0x34d1c, 0x34d20,
1644 0x34d3c, 0x34d3c,
1645 0x34d48, 0x34d50,
1646 0x35200, 0x3520c,
1647 0x35220, 0x35220,
1648 0x35240, 0x35240,
1649 0x35600, 0x3560c,
1650 0x35a00, 0x35a1c,
1651 0x35e00, 0x35e20,
1652 0x35e38, 0x35e3c,
1653 0x35e80, 0x35e80,
1654 0x35e88, 0x35ea8,
1655 0x35eb0, 0x35eb4,
1656 0x35ec8, 0x35ed4,
1657 0x35fb8, 0x36004,
1658 0x36200, 0x36200,
1659 0x36208, 0x36240,
1660 0x36248, 0x36280,
1661 0x36288, 0x362c0,
1662 0x362c8, 0x362fc,
1663 0x36600, 0x36630,
1664 0x36a00, 0x36abc,
1665 0x36b00, 0x36b10,
1666 0x36b20, 0x36b30,
1667 0x36b40, 0x36b50,
1668 0x36b60, 0x36b70,
1669 0x37000, 0x37028,
1670 0x37030, 0x37048,
1671 0x37060, 0x37068,
1672 0x37070, 0x3709c,
1673 0x370f0, 0x37128,
1674 0x37130, 0x37148,
1675 0x37160, 0x37168,
1676 0x37170, 0x3719c,
1677 0x371f0, 0x37238,
1678 0x37240, 0x37240,
1679 0x37248, 0x37250,
1680 0x3725c, 0x37264,
1681 0x37270, 0x372b8,
1682 0x372c0, 0x372e4,
1683 0x372f8, 0x37338,
1684 0x37340, 0x37340,
1685 0x37348, 0x37350,
1686 0x3735c, 0x37364,
1687 0x37370, 0x373b8,
1688 0x373c0, 0x373e4,
1689 0x373f8, 0x37428,
1690 0x37430, 0x37448,
1691 0x37460, 0x37468,
1692 0x37470, 0x3749c,
1693 0x374f0, 0x37528,
1694 0x37530, 0x37548,
1695 0x37560, 0x37568,
1696 0x37570, 0x3759c,
1697 0x375f0, 0x37638,
1698 0x37640, 0x37640,
1699 0x37648, 0x37650,
1700 0x3765c, 0x37664,
1701 0x37670, 0x376b8,
1702 0x376c0, 0x376e4,
1703 0x376f8, 0x37738,
1704 0x37740, 0x37740,
1705 0x37748, 0x37750,
1706 0x3775c, 0x37764,
1707 0x37770, 0x377b8,
1708 0x377c0, 0x377e4,
1709 0x377f8, 0x377fc,
1710 0x37814, 0x37814,
1711 0x3782c, 0x3782c,
1712 0x37880, 0x3788c,
1713 0x378e8, 0x378ec,
1714 0x37900, 0x37928,
1715 0x37930, 0x37948,
1716 0x37960, 0x37968,
1717 0x37970, 0x3799c,
1718 0x379f0, 0x37a38,
1719 0x37a40, 0x37a40,
1720 0x37a48, 0x37a50,
1721 0x37a5c, 0x37a64,
1722 0x37a70, 0x37ab8,
1723 0x37ac0, 0x37ae4,
1724 0x37af8, 0x37b10,
1725 0x37b28, 0x37b28,
1726 0x37b3c, 0x37b50,
1727 0x37bf0, 0x37c10,
1728 0x37c28, 0x37c28,
1729 0x37c3c, 0x37c50,
1730 0x37cf0, 0x37cfc,
1731 0x38000, 0x38030,
1732 0x38100, 0x38144,
1733 0x38190, 0x381a0,
1734 0x381a8, 0x381b8,
1735 0x381c4, 0x381c8,
1736 0x381d0, 0x381d0,
1737 0x38200, 0x38318,
1738 0x38400, 0x384b4,
1739 0x384c0, 0x3852c,
1740 0x38540, 0x3861c,
1741 0x38800, 0x38828,
1742 0x38834, 0x38834,
1743 0x388c0, 0x38908,
1744 0x38910, 0x389ac,
1745 0x38a00, 0x38a14,
1746 0x38a1c, 0x38a2c,
1747 0x38a44, 0x38a50,
1748 0x38a74, 0x38a74,
1749 0x38a7c, 0x38afc,
1750 0x38b08, 0x38c24,
1751 0x38d00, 0x38d00,
1752 0x38d08, 0x38d14,
1753 0x38d1c, 0x38d20,
1754 0x38d3c, 0x38d3c,
1755 0x38d48, 0x38d50,
1756 0x39200, 0x3920c,
1757 0x39220, 0x39220,
1758 0x39240, 0x39240,
1759 0x39600, 0x3960c,
1760 0x39a00, 0x39a1c,
1761 0x39e00, 0x39e20,
1762 0x39e38, 0x39e3c,
1763 0x39e80, 0x39e80,
1764 0x39e88, 0x39ea8,
1765 0x39eb0, 0x39eb4,
1766 0x39ec8, 0x39ed4,
1767 0x39fb8, 0x3a004,
1768 0x3a200, 0x3a200,
1769 0x3a208, 0x3a240,
1770 0x3a248, 0x3a280,
1771 0x3a288, 0x3a2c0,
1772 0x3a2c8, 0x3a2fc,
1773 0x3a600, 0x3a630,
1774 0x3aa00, 0x3aabc,
1775 0x3ab00, 0x3ab10,
1776 0x3ab20, 0x3ab30,
1777 0x3ab40, 0x3ab50,
1778 0x3ab60, 0x3ab70,
1779 0x3b000, 0x3b028,
1780 0x3b030, 0x3b048,
1781 0x3b060, 0x3b068,
1782 0x3b070, 0x3b09c,
1783 0x3b0f0, 0x3b128,
1784 0x3b130, 0x3b148,
1785 0x3b160, 0x3b168,
1786 0x3b170, 0x3b19c,
1787 0x3b1f0, 0x3b238,
1788 0x3b240, 0x3b240,
1789 0x3b248, 0x3b250,
1790 0x3b25c, 0x3b264,
1791 0x3b270, 0x3b2b8,
1792 0x3b2c0, 0x3b2e4,
1793 0x3b2f8, 0x3b338,
1794 0x3b340, 0x3b340,
1795 0x3b348, 0x3b350,
1796 0x3b35c, 0x3b364,
1797 0x3b370, 0x3b3b8,
1798 0x3b3c0, 0x3b3e4,
1799 0x3b3f8, 0x3b428,
1800 0x3b430, 0x3b448,
1801 0x3b460, 0x3b468,
1802 0x3b470, 0x3b49c,
1803 0x3b4f0, 0x3b528,
1804 0x3b530, 0x3b548,
1805 0x3b560, 0x3b568,
1806 0x3b570, 0x3b59c,
1807 0x3b5f0, 0x3b638,
1808 0x3b640, 0x3b640,
1809 0x3b648, 0x3b650,
1810 0x3b65c, 0x3b664,
1811 0x3b670, 0x3b6b8,
1812 0x3b6c0, 0x3b6e4,
1813 0x3b6f8, 0x3b738,
1814 0x3b740, 0x3b740,
1815 0x3b748, 0x3b750,
1816 0x3b75c, 0x3b764,
1817 0x3b770, 0x3b7b8,
1818 0x3b7c0, 0x3b7e4,
1819 0x3b7f8, 0x3b7fc,
1820 0x3b814, 0x3b814,
1821 0x3b82c, 0x3b82c,
1822 0x3b880, 0x3b88c,
1823 0x3b8e8, 0x3b8ec,
1824 0x3b900, 0x3b928,
1825 0x3b930, 0x3b948,
1826 0x3b960, 0x3b968,
1827 0x3b970, 0x3b99c,
1828 0x3b9f0, 0x3ba38,
1829 0x3ba40, 0x3ba40,
1830 0x3ba48, 0x3ba50,
1831 0x3ba5c, 0x3ba64,
1832 0x3ba70, 0x3bab8,
1833 0x3bac0, 0x3bae4,
1834 0x3baf8, 0x3bb10,
1835 0x3bb28, 0x3bb28,
1836 0x3bb3c, 0x3bb50,
1837 0x3bbf0, 0x3bc10,
1838 0x3bc28, 0x3bc28,
1839 0x3bc3c, 0x3bc50,
1840 0x3bcf0, 0x3bcfc,
1841 0x3c000, 0x3c030,
1842 0x3c100, 0x3c144,
1843 0x3c190, 0x3c1a0,
1844 0x3c1a8, 0x3c1b8,
1845 0x3c1c4, 0x3c1c8,
1846 0x3c1d0, 0x3c1d0,
1847 0x3c200, 0x3c318,
1848 0x3c400, 0x3c4b4,
1849 0x3c4c0, 0x3c52c,
1850 0x3c540, 0x3c61c,
1851 0x3c800, 0x3c828,
1852 0x3c834, 0x3c834,
1853 0x3c8c0, 0x3c908,
1854 0x3c910, 0x3c9ac,
1855 0x3ca00, 0x3ca14,
1856 0x3ca1c, 0x3ca2c,
1857 0x3ca44, 0x3ca50,
1858 0x3ca74, 0x3ca74,
1859 0x3ca7c, 0x3cafc,
1860 0x3cb08, 0x3cc24,
1861 0x3cd00, 0x3cd00,
1862 0x3cd08, 0x3cd14,
1863 0x3cd1c, 0x3cd20,
1864 0x3cd3c, 0x3cd3c,
1865 0x3cd48, 0x3cd50,
1866 0x3d200, 0x3d20c,
1867 0x3d220, 0x3d220,
1868 0x3d240, 0x3d240,
1869 0x3d600, 0x3d60c,
1870 0x3da00, 0x3da1c,
1871 0x3de00, 0x3de20,
1872 0x3de38, 0x3de3c,
1873 0x3de80, 0x3de80,
1874 0x3de88, 0x3dea8,
1875 0x3deb0, 0x3deb4,
1876 0x3dec8, 0x3ded4,
1877 0x3dfb8, 0x3e004,
1878 0x3e200, 0x3e200,
1879 0x3e208, 0x3e240,
1880 0x3e248, 0x3e280,
1881 0x3e288, 0x3e2c0,
1882 0x3e2c8, 0x3e2fc,
1883 0x3e600, 0x3e630,
1884 0x3ea00, 0x3eabc,
1885 0x3eb00, 0x3eb10,
1886 0x3eb20, 0x3eb30,
1887 0x3eb40, 0x3eb50,
1888 0x3eb60, 0x3eb70,
1889 0x3f000, 0x3f028,
1890 0x3f030, 0x3f048,
1891 0x3f060, 0x3f068,
1892 0x3f070, 0x3f09c,
1893 0x3f0f0, 0x3f128,
1894 0x3f130, 0x3f148,
1895 0x3f160, 0x3f168,
1896 0x3f170, 0x3f19c,
1897 0x3f1f0, 0x3f238,
1898 0x3f240, 0x3f240,
1899 0x3f248, 0x3f250,
1900 0x3f25c, 0x3f264,
1901 0x3f270, 0x3f2b8,
1902 0x3f2c0, 0x3f2e4,
1903 0x3f2f8, 0x3f338,
1904 0x3f340, 0x3f340,
1905 0x3f348, 0x3f350,
1906 0x3f35c, 0x3f364,
1907 0x3f370, 0x3f3b8,
1908 0x3f3c0, 0x3f3e4,
1909 0x3f3f8, 0x3f428,
1910 0x3f430, 0x3f448,
1911 0x3f460, 0x3f468,
1912 0x3f470, 0x3f49c,
1913 0x3f4f0, 0x3f528,
1914 0x3f530, 0x3f548,
1915 0x3f560, 0x3f568,
1916 0x3f570, 0x3f59c,
1917 0x3f5f0, 0x3f638,
1918 0x3f640, 0x3f640,
1919 0x3f648, 0x3f650,
1920 0x3f65c, 0x3f664,
1921 0x3f670, 0x3f6b8,
1922 0x3f6c0, 0x3f6e4,
1923 0x3f6f8, 0x3f738,
1924 0x3f740, 0x3f740,
1925 0x3f748, 0x3f750,
1926 0x3f75c, 0x3f764,
1927 0x3f770, 0x3f7b8,
1928 0x3f7c0, 0x3f7e4,
1929 0x3f7f8, 0x3f7fc,
1930 0x3f814, 0x3f814,
1931 0x3f82c, 0x3f82c,
1932 0x3f880, 0x3f88c,
1933 0x3f8e8, 0x3f8ec,
1934 0x3f900, 0x3f928,
1935 0x3f930, 0x3f948,
1936 0x3f960, 0x3f968,
1937 0x3f970, 0x3f99c,
1938 0x3f9f0, 0x3fa38,
1939 0x3fa40, 0x3fa40,
1940 0x3fa48, 0x3fa50,
1941 0x3fa5c, 0x3fa64,
1942 0x3fa70, 0x3fab8,
1943 0x3fac0, 0x3fae4,
1944 0x3faf8, 0x3fb10,
1945 0x3fb28, 0x3fb28,
1946 0x3fb3c, 0x3fb50,
1947 0x3fbf0, 0x3fc10,
1948 0x3fc28, 0x3fc28,
1949 0x3fc3c, 0x3fc50,
1950 0x3fcf0, 0x3fcfc,
1951 0x40000, 0x4000c,
1952 0x40040, 0x40050,
1953 0x40060, 0x40068,
1954 0x4007c, 0x4008c,
1955 0x40094, 0x400b0,
1956 0x400c0, 0x40144,
1957 0x40180, 0x4018c,
1958 0x40200, 0x40254,
1959 0x40260, 0x40264,
1960 0x40270, 0x40288,
1961 0x40290, 0x40298,
1962 0x402ac, 0x402c8,
1963 0x402d0, 0x402e0,
1964 0x402f0, 0x402f0,
1965 0x40300, 0x4033c,
1966 0x403f8, 0x403fc,
1967 0x41304, 0x413c4,
1968 0x41400, 0x4140c,
1969 0x41414, 0x4141c,
1970 0x41480, 0x414d0,
1971 0x44000, 0x44054,
1972 0x4405c, 0x44078,
1973 0x440c0, 0x44174,
1974 0x44180, 0x441ac,
1975 0x441b4, 0x441b8,
1976 0x441c0, 0x44254,
1977 0x4425c, 0x44278,
1978 0x442c0, 0x44374,
1979 0x44380, 0x443ac,
1980 0x443b4, 0x443b8,
1981 0x443c0, 0x44454,
1982 0x4445c, 0x44478,
1983 0x444c0, 0x44574,
1984 0x44580, 0x445ac,
1985 0x445b4, 0x445b8,
1986 0x445c0, 0x44654,
1987 0x4465c, 0x44678,
1988 0x446c0, 0x44774,
1989 0x44780, 0x447ac,
1990 0x447b4, 0x447b8,
1991 0x447c0, 0x44854,
1992 0x4485c, 0x44878,
1993 0x448c0, 0x44974,
1994 0x44980, 0x449ac,
1995 0x449b4, 0x449b8,
1996 0x449c0, 0x449fc,
1997 0x45000, 0x45004,
1998 0x45010, 0x45030,
1999 0x45040, 0x45060,
2000 0x45068, 0x45068,
2001 0x45080, 0x45084,
2002 0x450a0, 0x450b0,
2003 0x45200, 0x45204,
2004 0x45210, 0x45230,
2005 0x45240, 0x45260,
2006 0x45268, 0x45268,
2007 0x45280, 0x45284,
2008 0x452a0, 0x452b0,
2009 0x460c0, 0x460e4,
2010 0x47000, 0x4703c,
2011 0x47044, 0x4708c,
2012 0x47200, 0x47250,
2013 0x47400, 0x47408,
2014 0x47414, 0x47420,
2015 0x47600, 0x47618,
2016 0x47800, 0x47814,
2017 0x48000, 0x4800c,
2018 0x48040, 0x48050,
2019 0x48060, 0x48068,
2020 0x4807c, 0x4808c,
2021 0x48094, 0x480b0,
2022 0x480c0, 0x48144,
2023 0x48180, 0x4818c,
2024 0x48200, 0x48254,
2025 0x48260, 0x48264,
2026 0x48270, 0x48288,
2027 0x48290, 0x48298,
2028 0x482ac, 0x482c8,
2029 0x482d0, 0x482e0,
2030 0x482f0, 0x482f0,
2031 0x48300, 0x4833c,
2032 0x483f8, 0x483fc,
2033 0x49304, 0x493c4,
2034 0x49400, 0x4940c,
2035 0x49414, 0x4941c,
2036 0x49480, 0x494d0,
2037 0x4c000, 0x4c054,
2038 0x4c05c, 0x4c078,
2039 0x4c0c0, 0x4c174,
2040 0x4c180, 0x4c1ac,
2041 0x4c1b4, 0x4c1b8,
2042 0x4c1c0, 0x4c254,
2043 0x4c25c, 0x4c278,
2044 0x4c2c0, 0x4c374,
2045 0x4c380, 0x4c3ac,
2046 0x4c3b4, 0x4c3b8,
2047 0x4c3c0, 0x4c454,
2048 0x4c45c, 0x4c478,
2049 0x4c4c0, 0x4c574,
2050 0x4c580, 0x4c5ac,
2051 0x4c5b4, 0x4c5b8,
2052 0x4c5c0, 0x4c654,
2053 0x4c65c, 0x4c678,
2054 0x4c6c0, 0x4c774,
2055 0x4c780, 0x4c7ac,
2056 0x4c7b4, 0x4c7b8,
2057 0x4c7c0, 0x4c854,
2058 0x4c85c, 0x4c878,
2059 0x4c8c0, 0x4c974,
2060 0x4c980, 0x4c9ac,
2061 0x4c9b4, 0x4c9b8,
2062 0x4c9c0, 0x4c9fc,
2063 0x4d000, 0x4d004,
2064 0x4d010, 0x4d030,
2065 0x4d040, 0x4d060,
2066 0x4d068, 0x4d068,
2067 0x4d080, 0x4d084,
2068 0x4d0a0, 0x4d0b0,
2069 0x4d200, 0x4d204,
2070 0x4d210, 0x4d230,
2071 0x4d240, 0x4d260,
2072 0x4d268, 0x4d268,
2073 0x4d280, 0x4d284,
2074 0x4d2a0, 0x4d2b0,
2075 0x4e0c0, 0x4e0e4,
2076 0x4f000, 0x4f03c,
2077 0x4f044, 0x4f08c,
2078 0x4f200, 0x4f250,
2079 0x4f400, 0x4f408,
2080 0x4f414, 0x4f420,
2081 0x4f600, 0x4f618,
2082 0x4f800, 0x4f814,
2083 0x50000, 0x50084,
2084 0x50090, 0x500cc,
2085 0x50400, 0x50400,
2086 0x50800, 0x50884,
2087 0x50890, 0x508cc,
2088 0x50c00, 0x50c00,
2089 0x51000, 0x5101c,
2090 0x51300, 0x51308,
2091 };
2092
2093 static const unsigned int t5vf_reg_ranges[] = {
2094 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2095 VF_MPS_REG(A_MPS_VF_CTL),
2096 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2097 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2098 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2099 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2100 FW_T4VF_MBDATA_BASE_ADDR,
2101 FW_T4VF_MBDATA_BASE_ADDR +
2102 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2103 };
2104
2105 static const unsigned int t6_reg_ranges[] = {
2106 0x1008, 0x101c,
2107 0x1024, 0x10a8,
2108 0x10b4, 0x10f8,
2109 0x1100, 0x1114,
2110 0x111c, 0x112c,
2111 0x1138, 0x113c,
2112 0x1144, 0x114c,
2113 0x1180, 0x1184,
2114 0x1190, 0x1194,
2115 0x11a0, 0x11a4,
2116 0x11b0, 0x11c4,
2117 0x11fc, 0x123c,
2118 0x1254, 0x1274,
2119 0x1280, 0x133c,
2120 0x1800, 0x18fc,
2121 0x3000, 0x302c,
2122 0x3060, 0x30b0,
2123 0x30b8, 0x30d8,
2124 0x30e0, 0x30fc,
2125 0x3140, 0x357c,
2126 0x35a8, 0x35cc,
2127 0x35ec, 0x35ec,
2128 0x3600, 0x5624,
2129 0x56cc, 0x56ec,
2130 0x56f4, 0x5720,
2131 0x5728, 0x575c,
2132 0x580c, 0x5814,
2133 0x5890, 0x589c,
2134 0x58a4, 0x58ac,
2135 0x58b8, 0x58bc,
2136 0x5940, 0x595c,
2137 0x5980, 0x598c,
2138 0x59b0, 0x59c8,
2139 0x59d0, 0x59dc,
2140 0x59fc, 0x5a18,
2141 0x5a60, 0x5a6c,
2142 0x5a80, 0x5a8c,
2143 0x5a94, 0x5a9c,
2144 0x5b94, 0x5bfc,
2145 0x5c10, 0x5e48,
2146 0x5e50, 0x5e94,
2147 0x5ea0, 0x5eb0,
2148 0x5ec0, 0x5ec0,
2149 0x5ec8, 0x5ed0,
2150 0x5ee0, 0x5ee0,
2151 0x5ef0, 0x5ef0,
2152 0x5f00, 0x5f00,
2153 0x6000, 0x6020,
2154 0x6028, 0x6040,
2155 0x6058, 0x609c,
2156 0x60a8, 0x619c,
2157 0x7700, 0x7798,
2158 0x77c0, 0x7880,
2159 0x78cc, 0x78fc,
2160 0x7b00, 0x7b58,
2161 0x7b60, 0x7b84,
2162 0x7b8c, 0x7c54,
2163 0x7d00, 0x7d38,
2164 0x7d40, 0x7d84,
2165 0x7d8c, 0x7ddc,
2166 0x7de4, 0x7e04,
2167 0x7e10, 0x7e1c,
2168 0x7e24, 0x7e38,
2169 0x7e40, 0x7e44,
2170 0x7e4c, 0x7e78,
2171 0x7e80, 0x7edc,
2172 0x7ee8, 0x7efc,
2173 0x8dc0, 0x8de0,
2174 0x8df8, 0x8e04,
2175 0x8e10, 0x8e84,
2176 0x8ea0, 0x8f88,
2177 0x8fb8, 0x9058,
2178 0x9060, 0x9060,
2179 0x9068, 0x90f8,
2180 0x9100, 0x9124,
2181 0x9400, 0x9470,
2182 0x9600, 0x9600,
2183 0x9608, 0x9638,
2184 0x9640, 0x9704,
2185 0x9710, 0x971c,
2186 0x9800, 0x9808,
2187 0x9810, 0x9864,
2188 0x9c00, 0x9c6c,
2189 0x9c80, 0x9cec,
2190 0x9d00, 0x9d6c,
2191 0x9d80, 0x9dec,
2192 0x9e00, 0x9e6c,
2193 0x9e80, 0x9eec,
2194 0x9f00, 0x9f6c,
2195 0x9f80, 0xa020,
2196 0xd000, 0xd03c,
2197 0xd100, 0xd118,
2198 0xd200, 0xd214,
2199 0xd220, 0xd234,
2200 0xd240, 0xd254,
2201 0xd260, 0xd274,
2202 0xd280, 0xd294,
2203 0xd2a0, 0xd2b4,
2204 0xd2c0, 0xd2d4,
2205 0xd2e0, 0xd2f4,
2206 0xd300, 0xd31c,
2207 0xdfc0, 0xdfe0,
2208 0xe000, 0xf008,
2209 0xf010, 0xf018,
2210 0xf020, 0xf028,
2211 0x11000, 0x11014,
2212 0x11048, 0x1106c,
2213 0x11074, 0x11088,
2214 0x11098, 0x11120,
2215 0x1112c, 0x1117c,
2216 0x11190, 0x112e0,
2217 0x11300, 0x1130c,
2218 0x12000, 0x1206c,
2219 0x19040, 0x1906c,
2220 0x19078, 0x19080,
2221 0x1908c, 0x190e8,
2222 0x190f0, 0x190f8,
2223 0x19100, 0x19110,
2224 0x19120, 0x19124,
2225 0x19150, 0x19194,
2226 0x1919c, 0x191b0,
2227 0x191d0, 0x191e8,
2228 0x19238, 0x19290,
2229 0x192a4, 0x192b0,
2230 0x19348, 0x1934c,
2231 0x193f8, 0x19418,
2232 0x19420, 0x19428,
2233 0x19430, 0x19444,
2234 0x1944c, 0x1946c,
2235 0x19474, 0x19474,
2236 0x19490, 0x194cc,
2237 0x194f0, 0x194f8,
2238 0x19c00, 0x19c48,
2239 0x19c50, 0x19c80,
2240 0x19c94, 0x19c98,
2241 0x19ca0, 0x19cbc,
2242 0x19ce4, 0x19ce4,
2243 0x19cf0, 0x19cf8,
2244 0x19d00, 0x19d28,
2245 0x19d50, 0x19d78,
2246 0x19d94, 0x19d98,
2247 0x19da0, 0x19de0,
2248 0x19df0, 0x19e10,
2249 0x19e50, 0x19e6c,
2250 0x19ea0, 0x19ebc,
2251 0x19ec4, 0x19ef4,
2252 0x19f04, 0x19f2c,
2253 0x19f34, 0x19f34,
2254 0x19f40, 0x19f50,
2255 0x19f90, 0x19fac,
2256 0x19fc4, 0x19fc8,
2257 0x19fd0, 0x19fe4,
2258 0x1a000, 0x1a004,
2259 0x1a010, 0x1a06c,
2260 0x1a0b0, 0x1a0e4,
2261 0x1a0ec, 0x1a0f8,
2262 0x1a100, 0x1a108,
2263 0x1a114, 0x1a130,
2264 0x1a138, 0x1a1c4,
2265 0x1a1fc, 0x1a1fc,
2266 0x1e008, 0x1e00c,
2267 0x1e040, 0x1e044,
2268 0x1e04c, 0x1e04c,
2269 0x1e284, 0x1e290,
2270 0x1e2c0, 0x1e2c0,
2271 0x1e2e0, 0x1e2e0,
2272 0x1e300, 0x1e384,
2273 0x1e3c0, 0x1e3c8,
2274 0x1e408, 0x1e40c,
2275 0x1e440, 0x1e444,
2276 0x1e44c, 0x1e44c,
2277 0x1e684, 0x1e690,
2278 0x1e6c0, 0x1e6c0,
2279 0x1e6e0, 0x1e6e0,
2280 0x1e700, 0x1e784,
2281 0x1e7c0, 0x1e7c8,
2282 0x1e808, 0x1e80c,
2283 0x1e840, 0x1e844,
2284 0x1e84c, 0x1e84c,
2285 0x1ea84, 0x1ea90,
2286 0x1eac0, 0x1eac0,
2287 0x1eae0, 0x1eae0,
2288 0x1eb00, 0x1eb84,
2289 0x1ebc0, 0x1ebc8,
2290 0x1ec08, 0x1ec0c,
2291 0x1ec40, 0x1ec44,
2292 0x1ec4c, 0x1ec4c,
2293 0x1ee84, 0x1ee90,
2294 0x1eec0, 0x1eec0,
2295 0x1eee0, 0x1eee0,
2296 0x1ef00, 0x1ef84,
2297 0x1efc0, 0x1efc8,
2298 0x1f008, 0x1f00c,
2299 0x1f040, 0x1f044,
2300 0x1f04c, 0x1f04c,
2301 0x1f284, 0x1f290,
2302 0x1f2c0, 0x1f2c0,
2303 0x1f2e0, 0x1f2e0,
2304 0x1f300, 0x1f384,
2305 0x1f3c0, 0x1f3c8,
2306 0x1f408, 0x1f40c,
2307 0x1f440, 0x1f444,
2308 0x1f44c, 0x1f44c,
2309 0x1f684, 0x1f690,
2310 0x1f6c0, 0x1f6c0,
2311 0x1f6e0, 0x1f6e0,
2312 0x1f700, 0x1f784,
2313 0x1f7c0, 0x1f7c8,
2314 0x1f808, 0x1f80c,
2315 0x1f840, 0x1f844,
2316 0x1f84c, 0x1f84c,
2317 0x1fa84, 0x1fa90,
2318 0x1fac0, 0x1fac0,
2319 0x1fae0, 0x1fae0,
2320 0x1fb00, 0x1fb84,
2321 0x1fbc0, 0x1fbc8,
2322 0x1fc08, 0x1fc0c,
2323 0x1fc40, 0x1fc44,
2324 0x1fc4c, 0x1fc4c,
2325 0x1fe84, 0x1fe90,
2326 0x1fec0, 0x1fec0,
2327 0x1fee0, 0x1fee0,
2328 0x1ff00, 0x1ff84,
2329 0x1ffc0, 0x1ffc8,
2330 0x30000, 0x30030,
2331 0x30100, 0x30168,
2332 0x30190, 0x301a0,
2333 0x301a8, 0x301b8,
2334 0x301c4, 0x301c8,
2335 0x301d0, 0x301d0,
2336 0x30200, 0x30320,
2337 0x30400, 0x304b4,
2338 0x304c0, 0x3052c,
2339 0x30540, 0x3061c,
2340 0x30800, 0x308a0,
2341 0x308c0, 0x30908,
2342 0x30910, 0x309b8,
2343 0x30a00, 0x30a04,
2344 0x30a0c, 0x30a14,
2345 0x30a1c, 0x30a2c,
2346 0x30a44, 0x30a50,
2347 0x30a74, 0x30a74,
2348 0x30a7c, 0x30afc,
2349 0x30b08, 0x30c24,
2350 0x30d00, 0x30d14,
2351 0x30d1c, 0x30d3c,
2352 0x30d44, 0x30d4c,
2353 0x30d54, 0x30d74,
2354 0x30d7c, 0x30d7c,
2355 0x30de0, 0x30de0,
2356 0x30e00, 0x30ed4,
2357 0x30f00, 0x30fa4,
2358 0x30fc0, 0x30fc4,
2359 0x31000, 0x31004,
2360 0x31080, 0x310fc,
2361 0x31208, 0x31220,
2362 0x3123c, 0x31254,
2363 0x31300, 0x31300,
2364 0x31308, 0x3131c,
2365 0x31338, 0x3133c,
2366 0x31380, 0x31380,
2367 0x31388, 0x313a8,
2368 0x313b4, 0x313b4,
2369 0x31400, 0x31420,
2370 0x31438, 0x3143c,
2371 0x31480, 0x31480,
2372 0x314a8, 0x314a8,
2373 0x314b0, 0x314b4,
2374 0x314c8, 0x314d4,
2375 0x31a40, 0x31a4c,
2376 0x31af0, 0x31b20,
2377 0x31b38, 0x31b3c,
2378 0x31b80, 0x31b80,
2379 0x31ba8, 0x31ba8,
2380 0x31bb0, 0x31bb4,
2381 0x31bc8, 0x31bd4,
2382 0x32140, 0x3218c,
2383 0x321f0, 0x321f4,
2384 0x32200, 0x32200,
2385 0x32218, 0x32218,
2386 0x32400, 0x32400,
2387 0x32408, 0x3241c,
2388 0x32618, 0x32620,
2389 0x32664, 0x32664,
2390 0x326a8, 0x326a8,
2391 0x326ec, 0x326ec,
2392 0x32a00, 0x32abc,
2393 0x32b00, 0x32b18,
2394 0x32b20, 0x32b38,
2395 0x32b40, 0x32b58,
2396 0x32b60, 0x32b78,
2397 0x32c00, 0x32c00,
2398 0x32c08, 0x32c3c,
2399 0x33000, 0x3302c,
2400 0x33034, 0x33050,
2401 0x33058, 0x33058,
2402 0x33060, 0x3308c,
2403 0x3309c, 0x330ac,
2404 0x330c0, 0x330c0,
2405 0x330c8, 0x330d0,
2406 0x330d8, 0x330e0,
2407 0x330ec, 0x3312c,
2408 0x33134, 0x33150,
2409 0x33158, 0x33158,
2410 0x33160, 0x3318c,
2411 0x3319c, 0x331ac,
2412 0x331c0, 0x331c0,
2413 0x331c8, 0x331d0,
2414 0x331d8, 0x331e0,
2415 0x331ec, 0x33290,
2416 0x33298, 0x332c4,
2417 0x332e4, 0x33390,
2418 0x33398, 0x333c4,
2419 0x333e4, 0x3342c,
2420 0x33434, 0x33450,
2421 0x33458, 0x33458,
2422 0x33460, 0x3348c,
2423 0x3349c, 0x334ac,
2424 0x334c0, 0x334c0,
2425 0x334c8, 0x334d0,
2426 0x334d8, 0x334e0,
2427 0x334ec, 0x3352c,
2428 0x33534, 0x33550,
2429 0x33558, 0x33558,
2430 0x33560, 0x3358c,
2431 0x3359c, 0x335ac,
2432 0x335c0, 0x335c0,
2433 0x335c8, 0x335d0,
2434 0x335d8, 0x335e0,
2435 0x335ec, 0x33690,
2436 0x33698, 0x336c4,
2437 0x336e4, 0x33790,
2438 0x33798, 0x337c4,
2439 0x337e4, 0x337fc,
2440 0x33814, 0x33814,
2441 0x33854, 0x33868,
2442 0x33880, 0x3388c,
2443 0x338c0, 0x338d0,
2444 0x338e8, 0x338ec,
2445 0x33900, 0x3392c,
2446 0x33934, 0x33950,
2447 0x33958, 0x33958,
2448 0x33960, 0x3398c,
2449 0x3399c, 0x339ac,
2450 0x339c0, 0x339c0,
2451 0x339c8, 0x339d0,
2452 0x339d8, 0x339e0,
2453 0x339ec, 0x33a90,
2454 0x33a98, 0x33ac4,
2455 0x33ae4, 0x33b10,
2456 0x33b24, 0x33b28,
2457 0x33b38, 0x33b50,
2458 0x33bf0, 0x33c10,
2459 0x33c24, 0x33c28,
2460 0x33c38, 0x33c50,
2461 0x33cf0, 0x33cfc,
2462 0x34000, 0x34030,
2463 0x34100, 0x34168,
2464 0x34190, 0x341a0,
2465 0x341a8, 0x341b8,
2466 0x341c4, 0x341c8,
2467 0x341d0, 0x341d0,
2468 0x34200, 0x34320,
2469 0x34400, 0x344b4,
2470 0x344c0, 0x3452c,
2471 0x34540, 0x3461c,
2472 0x34800, 0x348a0,
2473 0x348c0, 0x34908,
2474 0x34910, 0x349b8,
2475 0x34a00, 0x34a04,
2476 0x34a0c, 0x34a14,
2477 0x34a1c, 0x34a2c,
2478 0x34a44, 0x34a50,
2479 0x34a74, 0x34a74,
2480 0x34a7c, 0x34afc,
2481 0x34b08, 0x34c24,
2482 0x34d00, 0x34d14,
2483 0x34d1c, 0x34d3c,
2484 0x34d44, 0x34d4c,
2485 0x34d54, 0x34d74,
2486 0x34d7c, 0x34d7c,
2487 0x34de0, 0x34de0,
2488 0x34e00, 0x34ed4,
2489 0x34f00, 0x34fa4,
2490 0x34fc0, 0x34fc4,
2491 0x35000, 0x35004,
2492 0x35080, 0x350fc,
2493 0x35208, 0x35220,
2494 0x3523c, 0x35254,
2495 0x35300, 0x35300,
2496 0x35308, 0x3531c,
2497 0x35338, 0x3533c,
2498 0x35380, 0x35380,
2499 0x35388, 0x353a8,
2500 0x353b4, 0x353b4,
2501 0x35400, 0x35420,
2502 0x35438, 0x3543c,
2503 0x35480, 0x35480,
2504 0x354a8, 0x354a8,
2505 0x354b0, 0x354b4,
2506 0x354c8, 0x354d4,
2507 0x35a40, 0x35a4c,
2508 0x35af0, 0x35b20,
2509 0x35b38, 0x35b3c,
2510 0x35b80, 0x35b80,
2511 0x35ba8, 0x35ba8,
2512 0x35bb0, 0x35bb4,
2513 0x35bc8, 0x35bd4,
2514 0x36140, 0x3618c,
2515 0x361f0, 0x361f4,
2516 0x36200, 0x36200,
2517 0x36218, 0x36218,
2518 0x36400, 0x36400,
2519 0x36408, 0x3641c,
2520 0x36618, 0x36620,
2521 0x36664, 0x36664,
2522 0x366a8, 0x366a8,
2523 0x366ec, 0x366ec,
2524 0x36a00, 0x36abc,
2525 0x36b00, 0x36b18,
2526 0x36b20, 0x36b38,
2527 0x36b40, 0x36b58,
2528 0x36b60, 0x36b78,
2529 0x36c00, 0x36c00,
2530 0x36c08, 0x36c3c,
2531 0x37000, 0x3702c,
2532 0x37034, 0x37050,
2533 0x37058, 0x37058,
2534 0x37060, 0x3708c,
2535 0x3709c, 0x370ac,
2536 0x370c0, 0x370c0,
2537 0x370c8, 0x370d0,
2538 0x370d8, 0x370e0,
2539 0x370ec, 0x3712c,
2540 0x37134, 0x37150,
2541 0x37158, 0x37158,
2542 0x37160, 0x3718c,
2543 0x3719c, 0x371ac,
2544 0x371c0, 0x371c0,
2545 0x371c8, 0x371d0,
2546 0x371d8, 0x371e0,
2547 0x371ec, 0x37290,
2548 0x37298, 0x372c4,
2549 0x372e4, 0x37390,
2550 0x37398, 0x373c4,
2551 0x373e4, 0x3742c,
2552 0x37434, 0x37450,
2553 0x37458, 0x37458,
2554 0x37460, 0x3748c,
2555 0x3749c, 0x374ac,
2556 0x374c0, 0x374c0,
2557 0x374c8, 0x374d0,
2558 0x374d8, 0x374e0,
2559 0x374ec, 0x3752c,
2560 0x37534, 0x37550,
2561 0x37558, 0x37558,
2562 0x37560, 0x3758c,
2563 0x3759c, 0x375ac,
2564 0x375c0, 0x375c0,
2565 0x375c8, 0x375d0,
2566 0x375d8, 0x375e0,
2567 0x375ec, 0x37690,
2568 0x37698, 0x376c4,
2569 0x376e4, 0x37790,
2570 0x37798, 0x377c4,
2571 0x377e4, 0x377fc,
2572 0x37814, 0x37814,
2573 0x37854, 0x37868,
2574 0x37880, 0x3788c,
2575 0x378c0, 0x378d0,
2576 0x378e8, 0x378ec,
2577 0x37900, 0x3792c,
2578 0x37934, 0x37950,
2579 0x37958, 0x37958,
2580 0x37960, 0x3798c,
2581 0x3799c, 0x379ac,
2582 0x379c0, 0x379c0,
2583 0x379c8, 0x379d0,
2584 0x379d8, 0x379e0,
2585 0x379ec, 0x37a90,
2586 0x37a98, 0x37ac4,
2587 0x37ae4, 0x37b10,
2588 0x37b24, 0x37b28,
2589 0x37b38, 0x37b50,
2590 0x37bf0, 0x37c10,
2591 0x37c24, 0x37c28,
2592 0x37c38, 0x37c50,
2593 0x37cf0, 0x37cfc,
2594 0x40040, 0x40040,
2595 0x40080, 0x40084,
2596 0x40100, 0x40100,
2597 0x40140, 0x401bc,
2598 0x40200, 0x40214,
2599 0x40228, 0x40228,
2600 0x40240, 0x40258,
2601 0x40280, 0x40280,
2602 0x40304, 0x40304,
2603 0x40330, 0x4033c,
2604 0x41304, 0x413c8,
2605 0x413d0, 0x413dc,
2606 0x413f0, 0x413f0,
2607 0x41400, 0x4140c,
2608 0x41414, 0x4141c,
2609 0x41480, 0x414d0,
2610 0x44000, 0x4407c,
2611 0x440c0, 0x441ac,
2612 0x441b4, 0x4427c,
2613 0x442c0, 0x443ac,
2614 0x443b4, 0x4447c,
2615 0x444c0, 0x445ac,
2616 0x445b4, 0x4467c,
2617 0x446c0, 0x447ac,
2618 0x447b4, 0x4487c,
2619 0x448c0, 0x449ac,
2620 0x449b4, 0x44a7c,
2621 0x44ac0, 0x44bac,
2622 0x44bb4, 0x44c7c,
2623 0x44cc0, 0x44dac,
2624 0x44db4, 0x44e7c,
2625 0x44ec0, 0x44fac,
2626 0x44fb4, 0x4507c,
2627 0x450c0, 0x451ac,
2628 0x451b4, 0x451fc,
2629 0x45800, 0x45804,
2630 0x45810, 0x45830,
2631 0x45840, 0x45860,
2632 0x45868, 0x45868,
2633 0x45880, 0x45884,
2634 0x458a0, 0x458b0,
2635 0x45a00, 0x45a04,
2636 0x45a10, 0x45a30,
2637 0x45a40, 0x45a60,
2638 0x45a68, 0x45a68,
2639 0x45a80, 0x45a84,
2640 0x45aa0, 0x45ab0,
2641 0x460c0, 0x460e4,
2642 0x47000, 0x4703c,
2643 0x47044, 0x4708c,
2644 0x47200, 0x47250,
2645 0x47400, 0x47408,
2646 0x47414, 0x47420,
2647 0x47600, 0x47618,
2648 0x47800, 0x47814,
2649 0x47820, 0x4782c,
2650 0x50000, 0x50084,
2651 0x50090, 0x500cc,
2652 0x50300, 0x50384,
2653 0x50400, 0x50400,
2654 0x50800, 0x50884,
2655 0x50890, 0x508cc,
2656 0x50b00, 0x50b84,
2657 0x50c00, 0x50c00,
2658 0x51000, 0x51020,
2659 0x51028, 0x510b0,
2660 0x51300, 0x51324,
2661 };
2662
2663 static const unsigned int t6vf_reg_ranges[] = {
2664 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2665 VF_MPS_REG(A_MPS_VF_CTL),
2666 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2667 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2668 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2669 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2670 FW_T6VF_MBDATA_BASE_ADDR,
2671 FW_T6VF_MBDATA_BASE_ADDR +
2672 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2673 };
2674
2675 static const unsigned int t7_reg_ranges[] = {
2676 0x1008, 0x101c,
2677 0x1024, 0x10a8,
2678 0x10b4, 0x10f8,
2679 0x1100, 0x1114,
2680 0x111c, 0x112c,
2681 0x1138, 0x113c,
2682 0x1144, 0x115c,
2683 0x1180, 0x1184,
2684 0x1190, 0x1194,
2685 0x11a0, 0x11a4,
2686 0x11b0, 0x11d0,
2687 0x11fc, 0x1278,
2688 0x1280, 0x1368,
2689 0x1700, 0x172c,
2690 0x173c, 0x1760,
2691 0x1800, 0x18fc,
2692 0x3000, 0x3044,
2693 0x30a4, 0x30b0,
2694 0x30b8, 0x30d8,
2695 0x30e0, 0x30e8,
2696 0x3140, 0x357c,
2697 0x35a8, 0x35cc,
2698 0x35e0, 0x35ec,
2699 0x3600, 0x37fc,
2700 0x3804, 0x3818,
2701 0x3880, 0x388c,
2702 0x3900, 0x3904,
2703 0x3910, 0x3978,
2704 0x3980, 0x399c,
2705 0x4700, 0x4720,
2706 0x4728, 0x475c,
2707 0x480c, 0x4814,
2708 0x4890, 0x489c,
2709 0x48a4, 0x48ac,
2710 0x48b8, 0x48bc,
2711 0x4900, 0x4924,
2712 0x4ffc, 0x4ffc,
2713 0x5500, 0x5624,
2714 0x56c4, 0x56ec,
2715 0x56f4, 0x5720,
2716 0x5728, 0x575c,
2717 0x580c, 0x5814,
2718 0x5890, 0x589c,
2719 0x58a4, 0x58ac,
2720 0x58b8, 0x58bc,
2721 0x5940, 0x598c,
2722 0x59b0, 0x59c8,
2723 0x59d0, 0x59dc,
2724 0x59fc, 0x5a18,
2725 0x5a60, 0x5a6c,
2726 0x5a80, 0x5a8c,
2727 0x5a94, 0x5a9c,
2728 0x5b94, 0x5bec,
2729 0x5bf8, 0x5bfc,
2730 0x5c10, 0x5c40,
2731 0x5c4c, 0x5e48,
2732 0x5e50, 0x5e94,
2733 0x5ea0, 0x5eb0,
2734 0x5ec0, 0x5ec0,
2735 0x5ec8, 0x5ed0,
2736 0x5ee0, 0x5ee0,
2737 0x5ef0, 0x5ef0,
2738 0x5f00, 0x5f04,
2739 0x5f0c, 0x5f10,
2740 0x5f20, 0x5f78,
2741 0x5f84, 0x5f88,
2742 0x5f90, 0x5fd8,
2743 0x6000, 0x6020,
2744 0x6028, 0x6030,
2745 0x6044, 0x609c,
2746 0x60a8, 0x60ac,
2747 0x60b8, 0x60ec,
2748 0x6100, 0x6104,
2749 0x6118, 0x611c,
2750 0x6150, 0x6150,
2751 0x6180, 0x61b8,
2752 0x7700, 0x77a8,
2753 0x77b0, 0x7888,
2754 0x78cc, 0x7970,
2755 0x7b00, 0x7b00,
2756 0x7b08, 0x7b0c,
2757 0x7b24, 0x7b84,
2758 0x7b8c, 0x7c2c,
2759 0x7c34, 0x7c40,
2760 0x7c48, 0x7c68,
2761 0x7c70, 0x7c7c,
2762 0x7d00, 0x7ddc,
2763 0x7de4, 0x7e38,
2764 0x7e40, 0x7e44,
2765 0x7e4c, 0x7e74,
2766 0x7e80, 0x7ee0,
2767 0x7ee8, 0x7f0c,
2768 0x7f20, 0x7f5c,
2769 0x8dc0, 0x8de8,
2770 0x8df8, 0x8e04,
2771 0x8e10, 0x8e30,
2772 0x8e7c, 0x8ee8,
2773 0x8f88, 0x8f88,
2774 0x8f90, 0x8fb0,
2775 0x8fb8, 0x9058,
2776 0x9074, 0x90f8,
2777 0x9100, 0x912c,
2778 0x9138, 0x9188,
2779 0x9400, 0x9414,
2780 0x9430, 0x9440,
2781 0x9454, 0x9454,
2782 0x945c, 0x947c,
2783 0x9498, 0x94b8,
2784 0x9600, 0x9600,
2785 0x9608, 0x9638,
2786 0x9640, 0x9704,
2787 0x9710, 0x971c,
2788 0x9800, 0x9804,
2789 0x9854, 0x9854,
2790 0x9c00, 0x9c6c,
2791 0x9c80, 0x9cec,
2792 0x9d00, 0x9d6c,
2793 0x9d80, 0x9dec,
2794 0x9e00, 0x9e6c,
2795 0x9e80, 0x9eec,
2796 0x9f00, 0x9f6c,
2797 0x9f80, 0x9fec,
2798 0xa000, 0xa06c,
2799 0xa080, 0xa0ec,
2800 0xa100, 0xa16c,
2801 0xa180, 0xa1ec,
2802 0xa200, 0xa26c,
2803 0xa280, 0xa2ec,
2804 0xa300, 0xa36c,
2805 0xa380, 0xa458,
2806 0xa460, 0xa4f8,
2807 0xd000, 0xd03c,
2808 0xd100, 0xd134,
2809 0xd200, 0xd214,
2810 0xd220, 0xd234,
2811 0xd240, 0xd254,
2812 0xd260, 0xd274,
2813 0xd280, 0xd294,
2814 0xd2a0, 0xd2b4,
2815 0xd2c0, 0xd2d4,
2816 0xd2e0, 0xd2f4,
2817 0xd300, 0xd31c,
2818 0xdfc0, 0xdfe0,
2819 0xe000, 0xe00c,
2820 0xf000, 0xf008,
2821 0xf010, 0xf06c,
2822 0x11000, 0x11014,
2823 0x11048, 0x11120,
2824 0x11130, 0x11144,
2825 0x11174, 0x11178,
2826 0x11190, 0x111a0,
2827 0x111e4, 0x112f0,
2828 0x11300, 0x1133c,
2829 0x11408, 0x1146c,
2830 0x12000, 0x12004,
2831 0x12060, 0x122c4,
2832 0x19040, 0x1906c,
2833 0x19078, 0x19080,
2834 0x1908c, 0x190e8,
2835 0x190f0, 0x190f8,
2836 0x19100, 0x19110,
2837 0x19120, 0x19124,
2838 0x19150, 0x19194,
2839 0x1919c, 0x191a0,
2840 0x191ac, 0x191c8,
2841 0x191d0, 0x191e4,
2842 0x19250, 0x19250,
2843 0x19258, 0x19268,
2844 0x19278, 0x19278,
2845 0x19280, 0x192b0,
2846 0x192bc, 0x192f0,
2847 0x19300, 0x19308,
2848 0x19310, 0x19318,
2849 0x19320, 0x19328,
2850 0x19330, 0x19330,
2851 0x19348, 0x1934c,
2852 0x193f8, 0x19428,
2853 0x19430, 0x19444,
2854 0x1944c, 0x1946c,
2855 0x19474, 0x1947c,
2856 0x19488, 0x194cc,
2857 0x194f0, 0x194f8,
2858 0x19c00, 0x19c48,
2859 0x19c50, 0x19c80,
2860 0x19c94, 0x19c98,
2861 0x19ca0, 0x19cdc,
2862 0x19ce4, 0x19cf8,
2863 0x19d00, 0x19d30,
2864 0x19d50, 0x19d80,
2865 0x19d94, 0x19d98,
2866 0x19da0, 0x19de0,
2867 0x19df0, 0x19e10,
2868 0x19e50, 0x19e6c,
2869 0x19ea0, 0x19ebc,
2870 0x19ec4, 0x19ef4,
2871 0x19f04, 0x19f2c,
2872 0x19f34, 0x19f34,
2873 0x19f40, 0x19f50,
2874 0x19f90, 0x19fb4,
2875 0x19fbc, 0x19fbc,
2876 0x19fc4, 0x19fc8,
2877 0x19fd0, 0x19fe4,
2878 0x1a000, 0x1a004,
2879 0x1a010, 0x1a06c,
2880 0x1a0b0, 0x1a0e4,
2881 0x1a0ec, 0x1a108,
2882 0x1a114, 0x1a130,
2883 0x1a138, 0x1a1c4,
2884 0x1a1fc, 0x1a29c,
2885 0x1a2a8, 0x1a2b8,
2886 0x1a2c0, 0x1a388,
2887 0x1a398, 0x1a3ac,
2888 0x1e008, 0x1e00c,
2889 0x1e040, 0x1e044,
2890 0x1e04c, 0x1e04c,
2891 0x1e284, 0x1e290,
2892 0x1e2c0, 0x1e2c0,
2893 0x1e2e0, 0x1e2e4,
2894 0x1e300, 0x1e384,
2895 0x1e3c0, 0x1e3c8,
2896 0x1e408, 0x1e40c,
2897 0x1e440, 0x1e444,
2898 0x1e44c, 0x1e44c,
2899 0x1e684, 0x1e690,
2900 0x1e6c0, 0x1e6c0,
2901 0x1e6e0, 0x1e6e4,
2902 0x1e700, 0x1e784,
2903 0x1e7c0, 0x1e7c8,
2904 0x1e808, 0x1e80c,
2905 0x1e840, 0x1e844,
2906 0x1e84c, 0x1e84c,
2907 0x1ea84, 0x1ea90,
2908 0x1eac0, 0x1eac0,
2909 0x1eae0, 0x1eae4,
2910 0x1eb00, 0x1eb84,
2911 0x1ebc0, 0x1ebc8,
2912 0x1ec08, 0x1ec0c,
2913 0x1ec40, 0x1ec44,
2914 0x1ec4c, 0x1ec4c,
2915 0x1ee84, 0x1ee90,
2916 0x1eec0, 0x1eec0,
2917 0x1eee0, 0x1eee4,
2918 0x1ef00, 0x1ef84,
2919 0x1efc0, 0x1efc8,
2920 0x1f008, 0x1f00c,
2921 0x1f040, 0x1f044,
2922 0x1f04c, 0x1f04c,
2923 0x1f284, 0x1f290,
2924 0x1f2c0, 0x1f2c0,
2925 0x1f2e0, 0x1f2e4,
2926 0x1f300, 0x1f384,
2927 0x1f3c0, 0x1f3c8,
2928 0x1f408, 0x1f40c,
2929 0x1f440, 0x1f444,
2930 0x1f44c, 0x1f44c,
2931 0x1f684, 0x1f690,
2932 0x1f6c0, 0x1f6c0,
2933 0x1f6e0, 0x1f6e4,
2934 0x1f700, 0x1f784,
2935 0x1f7c0, 0x1f7c8,
2936 0x1f808, 0x1f80c,
2937 0x1f840, 0x1f844,
2938 0x1f84c, 0x1f84c,
2939 0x1fa84, 0x1fa90,
2940 0x1fac0, 0x1fac0,
2941 0x1fae0, 0x1fae4,
2942 0x1fb00, 0x1fb84,
2943 0x1fbc0, 0x1fbc8,
2944 0x1fc08, 0x1fc0c,
2945 0x1fc40, 0x1fc44,
2946 0x1fc4c, 0x1fc4c,
2947 0x1fe84, 0x1fe90,
2948 0x1fec0, 0x1fec0,
2949 0x1fee0, 0x1fee4,
2950 0x1ff00, 0x1ff84,
2951 0x1ffc0, 0x1ffc8,
2952 0x30000, 0x30038,
2953 0x30100, 0x3017c,
2954 0x30190, 0x301a0,
2955 0x301a8, 0x301b8,
2956 0x301c4, 0x301c8,
2957 0x301d0, 0x301e0,
2958 0x30200, 0x30344,
2959 0x30400, 0x304b4,
2960 0x304c0, 0x3052c,
2961 0x30540, 0x3065c,
2962 0x30800, 0x30848,
2963 0x30850, 0x308a8,
2964 0x308b8, 0x308c0,
2965 0x308cc, 0x308dc,
2966 0x30900, 0x30904,
2967 0x3090c, 0x30914,
2968 0x3091c, 0x30928,
2969 0x30930, 0x3093c,
2970 0x30944, 0x30948,
2971 0x30954, 0x30974,
2972 0x3097c, 0x30980,
2973 0x30a00, 0x30a20,
2974 0x30a38, 0x30a3c,
2975 0x30a50, 0x30a50,
2976 0x30a80, 0x30a80,
2977 0x30a88, 0x30aa8,
2978 0x30ab0, 0x30ab4,
2979 0x30ac8, 0x30ad4,
2980 0x30b28, 0x30b84,
2981 0x30b98, 0x30bb8,
2982 0x30c98, 0x30d14,
2983 0x31000, 0x31020,
2984 0x31038, 0x3103c,
2985 0x31050, 0x31050,
2986 0x31080, 0x31080,
2987 0x31088, 0x310a8,
2988 0x310b0, 0x310b4,
2989 0x310c8, 0x310d4,
2990 0x31128, 0x31184,
2991 0x31198, 0x311b8,
2992 0x32000, 0x32038,
2993 0x32100, 0x3217c,
2994 0x32190, 0x321a0,
2995 0x321a8, 0x321b8,
2996 0x321c4, 0x321c8,
2997 0x321d0, 0x321e0,
2998 0x32200, 0x32344,
2999 0x32400, 0x324b4,
3000 0x324c0, 0x3252c,
3001 0x32540, 0x3265c,
3002 0x32800, 0x32848,
3003 0x32850, 0x328a8,
3004 0x328b8, 0x328c0,
3005 0x328cc, 0x328dc,
3006 0x32900, 0x32904,
3007 0x3290c, 0x32914,
3008 0x3291c, 0x32928,
3009 0x32930, 0x3293c,
3010 0x32944, 0x32948,
3011 0x32954, 0x32974,
3012 0x3297c, 0x32980,
3013 0x32a00, 0x32a20,
3014 0x32a38, 0x32a3c,
3015 0x32a50, 0x32a50,
3016 0x32a80, 0x32a80,
3017 0x32a88, 0x32aa8,
3018 0x32ab0, 0x32ab4,
3019 0x32ac8, 0x32ad4,
3020 0x32b28, 0x32b84,
3021 0x32b98, 0x32bb8,
3022 0x32c98, 0x32d14,
3023 0x33000, 0x33020,
3024 0x33038, 0x3303c,
3025 0x33050, 0x33050,
3026 0x33080, 0x33080,
3027 0x33088, 0x330a8,
3028 0x330b0, 0x330b4,
3029 0x330c8, 0x330d4,
3030 0x33128, 0x33184,
3031 0x33198, 0x331b8,
3032 0x34000, 0x34038,
3033 0x34100, 0x3417c,
3034 0x34190, 0x341a0,
3035 0x341a8, 0x341b8,
3036 0x341c4, 0x341c8,
3037 0x341d0, 0x341e0,
3038 0x34200, 0x34344,
3039 0x34400, 0x344b4,
3040 0x344c0, 0x3452c,
3041 0x34540, 0x3465c,
3042 0x34800, 0x34848,
3043 0x34850, 0x348a8,
3044 0x348b8, 0x348c0,
3045 0x348cc, 0x348dc,
3046 0x34900, 0x34904,
3047 0x3490c, 0x34914,
3048 0x3491c, 0x34928,
3049 0x34930, 0x3493c,
3050 0x34944, 0x34948,
3051 0x34954, 0x34974,
3052 0x3497c, 0x34980,
3053 0x34a00, 0x34a20,
3054 0x34a38, 0x34a3c,
3055 0x34a50, 0x34a50,
3056 0x34a80, 0x34a80,
3057 0x34a88, 0x34aa8,
3058 0x34ab0, 0x34ab4,
3059 0x34ac8, 0x34ad4,
3060 0x34b28, 0x34b84,
3061 0x34b98, 0x34bb8,
3062 0x34c98, 0x34d14,
3063 0x35000, 0x35020,
3064 0x35038, 0x3503c,
3065 0x35050, 0x35050,
3066 0x35080, 0x35080,
3067 0x35088, 0x350a8,
3068 0x350b0, 0x350b4,
3069 0x350c8, 0x350d4,
3070 0x35128, 0x35184,
3071 0x35198, 0x351b8,
3072 0x36000, 0x36038,
3073 0x36100, 0x3617c,
3074 0x36190, 0x361a0,
3075 0x361a8, 0x361b8,
3076 0x361c4, 0x361c8,
3077 0x361d0, 0x361e0,
3078 0x36200, 0x36344,
3079 0x36400, 0x364b4,
3080 0x364c0, 0x3652c,
3081 0x36540, 0x3665c,
3082 0x36800, 0x36848,
3083 0x36850, 0x368a8,
3084 0x368b8, 0x368c0,
3085 0x368cc, 0x368dc,
3086 0x36900, 0x36904,
3087 0x3690c, 0x36914,
3088 0x3691c, 0x36928,
3089 0x36930, 0x3693c,
3090 0x36944, 0x36948,
3091 0x36954, 0x36974,
3092 0x3697c, 0x36980,
3093 0x36a00, 0x36a20,
3094 0x36a38, 0x36a3c,
3095 0x36a50, 0x36a50,
3096 0x36a80, 0x36a80,
3097 0x36a88, 0x36aa8,
3098 0x36ab0, 0x36ab4,
3099 0x36ac8, 0x36ad4,
3100 0x36b28, 0x36b84,
3101 0x36b98, 0x36bb8,
3102 0x36c98, 0x36d14,
3103 0x37000, 0x37020,
3104 0x37038, 0x3703c,
3105 0x37050, 0x37050,
3106 0x37080, 0x37080,
3107 0x37088, 0x370a8,
3108 0x370b0, 0x370b4,
3109 0x370c8, 0x370d4,
3110 0x37128, 0x37184,
3111 0x37198, 0x371b8,
3112 0x38000, 0x380b0,
3113 0x380b8, 0x38130,
3114 0x38140, 0x38140,
3115 0x38150, 0x38154,
3116 0x38160, 0x381c4,
3117 0x381d0, 0x38204,
3118 0x3820c, 0x38214,
3119 0x3821c, 0x3822c,
3120 0x38244, 0x38244,
3121 0x38254, 0x38274,
3122 0x3827c, 0x38280,
3123 0x38300, 0x38304,
3124 0x3830c, 0x38314,
3125 0x3831c, 0x3832c,
3126 0x38344, 0x38344,
3127 0x38354, 0x38374,
3128 0x3837c, 0x38380,
3129 0x38400, 0x38424,
3130 0x38438, 0x3843c,
3131 0x38480, 0x38480,
3132 0x384a8, 0x384a8,
3133 0x384b0, 0x384b4,
3134 0x384c8, 0x38514,
3135 0x38600, 0x3860c,
3136 0x3861c, 0x38624,
3137 0x38900, 0x38924,
3138 0x38938, 0x3893c,
3139 0x38980, 0x38980,
3140 0x389a8, 0x389a8,
3141 0x389b0, 0x389b4,
3142 0x389c8, 0x38a14,
3143 0x38b00, 0x38b0c,
3144 0x38b1c, 0x38b24,
3145 0x38e00, 0x38e00,
3146 0x38e18, 0x38e20,
3147 0x38e38, 0x38e40,
3148 0x38e58, 0x38e60,
3149 0x38e78, 0x38e80,
3150 0x38e98, 0x38ea0,
3151 0x38eb8, 0x38ec0,
3152 0x38ed8, 0x38ee0,
3153 0x38ef8, 0x38f08,
3154 0x38f10, 0x38f2c,
3155 0x38f80, 0x38ffc,
3156 0x39080, 0x39080,
3157 0x39088, 0x39090,
3158 0x39100, 0x39108,
3159 0x39120, 0x39128,
3160 0x39140, 0x39148,
3161 0x39160, 0x39168,
3162 0x39180, 0x39188,
3163 0x391a0, 0x391a8,
3164 0x391c0, 0x391c8,
3165 0x391e0, 0x391e8,
3166 0x39200, 0x39200,
3167 0x39208, 0x39240,
3168 0x39300, 0x39300,
3169 0x39308, 0x39340,
3170 0x39400, 0x39400,
3171 0x39408, 0x39440,
3172 0x39500, 0x39500,
3173 0x39508, 0x39540,
3174 0x39600, 0x39600,
3175 0x39608, 0x39640,
3176 0x39700, 0x39700,
3177 0x39708, 0x39740,
3178 0x39800, 0x39800,
3179 0x39808, 0x39840,
3180 0x39900, 0x39900,
3181 0x39908, 0x39940,
3182 0x39a00, 0x39a04,
3183 0x39a10, 0x39a14,
3184 0x39a1c, 0x39aa8,
3185 0x39b00, 0x39ecc,
3186 0x3a000, 0x3a004,
3187 0x3a050, 0x3a084,
3188 0x3a090, 0x3a09c,
3189 0x3a93c, 0x3a93c,
3190 0x3b93c, 0x3b93c,
3191 0x3c93c, 0x3c93c,
3192 0x3d93c, 0x3d93c,
3193 0x3e000, 0x3e020,
3194 0x3e03c, 0x3e05c,
3195 0x3e100, 0x3e120,
3196 0x3e13c, 0x3e15c,
3197 0x3e200, 0x3e220,
3198 0x3e23c, 0x3e25c,
3199 0x3e300, 0x3e320,
3200 0x3e33c, 0x3e35c,
3201 0x3f000, 0x3f034,
3202 0x3f100, 0x3f130,
3203 0x3f200, 0x3f218,
3204 0x44000, 0x44014,
3205 0x44020, 0x44028,
3206 0x44030, 0x44030,
3207 0x44100, 0x44114,
3208 0x44120, 0x44128,
3209 0x44130, 0x44130,
3210 0x44200, 0x44214,
3211 0x44220, 0x44228,
3212 0x44230, 0x44230,
3213 0x44300, 0x44314,
3214 0x44320, 0x44328,
3215 0x44330, 0x44330,
3216 0x44400, 0x44414,
3217 0x44420, 0x44428,
3218 0x44430, 0x44430,
3219 0x44500, 0x44514,
3220 0x44520, 0x44528,
3221 0x44530, 0x44530,
3222 0x44714, 0x44718,
3223 0x44730, 0x44730,
3224 0x447c0, 0x447c0,
3225 0x447f0, 0x447f0,
3226 0x447f8, 0x447fc,
3227 0x45000, 0x45014,
3228 0x45020, 0x45028,
3229 0x45030, 0x45030,
3230 0x45100, 0x45114,
3231 0x45120, 0x45128,
3232 0x45130, 0x45130,
3233 0x45200, 0x45214,
3234 0x45220, 0x45228,
3235 0x45230, 0x45230,
3236 0x45300, 0x45314,
3237 0x45320, 0x45328,
3238 0x45330, 0x45330,
3239 0x45400, 0x45414,
3240 0x45420, 0x45428,
3241 0x45430, 0x45430,
3242 0x45500, 0x45514,
3243 0x45520, 0x45528,
3244 0x45530, 0x45530,
3245 0x45714, 0x45718,
3246 0x45730, 0x45730,
3247 0x457c0, 0x457c0,
3248 0x457f0, 0x457f0,
3249 0x457f8, 0x457fc,
3250 0x46000, 0x46010,
3251 0x46020, 0x46034,
3252 0x46040, 0x46050,
3253 0x46060, 0x46088,
3254 0x47000, 0x4709c,
3255 0x470c0, 0x470d4,
3256 0x47100, 0x471a8,
3257 0x471b0, 0x471e8,
3258 0x47200, 0x47210,
3259 0x4721c, 0x47230,
3260 0x47238, 0x47238,
3261 0x47240, 0x472ac,
3262 0x472d0, 0x472f4,
3263 0x47300, 0x47310,
3264 0x47318, 0x47348,
3265 0x47350, 0x47354,
3266 0x47380, 0x47388,
3267 0x47390, 0x47394,
3268 0x47400, 0x47448,
3269 0x47450, 0x47458,
3270 0x47500, 0x4751c,
3271 0x47530, 0x4754c,
3272 0x47560, 0x4757c,
3273 0x47590, 0x475ac,
3274 0x47600, 0x47630,
3275 0x47640, 0x47644,
3276 0x47660, 0x4769c,
3277 0x47700, 0x47710,
3278 0x47740, 0x47750,
3279 0x4775c, 0x4779c,
3280 0x477b0, 0x477bc,
3281 0x477c4, 0x477c8,
3282 0x477d4, 0x477fc,
3283 0x48000, 0x48004,
3284 0x48018, 0x4801c,
3285 0x49304, 0x49320,
3286 0x4932c, 0x4932c,
3287 0x49334, 0x493f0,
3288 0x49400, 0x49410,
3289 0x49460, 0x494f4,
3290 0x50000, 0x50084,
3291 0x50090, 0x500cc,
3292 0x50300, 0x50384,
3293 0x50400, 0x50404,
3294 0x50800, 0x50884,
3295 0x50890, 0x508cc,
3296 0x50b00, 0x50b84,
3297 0x50c00, 0x50c04,
3298 0x51000, 0x51020,
3299 0x51028, 0x510c4,
3300 0x51104, 0x51108,
3301 0x51200, 0x51274,
3302 0x51300, 0x51324,
3303 0x51400, 0x51548,
3304 0x51550, 0x51554,
3305 0x5155c, 0x51584,
3306 0x5158c, 0x515c8,
3307 0x515f0, 0x515f4,
3308 0x58000, 0x58004,
3309 0x58018, 0x5801c,
3310 0x59304, 0x59320,
3311 0x5932c, 0x5932c,
3312 0x59334, 0x593f0,
3313 0x59400, 0x59410,
3314 0x59460, 0x594f4,
3315 };
3316
3317 u32 *buf_end = (u32 *)(buf + buf_size);
3318 const unsigned int *reg_ranges;
3319 int reg_ranges_size, range;
3320 unsigned int chip_version = chip_id(adap);
3321
3322 /*
3323 * Select the right set of register ranges to dump depending on the
3324 * adapter chip type.
3325 */
3326 switch (chip_version) {
3327 case CHELSIO_T4:
3328 if (adap->flags & IS_VF) {
3329 reg_ranges = t4vf_reg_ranges;
3330 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
3331 } else {
3332 reg_ranges = t4_reg_ranges;
3333 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
3334 }
3335 break;
3336
3337 case CHELSIO_T5:
3338 if (adap->flags & IS_VF) {
3339 reg_ranges = t5vf_reg_ranges;
3340 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
3341 } else {
3342 reg_ranges = t5_reg_ranges;
3343 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
3344 }
3345 break;
3346
3347 case CHELSIO_T6:
3348 if (adap->flags & IS_VF) {
3349 reg_ranges = t6vf_reg_ranges;
3350 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
3351 } else {
3352 reg_ranges = t6_reg_ranges;
3353 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
3354 }
3355 break;
3356
3357 case CHELSIO_T7:
3358 if (adap->flags & IS_VF) {
3359 reg_ranges = t6vf_reg_ranges;
3360 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
3361 } else {
3362 reg_ranges = t7_reg_ranges;
3363 reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
3364 }
3365 break;
3366
3367 default:
3368 CH_ERR(adap,
3369 "Unsupported chip version %d\n", chip_version);
3370 return;
3371 }
3372
3373 /*
3374 * Clear the register buffer and insert the appropriate register
3375 * values selected by the above register ranges.
3376 */
3377 memset(buf, 0, buf_size);
3378 for (range = 0; range < reg_ranges_size; range += 2) {
3379 unsigned int reg = reg_ranges[range];
3380 unsigned int last_reg = reg_ranges[range + 1];
3381 u32 *bufp = (u32 *)(buf + reg);
3382
3383 /*
3384 * Iterate across the register range filling in the register
3385 * buffer but don't write past the end of the register buffer.
3386 */
3387 while (reg <= last_reg && bufp < buf_end) {
3388 *bufp++ = t4_read_reg(adap, reg);
3389 reg += sizeof(u32);
3390 }
3391 }
3392 }
3393
3394 /*
3395 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID
3396 * header followed by one or more VPD-R sections, each with its own header.
3397 */
struct t4_vpd_hdr {
	u8 id_tag;		/* resource tag; presumably CHELSIO_VPD_UNIQUE_ID (0x82) -- confirm against reader */
	u8 id_len[2];		/* ID string length, little-endian */
	u8 id_data[ID_LEN];	/* product ID string */
};
3403
struct t4_vpdr_hdr {
	u8 vpdr_tag;		/* VPD-R section tag; consecutive sections use consecutive tags (see get_vpd_keyword_val) */
	u8 vpdr_len[2];		/* section payload length, little-endian */
};
3408
3409 /*
3410 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
3411 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc		/* EEPROM status word; polled after writes, written by t4_seeprom_wp() */
#define VPD_SIZE		0x800		/* size of the VPD area -- presumably bytes; confirm against callers */
#define VPD_BASE		0x400		/* usual physical offset of the VPD in the EEPROM */
#define VPD_BASE_OLD		0		/* legacy VPD offset -- TODO confirm which adapters use this */
#define VPD_LEN			1024		/* number of VPD bytes parsed (bound used by get_vpd_keyword_val()) */
#define VPD_INFO_FLD_HDR_SIZE	3		/* keyword header: 2-byte name + 1-byte length */
#define CHELSIO_VPD_UNIQUE_ID	0x82		/* VPD ID-string resource tag */
3422
3423 /*
3424 * Small utility function to wait till any outstanding VPD Access is complete.
3425 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
3426 * VPD Access in flight. This allows us to handle the problem of having a
3427 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
3429 */
t4_seeprom_wait(struct adapter * adapter)3430 static int t4_seeprom_wait(struct adapter *adapter)
3431 {
3432 unsigned int base = adapter->params.pci.vpd_cap_addr;
3433 int max_poll;
3434
3435 /*
3436 * If no VPD Access is in flight, we can just return success right
3437 * away.
3438 */
3439 if (!adapter->vpd_busy)
3440 return 0;
3441
3442 /*
3443 * Poll the VPD Capability Address/Flag register waiting for it
3444 * to indicate that the operation is complete.
3445 */
3446 max_poll = EEPROM_MAX_POLL;
3447 do {
3448 u16 val;
3449
3450 udelay(EEPROM_DELAY);
3451 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
3452
3453 /*
3454 * If the operation is complete, mark the VPD as no longer
3455 * busy and return success.
3456 */
3457 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
3458 adapter->vpd_busy = 0;
3459 return 0;
3460 }
3461 } while (--max_poll);
3462
3463 /*
3464 * Failure! Note that we leave the VPD Busy status set in order to
3465 * avoid pushing a new VPD Access request into the VPD Capability till
3466 * the current operation eventually succeeds. It's a bug to issue a
3467 * new request when an existing request is in flight and will result
3468 * in corrupt hardware state.
3469 */
3470 return -ETIMEDOUT;
3471 }
3472
3473 /**
3474 * t4_seeprom_read - read a serial EEPROM location
3475 * @adapter: adapter to read
3476 * @addr: EEPROM virtual address
3477 * @data: where to store the read data
3478 *
3479 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
3480 * VPD capability. Note that this function must be called with a virtual
3481 * address.
3482 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned and within the
	 * virtual EEPROM address space.
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Make sure no earlier VPD Access is still outstanding before
	 * injecting a new request.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret != 0) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Kick off the read, mark the VPD as busy, and wait for completion.
	 * On timeout the VPD Busy status is intentionally left set (see
	 * t4_seeprom_wait()).
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret != 0) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Pull the returned word out of the data register and convert it
	 * from little-endian to host order.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
3527
3528 /**
3529 * t4_seeprom_write - write a serial EEPROM location
3530 * @adapter: adapter to write
3531 * @addr: virtual EEPROM address
3532 * @data: value to write
3533 *
3534 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
3535 * VPD capability. Note that this function must be called with a virtual
3536 * address.
3537 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg = 0;	/* initialized: see poll loop below */
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 *
	 * Check the status read's own return value: previously it was
	 * ignored, so a failed read left stats_reg uninitialized and the
	 * loop condition read indeterminate data.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		ret = t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
		if (ret)
			return ret;
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
3595
3596 /**
3597 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
3598 * @phys_addr: the physical EEPROM address
3599 * @fn: the PCI function number
3600 * @sz: size of function-specific area
3601 *
3602 * Translate a physical EEPROM address to virtual. The first 1K is
3603 * accessed through virtual addresses starting at 31K, the rest is
3604 * accessed through virtual addresses starting at 0.
3605 *
3606 * The mapping is as follows:
3607 * [0..1K) -> [31K..32K)
3608 * [1K..1K+A) -> [ES-A..ES)
3609 * [1K+A..ES) -> [0..ES-A-1K)
3610 *
3611 * where A = @fn * @sz, and ES = EEPROM size.
3612 */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)3613 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3614 {
3615 fn *= sz;
3616 if (phys_addr < 1024)
3617 return phys_addr + (31 << 10);
3618 if (phys_addr < 1024 + fn)
3619 return EEPROMSIZE - fn + phys_addr - 1024;
3620 if (phys_addr < EEPROMSIZE)
3621 return phys_addr - 1024 - fn;
3622 return -EINVAL;
3623 }
3624
3625 /**
3626 * t4_seeprom_wp - enable/disable EEPROM write protection
3627 * @adapter: the adapter
3628 * @enable: whether to enable or disable write protection
3629 *
3630 * Enables or disables write protection on the serial EEPROM.
3631 */
t4_seeprom_wp(struct adapter * adapter,int enable)3632 int t4_seeprom_wp(struct adapter *adapter, int enable)
3633 {
3634 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3635 }
3636
3637 /**
3638 * get_vpd_keyword_val - Locates an information field keyword in the VPD
3639 * @vpd: Pointer to buffered vpd data structure
3640 * @kw: The keyword to search for
3641 * @region: VPD region to search (starting from 0)
3642 *
3643 * Returns the value of the information field keyword or
3644 * -ENOENT otherwise.
3645 */
get_vpd_keyword_val(const u8 * vpd,const char * kw,int region)3646 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
3647 {
3648 int i, tag;
3649 unsigned int offset, len;
3650 const struct t4_vpdr_hdr *vpdr;
3651
3652 offset = sizeof(struct t4_vpd_hdr);
3653 vpdr = (const void *)(vpd + offset);
3654 tag = vpdr->vpdr_tag;
3655 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
3656 while (region--) {
3657 offset += sizeof(struct t4_vpdr_hdr) + len;
3658 vpdr = (const void *)(vpd + offset);
3659 if (++tag != vpdr->vpdr_tag)
3660 return -ENOENT;
3661 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
3662 }
3663 offset += sizeof(struct t4_vpdr_hdr);
3664
3665 if (offset + len > VPD_LEN) {
3666 return -ENOENT;
3667 }
3668
3669 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3670 if (memcmp(vpd + i , kw , 2) == 0){
3671 i += VPD_INFO_FLD_HDR_SIZE;
3672 return i;
3673 }
3674
3675 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
3676 }
3677
3678 return -ENOENT;
3679 }
3680
3681
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@device_id: the PCI device id (bit 7 set indicates a custom card)
 *	@buf: caller provided temporary space (VPD_LEN bytes) to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM: id, EC, serial number,
 *	part number, MAC address and (for non-custom cards) media type.
 *	Returns 0 on success or a negative errno on failure.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
			  uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;	/* offsets of the keyword values in the VPD */
	u8 csum;
	const u8 *vpd = (const u8 *)buf;	/* byte view of the same buffer */

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the whole VPD area into the caller's buffer, 4 bytes at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

/* Look up a keyword's value offset; fail the whole routine if it's missing. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/*
	 * The RV keyword holds the checksum byte; all bytes from the start of
	 * the VPD up to and including RV must sum to zero (mod 256).
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out, clamped to its destination size, and trim it. */
	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/* The byte before each value (field header) carries its actual length. */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/* Media type lives in the second VPD-R region; tolerate its absence. */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}
3773
/*
 * Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters.
 * Indexed by enum t4_flash_loc; entries not listed are zero-initialized.
 */
static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
	[FLASH_LOC_EXP_ROM] = { 0, 6 },
	[FLASH_LOC_IBFT] = { 6, 1 },
	[FLASH_LOC_BOOTCFG] = { 7, 1 },
	[FLASH_LOC_FW] = { 8, 16 },
	[FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
	[FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 30, 1 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_CUDBG] = { 32, 32 },
	[FLASH_LOC_BOOT_AREA] = { 0, 8 },	/* Spans complete Boot Area */
	[FLASH_LOC_END] = { 64, 0 },		/* Sentinel: total sector count */
};
3788
/*
 * Flash Layout {start sector, # of sectors} for T7 adapters.
 * Indexed by enum t4_flash_loc; entries not listed are zero-initialized.
 */
static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
	[FLASH_LOC_VPD] = { 0, 1 },
	[FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
	[FLASH_LOC_FW] = { 2, 29 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_EXP_ROM] = { 32, 15 },
	[FLASH_LOC_IBFT] = { 47, 1 },
	[FLASH_LOC_BOOTCFG] = { 48, 1 },
	[FLASH_LOC_DPU_BOOT] = { 49, 13 },
	[FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 63, 1 },
	[FLASH_LOC_VPD_BACKUP] = { 64, 1 },
	[FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
	[FLASH_LOC_FW_BACKUP] = { 66, 29 },
	[FLASH_LOC_CFG_BACK] = { 95, 1 },
	[FLASH_LOC_CUDBG] = { 96, 48 },
	[FLASH_LOC_CHIP_DUMP] = { 144, 48 },
	[FLASH_LOC_DPU_AREA] = { 192, 64 },
	[FLASH_LOC_BOOT_AREA] = { 32, 17 },	/* Spans complete UEFI/PXE Boot Area */
	[FLASH_LOC_END] = { 256, 0 },		/* Sentinel: total sector count */
};
3811
3812 int
t4_flash_loc_start(struct adapter * adap,enum t4_flash_loc loc,unsigned int * lenp)3813 t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
3814 unsigned int *lenp)
3815 {
3816 const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
3817 &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
3818
3819 if (lenp != NULL)
3820 *lenp = FLASH_MAX_SIZE(l->nsecs);
3821 return (FLASH_START(l->start_sec));
3822 }
3823
/*
 * Serial flash and firmware constants and flash config file constants.
 * The opcodes below are issued to the flash part via sf1_write().
 */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program 256B page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3837
3838 /**
3839 * sf1_read - read data from the serial flash
3840 * @adapter: the adapter
3841 * @byte_cnt: number of bytes to read
3842 * @cont: whether another operation will be chained
3843 * @lock: whether to lock SF for PL access only
3844 * @valp: where to store the read data
3845 *
3846 * Reads up to 4 bytes of data from the serial flash. The location of
3847 * the read needs to be specified prior to calling this by issuing the
3848 * appropriate commands to the serial flash.
3849 */
sf1_read(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 * valp)3850 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3851 int lock, u32 *valp)
3852 {
3853 int ret;
3854 uint32_t op;
3855
3856 if (!byte_cnt || byte_cnt > 4)
3857 return -EINVAL;
3858 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3859 return -EBUSY;
3860 op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
3861 if (chip_id(adapter) >= CHELSIO_T7)
3862 op |= F_QUADREADDISABLE;
3863 t4_write_reg(adapter, A_SF_OP, op);
3864 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3865 if (!ret)
3866 *valp = t4_read_reg(adapter, A_SF_DATA);
3867 return ret;
3868 }
3869
3870 /**
3871 * sf1_write - write data to the serial flash
3872 * @adapter: the adapter
3873 * @byte_cnt: number of bytes to write
3874 * @cont: whether another operation will be chained
3875 * @lock: whether to lock SF for PL access only
3876 * @val: value to write
3877 *
3878 * Writes up to 4 bytes of data to the serial flash. The location of
3879 * the write needs to be specified prior to calling this by issuing the
3880 * appropriate commands to the serial flash.
3881 */
sf1_write(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 val)3882 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3883 int lock, u32 val)
3884 {
3885 if (!byte_cnt || byte_cnt > 4)
3886 return -EINVAL;
3887 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3888 return -EBUSY;
3889 t4_write_reg(adapter, A_SF_DATA, val);
3890 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3891 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3892 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3893 }
3894
3895 /**
3896 * flash_wait_op - wait for a flash operation to complete
3897 * @adapter: the adapter
3898 * @attempts: max number of polls of the status register
3899 * @delay: delay between polls in ms
3900 *
3901 * Wait for a flash operation to complete by polling the status register.
3902 */
flash_wait_op(struct adapter * adapter,int attempts,int delay)3903 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3904 {
3905 int ret;
3906 u32 status;
3907
3908 while (1) {
3909 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3910 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3911 return ret;
3912 if (!(status & 1))
3913 return 0;
3914 if (--attempts == 0)
3915 return -EAGAIN;
3916 if (delay)
3917 msleep(delay);
3918 }
3919 }
3920
3921 /**
3922 * t4_read_flash - read words from serial flash
3923 * @adapter: the adapter
3924 * @addr: the start address for the read
3925 * @nwords: how many 32-bit words to read
3926 * @data: where to store the read data
3927 * @byte_oriented: whether to store data as bytes or as words
3928 *
3929 * Read the specified number of 32-bit words from the serial flash.
3930 * If @byte_oriented is set the read data is stored as a byte array
3931 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3932 * natural endianness.
3933 */
t4_read_flash(struct adapter * adapter,unsigned int addr,unsigned int nwords,u32 * data,int byte_oriented)3934 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3935 unsigned int nwords, u32 *data, int byte_oriented)
3936 {
3937 int ret;
3938
3939 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3940 return -EINVAL;
3941
3942 addr = swab32(addr) | SF_RD_DATA_FAST;
3943
3944 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3945 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3946 return ret;
3947
3948 for ( ; nwords; nwords--, data++) {
3949 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3950 if (nwords == 1)
3951 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3952 if (ret)
3953 return ret;
3954 if (byte_oriented)
3955 *data = (__force __u32)(cpu_to_be32(*data));
3956 }
3957 return 0;
3958 }
3959
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.  The written
 *	page is read back and compared to verify the write.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];	/* read-back buffer for verification */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must stay within the flash and within a single page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Page-program command with the (byte-swapped) start address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out 4 bytes at a time, chained while data remains. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		/* cont (c != left) is true for every chunk except the last. */
		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced by n above, so data - n is the original start. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
4025
4026 /**
4027 * t4_get_fw_version - read the firmware version
4028 * @adapter: the adapter
4029 * @vers: where to place the version
4030 *
4031 * Reads the FW version from flash.
4032 */
t4_get_fw_version(struct adapter * adapter,u32 * vers)4033 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
4034 {
4035 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4036
4037 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
4038 1, vers, 0);
4039 }
4040
4041 /**
4042 * t4_get_fw_hdr - read the firmware header
4043 * @adapter: the adapter
4044 * @hdr: where to place the version
4045 *
4046 * Reads the FW header from flash into caller provided buffer.
4047 */
t4_get_fw_hdr(struct adapter * adapter,struct fw_hdr * hdr)4048 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
4049 {
4050 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4051
4052 return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
4053 (uint32_t *)hdr, 1);
4054 }
4055
4056 /**
4057 * t4_get_bs_version - read the firmware bootstrap version
4058 * @adapter: the adapter
4059 * @vers: where to place the version
4060 *
4061 * Reads the FW Bootstrap version from flash.
4062 */
t4_get_bs_version(struct adapter * adapter,u32 * vers)4063 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
4064 {
4065 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
4066 NULL);
4067
4068 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
4069 1, vers, 0);
4070 }
4071
4072 /**
4073 * t4_get_tp_version - read the TP microcode version
4074 * @adapter: the adapter
4075 * @vers: where to place the version
4076 *
4077 * Reads the TP microcode version from flash.
4078 */
t4_get_tp_version(struct adapter * adapter,u32 * vers)4079 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
4080 {
4081 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4082
4083 return t4_read_flash(adapter, start +
4084 offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
4085 }
4086
4087 /**
4088 * t4_get_exprom_version - return the Expansion ROM version (if any)
4089 * @adapter: the adapter
4090 * @vers: where to place the version
4091 *
4092 * Reads the Expansion ROM header from FLASH and returns the version
4093 * number (if present) through the @vers return value pointer. We return
4094 * this in the Firmware Version Format since it's convenient. Return
4095 * 0 on success, -ENOENT if no Expansion ROM is present.
4096 */
t4_get_exprom_version(struct adapter * adapter,u32 * vers)4097 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
4098 {
4099 struct exprom_header {
4100 unsigned char hdr_arr[16]; /* must start with 0x55aa */
4101 unsigned char hdr_ver[4]; /* Expansion ROM version */
4102 } *hdr;
4103 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
4104 sizeof(u32))];
4105 int ret;
4106 const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
4107
4108 ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
4109 exprom_header_buf, 0);
4110 if (ret)
4111 return ret;
4112
4113 hdr = (struct exprom_header *)exprom_header_buf;
4114 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
4115 return -ENOENT;
4116
4117 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
4118 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
4119 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
4120 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
4121 return 0;
4122 }
4123
4124 /**
4125 * t4_get_scfg_version - return the Serial Configuration version
4126 * @adapter: the adapter
4127 * @vers: where to place the version
4128 *
4129 * Reads the Serial Configuration Version via the Firmware interface
4130 * (thus this can only be called once we're ready to issue Firmware
4131 * commands). The format of the Serial Configuration version is
4132 * adapter specific. Returns 0 on success, an error on failure.
4133 *
4134 * Note that early versions of the Firmware didn't include the ability
4135 * to retrieve the Serial Configuration version, so we zero-out the
4136 * return-value parameter in that case to avoid leaving it with
4137 * garbage in it.
4138 *
4139 * Also note that the Firmware will return its cached copy of the Serial
4140 * Initialization Revision ID, not the actual Revision ID as written in
4141 * the Serial EEPROM. This is only an issue if a new VPD has been written
4142 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
4143 * it's best to defer calling this routine till after a FW_RESET_CMD has
4144 * been issued if the Host Driver will be performing a full adapter
4145 * initialization.
4146 */
t4_get_scfg_version(struct adapter * adapter,u32 * vers)4147 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
4148 {
4149 u32 scfgrev_param;
4150 int ret;
4151
4152 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4153 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
4154 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4155 1, &scfgrev_param, vers);
4156 if (ret)
4157 *vers = 0;
4158 return ret;
4159 }
4160
4161 /**
4162 * t4_get_vpd_version - return the VPD version
4163 * @adapter: the adapter
4164 * @vers: where to place the version
4165 *
4166 * Reads the VPD via the Firmware interface (thus this can only be called
4167 * once we're ready to issue Firmware commands). The format of the
4168 * VPD version is adapter specific. Returns 0 on success, an error on
4169 * failure.
4170 *
4171 * Note that early versions of the Firmware didn't include the ability
4172 * to retrieve the VPD version, so we zero-out the return-value parameter
4173 * in that case to avoid leaving it with garbage in it.
4174 *
4175 * Also note that the Firmware will return its cached copy of the VPD
4176 * Revision ID, not the actual Revision ID as written in the Serial
4177 * EEPROM. This is only an issue if a new VPD has been written and the
4178 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
4179 * to defer calling this routine till after a FW_RESET_CMD has been issued
4180 * if the Host Driver will be performing a full adapter initialization.
4181 */
t4_get_vpd_version(struct adapter * adapter,u32 * vers)4182 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
4183 {
4184 u32 vpdrev_param;
4185 int ret;
4186
4187 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4188 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
4189 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4190 1, &vpdrev_param, vers);
4191 if (ret)
4192 *vers = 0;
4193 return ret;
4194 }
4195
4196 /**
4197 * t4_get_version_info - extract various chip/firmware version information
4198 * @adapter: the adapter
4199 *
4200 * Reads various chip/firmware version numbers and stores them into the
4201 * adapter Adapter Parameters structure. If any of the efforts fails
4202 * the first failure will be returned, but all of the version numbers
4203 * will be read.
4204 */
t4_get_version_info(struct adapter * adapter)4205 int t4_get_version_info(struct adapter *adapter)
4206 {
4207 int ret = 0;
4208
4209 #define FIRST_RET(__getvinfo) \
4210 do { \
4211 int __ret = __getvinfo; \
4212 if (__ret && !ret) \
4213 ret = __ret; \
4214 } while (0)
4215
4216 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
4217 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
4218 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
4219 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
4220 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
4221 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
4222
4223 #undef FIRST_RET
4224
4225 return ret;
4226 }
4227
4228 /**
4229 * t4_flash_erase_sectors - erase a range of flash sectors
4230 * @adapter: the adapter
4231 * @start: the first sector to erase
4232 * @end: the last sector to erase
4233 *
4234 * Erases the sectors in the given inclusive range.
4235 */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)4236 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4237 {
4238 int ret = 0;
4239
4240 if (end >= adapter->params.sf_nsec)
4241 return -EINVAL;
4242
4243 while (start <= end) {
4244 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4245 (ret = sf1_write(adapter, 4, 0, 1,
4246 SF_ERASE_SECTOR | (start << 8))) != 0 ||
4247 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4248 CH_ERR(adapter,
4249 "erase of flash sector %d failed, error %d\n",
4250 start, ret);
4251 break;
4252 }
4253 start++;
4254 }
4255 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4256 return ret;
4257 }
4258
4259 /**
4260 * t4_flash_cfg_addr - return the address of the flash configuration file
4261 * @adapter: the adapter
4262 *
4263 * Return the address within the flash where the Firmware Configuration
4264 * File is stored, or an error if the device FLASH is too small to contain
4265 * a Firmware Configuration File.
4266 */
t4_flash_cfg_addr(struct adapter * adapter,unsigned int * lenp)4267 int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
4268 {
4269 unsigned int len = 0;
4270 const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
4271
4272 /*
4273 * If the device FLASH isn't large enough to hold a Firmware
4274 * Configuration File, return an error.
4275 */
4276 if (adapter->params.sf_size < cfg_start + len)
4277 return -ENOSPC;
4278 if (lenp != NULL)
4279 *lenp = len;
4280 return (cfg_start);
4281 }
4282
4283 /*
4284 * Return TRUE if the specified firmware matches the adapter. I.e. T4
4285 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
4286 * and emit an error message for mismatched firmware to save our caller the
4287 * effort ...
4288 */
t4_fw_matches_chip(struct adapter * adap,const struct fw_hdr * hdr)4289 static int t4_fw_matches_chip(struct adapter *adap,
4290 const struct fw_hdr *hdr)
4291 {
4292 /*
4293 * The expression below will return FALSE for any unsupported adapter
4294 * which will keep us "honest" in the future ...
4295 */
4296 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
4297 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
4298 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
4299 (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
4300 return 1;
4301
4302 CH_ERR(adap,
4303 "FW image (%d) is not suitable for this adapter (%d)\n",
4304 hdr->chip, chip_id(adap));
4305 return 0;
4306 }
4307
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image is validated (size, header, per-chip match, checksum) before
 *	any sector is erased.  The real version word is written last so a
 *	partially-written image is detectable by its bad version.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;
	enum t4_flash_loc loc;

	/* Bootstrap images (by header magic) go to their own flash region. */
	loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
	    FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
	fw_start = t4_flash_loc_start(adap, loc, &fw_size);
	fw_start_sec = fw_start / SF_SEC_SIZE;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, SF_SEC_SIZE);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version word. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	return ret;
}
4399
4400 /**
4401 * t4_fwcache - firmware cache operation
4402 * @adap: the adapter
4403 * @op : the operation (flush or flush and invalidate)
4404 */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)4405 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4406 {
4407 struct fw_params_cmd c;
4408
4409 memset(&c, 0, sizeof(c));
4410 c.op_to_vfn =
4411 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4412 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4413 V_FW_PARAMS_CMD_PFN(adap->pf) |
4414 V_FW_PARAMS_CMD_VFN(0));
4415 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4416 c.param[0].mnem =
4417 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4418 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4419 c.param[0].val = cpu_to_be32(op);
4420
4421 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4422 }
4423
/*
 * Dump the CIM PIF logic-analyzer buffers into @pif_req and @pif_rsp and
 * optionally report the current request/response write pointers.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Clear F_LADBGEN (if set) so the LA contents are stable while read. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		/* Read 6 consecutive entries ... */
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* ... then skip 2, wrapping the pointers at the field mask. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4457
/*
 * Dump the CIM MA logic-analyzer buffers into @ma_req and @ma_rsp.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 saved_cfg;
	int row, col, ptr;

	/* Clear F_LADBGEN (if set) so the LA contents are stable while read. */
	saved_cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (saved_cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg ^ F_LADBGEN);

	for (row = 0; row < CIM_MALA_SIZE; row++) {
		/* 5 entries are read out of each group of 8. */
		for (col = 0; col < 5; col++) {
			ptr = 8 * row + col;
			t4_write_reg(adap, A_CIM_DEBUGCFG,
			    V_POLADBGRDPTR(ptr) | V_PILADBGRDPTR(ptr));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg);
}
4478
/*
 * Dump the 8 interleaved ULP_RX logic-analyzer channels into @la_buf.
 * Entry j of channel i lands at la_buf[i + 8 * j].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int chan, entry;
	u32 wrptr;

	for (chan = 0; chan < 8; chan++) {
		u32 *dst = la_buf + chan;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, chan);
		/* Start reading from the current write pointer. */
		wrptr = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, wrptr);
		for (entry = 0; entry < ULPRX_LA_SIZE; entry++, dst += 8)
			*dst = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4493
4494 /**
4495 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
4496 * @caps16: a 16-bit Port Capabilities value
4497 *
4498 * Returns the equivalent 32-bit Port Capabilities value.
4499 */
fwcaps16_to_caps32(uint16_t caps16)4500 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
4501 {
4502 uint32_t caps32 = 0;
4503
4504 #define CAP16_TO_CAP32(__cap) \
4505 do { \
4506 if (caps16 & FW_PORT_CAP_##__cap) \
4507 caps32 |= FW_PORT_CAP32_##__cap; \
4508 } while (0)
4509
4510 CAP16_TO_CAP32(SPEED_100M);
4511 CAP16_TO_CAP32(SPEED_1G);
4512 CAP16_TO_CAP32(SPEED_25G);
4513 CAP16_TO_CAP32(SPEED_10G);
4514 CAP16_TO_CAP32(SPEED_40G);
4515 CAP16_TO_CAP32(SPEED_100G);
4516 CAP16_TO_CAP32(FC_RX);
4517 CAP16_TO_CAP32(FC_TX);
4518 CAP16_TO_CAP32(ANEG);
4519 CAP16_TO_CAP32(FORCE_PAUSE);
4520 CAP16_TO_CAP32(MDIAUTO);
4521 CAP16_TO_CAP32(MDISTRAIGHT);
4522 CAP16_TO_CAP32(FEC_RS);
4523 CAP16_TO_CAP32(FEC_BASER_RS);
4524 CAP16_TO_CAP32(802_3_PAUSE);
4525 CAP16_TO_CAP32(802_3_ASM_DIR);
4526
4527 #undef CAP16_TO_CAP32
4528
4529 return caps32;
4530 }
4531
4532 /**
4533 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4534 * @caps32: a 32-bit Port Capabilities value
4535 *
4536 * Returns the equivalent 16-bit Port Capabilities value. Note that
4537 * not all 32-bit Port Capabilities can be represented in the 16-bit
4538 * Port Capabilities and some fields/values may not make it.
4539 */
fwcaps32_to_caps16(uint32_t caps32)4540 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
4541 {
4542 uint16_t caps16 = 0;
4543
4544 #define CAP32_TO_CAP16(__cap) \
4545 do { \
4546 if (caps32 & FW_PORT_CAP32_##__cap) \
4547 caps16 |= FW_PORT_CAP_##__cap; \
4548 } while (0)
4549
4550 CAP32_TO_CAP16(SPEED_100M);
4551 CAP32_TO_CAP16(SPEED_1G);
4552 CAP32_TO_CAP16(SPEED_10G);
4553 CAP32_TO_CAP16(SPEED_25G);
4554 CAP32_TO_CAP16(SPEED_40G);
4555 CAP32_TO_CAP16(SPEED_100G);
4556 CAP32_TO_CAP16(FC_RX);
4557 CAP32_TO_CAP16(FC_TX);
4558 CAP32_TO_CAP16(802_3_PAUSE);
4559 CAP32_TO_CAP16(802_3_ASM_DIR);
4560 CAP32_TO_CAP16(ANEG);
4561 CAP32_TO_CAP16(FORCE_PAUSE);
4562 CAP32_TO_CAP16(MDIAUTO);
4563 CAP32_TO_CAP16(MDISTRAIGHT);
4564 CAP32_TO_CAP16(FEC_RS);
4565 CAP32_TO_CAP16(FEC_BASER_RS);
4566
4567 #undef CAP32_TO_CAP16
4568
4569 return caps16;
4570 }
4571
fwcap_to_fec(uint32_t caps,bool unset_means_none)4572 static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none)
4573 {
4574 int8_t fec = 0;
4575
4576 if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0)
4577 return (unset_means_none ? FEC_NONE : 0);
4578
4579 if (caps & FW_PORT_CAP32_FEC_RS)
4580 fec |= FEC_RS;
4581 if (caps & FW_PORT_CAP32_FEC_BASER_RS)
4582 fec |= FEC_BASER_RS;
4583 if (caps & FW_PORT_CAP32_FEC_NO_FEC)
4584 fec |= FEC_NONE;
4585
4586 return (fec);
4587 }
4588
4589 /*
4590 * Note that 0 is not translated to NO_FEC.
4591 */
fec_to_fwcap(int8_t fec)4592 static uint32_t fec_to_fwcap(int8_t fec)
4593 {
4594 uint32_t caps = 0;
4595
4596 /* Only real FECs allowed. */
4597 MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0);
4598
4599 if (fec & FEC_RS)
4600 caps |= FW_PORT_CAP32_FEC_RS;
4601 if (fec & FEC_BASER_RS)
4602 caps |= FW_PORT_CAP32_FEC_BASER_RS;
4603 if (fec & FEC_NONE)
4604 caps |= FW_PORT_CAP32_FEC_NO_FEC;
4605
4606 return (caps);
4607 }
4608
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Requested pause settings -> firmware pause capabilities. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/* Autoneg on/off as requested; "auto" uses it iff the port can. */
	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/*
	 * With autoneg advertise every supported speed; without it use the
	 * requested speed, or the port's top speed if none was requested.
	 */
	if (aneg) {
		speed = lc->pcaps &
		    V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->pcaps);

	fec = 0;
	if (fec_supported(speed)) {
		int force_fec;

		/* FORCE_FEC can be used only if the firmware supports it. */
		if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
			force_fec = lc->force_fec;
		else
			force_fec = 0;

		if (lc->requested_fec == FEC_AUTO) {
			if (force_fec > 0) {
				/*
				 * Must use FORCE_FEC even though requested FEC
				 * is AUTO. Set all the FEC bits valid for the
				 * speed and let the firmware pick one.
				 */
				fec |= FW_PORT_CAP32_FORCE_FEC;
				if (speed & FW_PORT_CAP32_SPEED_25G) {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_BASER_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				} else {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				}
			} else {
				/*
				 * Set only 1b. Old firmwares can't deal with
				 * multiple bits and new firmwares are free to
				 * ignore this and try whatever FECs they want
				 * because we aren't setting FORCE_FEC here.
				 */
				fec |= fec_to_fwcap(lc->fec_hint);
				MPASS(powerof2(fec));

				/*
				 * Override the hint if the FEC is not valid for
				 * the potential top speed.  Request the best
				 * FEC at that speed instead.
				 */
				if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
				    fec == FW_PORT_CAP32_FEC_BASER_RS) {
					fec = FW_PORT_CAP32_FEC_RS;
				}
			}
		} else {
			/*
			 * User has explicitly requested some FEC(s). Set
			 * FORCE_FEC unless prohibited from using it.
			 */
			if (force_fec != 0)
				fec |= FW_PORT_CAP32_FORCE_FEC;
			fec |= fec_to_fwcap(lc->requested_fec &
			    M_FW_PORT_CAP32_FEC);
			if (lc->requested_fec & FEC_MODULE)
				fec |= fec_to_fwcap(lc->fec_hint);
		}

		/*
		 * This is for compatibility with old firmwares. The original
		 * way to request NO_FEC was to not set any of the FEC bits. New
		 * firmwares understand this too.
		 */
		if (fec == FW_PORT_CAP32_FEC_NO_FEC)
			fec = 0;
	}

	/* Force AN on for BT cards. */
	if (isset(&adap->bt_map, port))
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/* Clamp the request to what the port actually supports. */
	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->pcaps) != lc->pcaps) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
		    lc->pcaps, rcap & (rcap ^ lc->pcaps));
#endif
		rcap &= lc->pcaps;
	}
	rcap |= mdi;

	/* Build and send the L1_CFG command (32-bit form if FW supports it). */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
	    V_FW_PORT_CMD_PORTID(port));
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	lc->requested_caps = rcap;
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
4752
4753 /**
4754 * t4_restart_aneg - restart autonegotiation
4755 * @adap: the adapter
4756 * @mbox: mbox to use for the FW command
4757 * @port: the port id
4758 *
4759 * Restarts autonegotiation for the selected port.
4760 */
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)4761 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4762 {
4763 struct fw_port_cmd c;
4764
4765 memset(&c, 0, sizeof(c));
4766 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4767 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4768 V_FW_PORT_CMD_PORTID(port));
4769 c.action_to_len16 =
4770 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4771 FW_LEN16(c));
4772 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4773 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4774 }
4775
/* Describes a set of bits in an INT_CAUSE register for display purposes. */
struct intr_details {
	u32 mask;		/* cause bit(s) covered by this entry */
	const char *msg;	/* message displayed when any bit is set */
};

/* Handler to run when specific cause bits are set. */
struct intr_action {
	u32 mask;		/* cause bit(s) that trigger this action */
	int arg;		/* passed verbatim to the handler */
	/* handler(adapter, arg, flags); return value folded into "fatal" */
	bool (*action)(struct adapter *, int, int);
};

/* Everything needed to process one INT_CAUSE register. */
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};
4796
4797 static inline char
intr_alert_char(u32 cause,u32 enable,u32 fatal)4798 intr_alert_char(u32 cause, u32 enable, u32 fatal)
4799 {
4800
4801 if (cause & fatal)
4802 return ('!');
4803 if (cause & enable)
4804 return ('*');
4805 return ('-');
4806 }
4807
/*
 * Display the value of an INT_CAUSE register and decode the cause bits that
 * are set using the register's intr_details table (if any).  @cause is the
 * raw register value and @ucause the subset that is not being ignored.  With
 * IHF_VERBOSE all set bits are decoded, otherwise only the unignored ones.
 */
static void
show_intr_info(struct adapter *sc, const struct intr_info *ii, uint32_t cause,
    uint32_t ucause, uint32_t enabled, uint32_t fatal, int flags)
{
	uint32_t leftover, msgbits;
	const struct intr_details *details;
	char alert;
	const bool verbose = flags & IHF_VERBOSE;

	/* One summary line for the register itself. */
	if (verbose || ucause != 0 || flags & IHF_RUN_ALL_ACTIONS) {
		alert = intr_alert_char(cause, enabled, fatal);
		CH_ALERT(sc, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n", alert,
		    ii->name, ii->cause_reg, cause, enabled, fatal);
	}

	/* One line per details entry that matches a set bit. */
	leftover = verbose ? cause : ucause;
	for (details = ii->details; details && details->mask != 0; details++) {
		msgbits = details->mask & leftover;
		if (msgbits == 0)
			continue;
		alert = intr_alert_char(msgbits, enabled, fatal);
		CH_ALERT(sc, " %c [0x%08x] %s\n", alert, msgbits, details->msg);
		leftover &= ~msgbits;
	}
	/* Bits that were set but not described by any details entry. */
	if (leftover != 0 && leftover != (verbose ? cause : ucause))
		CH_ALERT(sc, " ? [0x%08x]\n", leftover);
}
4835
/*
 * Process one INT_CAUSE register as described by @ii: read it, decide which
 * bits are fatal/ignored, display it, run any registered actions, and clear
 * the causes as directed by the IHF_CLR_* flags.  @acause carries additional
 * cause bits from the caller that can trigger actions even though they are
 * not set in this register.
 *
 * Returns true for fatal error.
 */
static bool
t4_handle_intr(struct adapter *sc, const struct intr_info *ii, uint32_t acause,
    int flags)
{
	uint32_t cause, ucause, enabled, fatal;
	bool rc;
	const struct intr_action *action;

	cause = t4_read_reg(sc, ii->cause_reg);
	enabled = t4_read_reg(sc, ii->enable_reg);
	flags |= ii->flags;	/* caller's hints plus the register's own */
	fatal = ii->fatal & cause;
	if (flags & IHF_FATAL_IFF_ENABLED)
		fatal &= enabled;	/* only enabled causes count as fatal */
	ucause = cause;
	if (flags & IHF_IGNORE_IF_DISABLED)
		ucause &= enabled;	/* ignore causes that are masked off */
	if (!(flags & IHF_NO_SHOW))
		show_intr_info(sc, ii, cause, ucause, enabled, fatal, flags);

	rc = fatal != 0;
	for (action = ii->actions; action && action->mask != 0; action++) {
		if (action->action == NULL)
			continue;
		if (action->mask & (ucause | acause) ||
		    flags & IHF_RUN_ALL_ACTIONS) {
			bool rc1 = (action->action)(sc, action->arg, flags);
			/* Only this register's own unignored causes are fatal. */
			if (action->mask & ucause)
				rc |= rc1;
		}
	}

	/* clear */
	if (cause != 0) {
		if (flags & IHF_CLR_ALL_SET) {
			t4_write_reg(sc, ii->cause_reg, cause);
			(void)t4_read_reg(sc, ii->cause_reg);
		} else if (ucause != 0 && flags & IHF_CLR_ALL_UNIGNORED) {
			t4_write_reg(sc, ii->cause_reg, ucause);
			(void)t4_read_reg(sc, ii->cause_reg);
		}
	}

	return (rc);
}
4884
/*
 * Interrupt handler for the PCIE module.
 */
static bool pcie_intr_handler(struct adapter *adap, int arg, int flags)
{
	/* T4 only: UTL system bus agent status. */
	static const struct intr_details sysbus_intr_details[] = {
		{ F_RNPP, "RXNP array parity error" },
		{ F_RPCP, "RXPC array parity error" },
		{ F_RCIP, "RXCIF array parity error" },
		{ F_RCCP, "Rx completions control array parity error" },
		{ F_RFTP, "RXFT array parity error" },
		{ 0 }
	};
	static const struct intr_info sysbus_intr_info = {
		.name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
		.fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
		.flags = 0,
		.details = sysbus_intr_details,
		.actions = NULL,
	};
	/* T4 only: UTL PCI Express port status. */
	static const struct intr_details pcie_port_intr_details[] = {
		{ F_TPCP, "TXPC array parity error" },
		{ F_TNPP, "TXNP array parity error" },
		{ F_TFTP, "TXFT array parity error" },
		{ F_TCAP, "TXCA array parity error" },
		{ F_TCIP, "TXCIF array parity error" },
		{ F_RCAP, "RXCA array parity error" },
		{ F_OTDD, "outbound request TLP discarded" },
		{ F_RDPE, "Rx data parity error" },
		{ F_TDUE, "Tx uncorrectable data error" },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info = {
		.name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
		.fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
		    F_OTDD | F_RDPE | F_TDUE,
		.flags = 0,
		.details = pcie_port_intr_details,
		.actions = NULL,
	};
	/* PCIE_INT_CAUSE details for T4. */
	static const struct intr_details pcie_intr_details[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error" },
		{ F_MSIADDRHPERR, "MSI AddrH parity error" },
		{ F_MSIDATAPERR, "MSI data parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
		{ F_MSIXDATAPERR, "MSI-X data parity error" },
		{ F_MSIXDIPERR, "MSI-X DI parity error" },
		{ F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
		{ F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
		{ F_TARTAGPERR, "PCIe target tag FIFO parity error" },
		{ F_CCNTPERR, "PCIe CMD channel count parity error" },
		{ F_CREQPERR, "PCIe CMD channel request parity error" },
		{ F_CRSPPERR, "PCIe CMD channel response parity error" },
		{ F_DCNTPERR, "PCIe DMA channel count parity error" },
		{ F_DREQPERR, "PCIe DMA channel request parity error" },
		{ F_DRSPPERR, "PCIe DMA channel response parity error" },
		{ F_HCNTPERR, "PCIe HMA channel count parity error" },
		{ F_HREQPERR, "PCIe HMA channel request parity error" },
		{ F_HRSPPERR, "PCIe HMA channel response parity error" },
		{ F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
		{ F_FIDPERR, "PCIe FID parity error" },
		{ F_INTXCLRPERR, "PCIe INTx clear parity error" },
		{ F_MATAGPERR, "PCIe MA tag parity error" },
		{ F_PIOTAGPERR, "PCIe PIO tag parity error" },
		{ F_RXCPLPERR, "PCIe Rx completion parity error" },
		{ F_RXWRPERR, "PCIe Rx write parity error" },
		{ F_RPLPERR, "PCIe replay buffer parity error" },
		{ F_PCIESINT, "PCIe core secondary fault" },
		{ F_PCIEPINT, "PCIe core primary fault" },
		{ F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
		{ 0 }
	};
	/* PCIE_INT_CAUSE details for T5 and later. */
	static const struct intr_details t5_pcie_intr_details[] = {
		{ F_IPGRPPERR, "Parity errors observed by IP" },
		{ F_NONFATALERR, "PCIe non-fatal error" },
		{ F_READRSPERR, "Outbound read error" },
		{ F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
		{ F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
		{ F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
		{ F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
		{ F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
		{ F_MAGRPPERR, "MA group FIFO parity error" },
		{ F_VFIDPERR, "VFID SRAM parity error" },
		{ F_FIDPERR, "FID SRAM parity error" },
		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
		{ F_HREQWRPERR, "HMA channel write request SRAM parity error" },
		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
		{ F_DREQRDPERR, "DMA channel write request SRAM parity error" },
		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
		{ F_MSTGRPPERR, "Master response read queue SRAM parity error" },
		{ 0 }
	};
	/* Not static/const: .details is selected at runtime by chip type. */
	struct intr_info pcie_intr_info = {
		.name = "PCIE_INT_CAUSE",
		.cause_reg = A_PCIE_INT_CAUSE,
		.enable_reg = A_PCIE_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	struct intr_info pcie_int_cause_ext = {
		.name = "PCIE_INT_CAUSE_EXT",
		.cause_reg = A_PCIE_INT_CAUSE_EXT,
		.enable_reg = A_PCIE_INT_ENABLE_EXT,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	struct intr_info pcie_int_cause_x8 = {
		.name = "PCIE_INT_CAUSE_X8",
		.cause_reg = A_PCIE_INT_CAUSE_X8,
		.enable_reg = A_PCIE_INT_ENABLE_X8,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	if (is_t4(adap)) {
		/* T4 has two extra UTL registers to check. */
		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, flags);
		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, flags);

		pcie_intr_info.details = pcie_intr_details;
	} else {
		pcie_intr_info.details = t5_pcie_intr_details;
	}
	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, flags);
	if (chip_id(adap) > CHELSIO_T6) {
		/* T7+ only registers. */
		fatal |= t4_handle_intr(adap, &pcie_int_cause_ext, 0, flags);
		fatal |= t4_handle_intr(adap, &pcie_int_cause_x8, 0, flags);
	}

	return (fatal);
}
5041
/*
 * TP interrupt handler.
 */
static bool tp_intr_handler(struct adapter *adap, int arg, int flags)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		.fatal = 0x7fffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};
	/* The rest of these registers exist on T7+ only. */
	static const struct intr_info tp_inic_perr_cause = {
		.name = "TP_INIC_PERR_CAUSE",
		.cause_reg = A_TP_INIC_PERR_CAUSE,
		.enable_reg = A_TP_INIC_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info tp_c_perr_cause = {
		.name = "TP_C_PERR_CAUSE",
		.cause_reg = A_TP_C_PERR_CAUSE,
		.enable_reg = A_TP_C_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info tp_e_eg_perr_cause = {
		.name = "TP_E_EG_PERR_CAUSE",
		.cause_reg = A_TP_E_EG_PERR_CAUSE,
		.enable_reg = A_TP_E_EG_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info tp_e_in0_perr_cause = {
		.name = "TP_E_IN0_PERR_CAUSE",
		.cause_reg = A_TP_E_IN0_PERR_CAUSE,
		.enable_reg = A_TP_E_IN0_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info tp_e_in1_perr_cause = {
		.name = "TP_E_IN1_PERR_CAUSE",
		.cause_reg = A_TP_E_IN1_PERR_CAUSE,
		.enable_reg = A_TP_E_IN1_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info tp_o_perr_cause = {
		.name = "TP_O_PERR_CAUSE",
		.cause_reg = A_TP_O_PERR_CAUSE,
		.enable_reg = A_TP_O_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;

	fatal = t4_handle_intr(adap, &tp_intr_info, 0, flags);
	if (chip_id(adap) > CHELSIO_T6) {
		fatal |= t4_handle_intr(adap, &tp_inic_perr_cause, 0, flags);
		fatal |= t4_handle_intr(adap, &tp_c_perr_cause, 0, flags);
		fatal |= t4_handle_intr(adap, &tp_e_eg_perr_cause, 0, flags);
		fatal |= t4_handle_intr(adap, &tp_e_in0_perr_cause, 0, flags);
		fatal |= t4_handle_intr(adap, &tp_e_in1_perr_cause, 0, flags);
		fatal |= t4_handle_intr(adap, &tp_o_perr_cause, 0, flags);
	}

	return (fatal);
}
5129
/*
 * SGE interrupt handler.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, int flags)
{
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* INT_CAUSE3 details for T4/T5. */
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* INT_CAUSE3 details for T6 and later. */
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
			"SGE WRE packet less than advertized length" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* Not static/const: .details is selected at runtime by chip type. */
	struct intr_info sge_int3_info = {
		.name = "SGE_INT_CAUSE3",
		.cause_reg = A_SGE_INT_CAUSE3,
		.enable_reg = A_SGE_INT_ENABLE3,
		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int4_info = {
		.name = "SGE_INT_CAUSE4",
		.cause_reg = A_SGE_INT_CAUSE4,
		.enable_reg = A_SGE_INT_ENABLE4,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int5_info = {
		.name = "SGE_INT_CAUSE5",
		.cause_reg = A_SGE_INT_CAUSE5,
		.enable_reg = A_SGE_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int6_info = {
		.name = "SGE_INT_CAUSE6",
		.cause_reg = A_SGE_INT_CAUSE6,
		.enable_reg = A_SGE_INT_ENABLE6,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int7_info = {
		.name = "SGE_INT_CAUSE7",
		.cause_reg = A_SGE_INT_CAUSE7,
		.enable_reg = A_SGE_INT_ENABLE7,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int8_info = {
		.name = "SGE_INT_CAUSE8",
		.cause_reg = A_SGE_INT_CAUSE8,
		.enable_reg = A_SGE_INT_ENABLE8,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;
	u32 v;

	if (chip_id(adap) <= CHELSIO_T5) {
		sge_int3_info.details = sge_int3_details;
	} else {
		sge_int3_info.details = t6_sge_int3_details;
	}

	/* CAUSE1-4 exist on all chips; 5 on T5+, 6 on T6+, 7 and 8 on T7+. */
	fatal = false;
	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, flags);
	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, flags);
	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, flags);
	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, flags);
	if (chip_id(adap) >= CHELSIO_T5)
		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, flags);
	if (chip_id(adap) >= CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, flags);
	if (chip_id(adap) >= CHELSIO_T7) {
		fatal |= t4_handle_intr(adap, &sge_int7_info, 0, flags);
		fatal |= t4_handle_intr(adap, &sge_int8_info, 0, flags);
	}

	/* Report and clear the last captured erroneous QID, if any. */
	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
	if (v & F_ERROR_QID_VALID) {
		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
		if (v & F_UNCAPTURED_ERROR)
			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adap, A_SGE_ERROR_STATS,
		    F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
	}

	return (fatal);
}
5312
5313 /*
5314 * CIM interrupt handler.
5315 */
cim_intr_handler(struct adapter * adap,int arg,int flags)5316 static bool cim_intr_handler(struct adapter *adap, int arg, int flags)
5317 {
5318 static const struct intr_details cim_host_intr_details[] = {
5319 /* T6+ */
5320 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
5321
5322 /* T5+ */
5323 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
5324 { F_PLCIM_MSTRSPDATAPARERR,
5325 "PL2CIM master response data parity error" },
5326 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
5327 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
5328 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
5329 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
5330 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
5331 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
5332
5333 /* T4+ */
5334 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
5335 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
5336 { F_MBHOSTPARERR, "CIM mailbox host read parity error" },
5337 { F_MBUPPARERR, "CIM mailbox uP parity error" },
5338 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
5339 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
5340 { F_IBQULPPARERR, "CIM IBQ ULP parity error" },
5341 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
5342 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */
5343 "CIM IBQ PCIe/SGE_HI parity error" },
5344 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
5345 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
5346 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
5347 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
5348 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
5349 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
5350 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
5351 { F_TIMER1INT, "CIM TIMER0 interrupt" },
5352 { F_TIMER0INT, "CIM TIMER0 interrupt" },
5353 { F_PREFDROPINT, "CIM control register prefetch drop" },
5354 { 0}
5355 };
5356 static const struct intr_info cim_host_intr_info = {
5357 .name = "CIM_HOST_INT_CAUSE",
5358 .cause_reg = A_CIM_HOST_INT_CAUSE,
5359 .enable_reg = A_CIM_HOST_INT_ENABLE,
5360 .fatal = 0x007fffe6,
5361 .flags = IHF_FATAL_IFF_ENABLED,
5362 .details = cim_host_intr_details,
5363 .actions = NULL,
5364 };
5365 static const struct intr_details cim_host_upacc_intr_details[] = {
5366 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
5367 { F_TIMEOUTMAINT, "CIM PIF MA timeout" },
5368 { F_TIMEOUTINT, "CIM PIF timeout" },
5369 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
5370 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
5371 { F_BLKWRPLINT, "CIM block write to PL space" },
5372 { F_BLKRDPLINT, "CIM block read from PL space" },
5373 { F_SGLWRPLINT,
5374 "CIM single write to PL space with illegal BEs" },
5375 { F_SGLRDPLINT,
5376 "CIM single read from PL space with illegal BEs" },
5377 { F_BLKWRCTLINT, "CIM block write to CTL space" },
5378 { F_BLKRDCTLINT, "CIM block read from CTL space" },
5379 { F_SGLWRCTLINT,
5380 "CIM single write to CTL space with illegal BEs" },
5381 { F_SGLRDCTLINT,
5382 "CIM single read from CTL space with illegal BEs" },
5383 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
5384 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
5385 { F_SGLWREEPROMINT,
5386 "CIM single write to EEPROM space with illegal BEs" },
5387 { F_SGLRDEEPROMINT,
5388 "CIM single read from EEPROM space with illegal BEs" },
5389 { F_BLKWRFLASHINT, "CIM block write to flash space" },
5390 { F_BLKRDFLASHINT, "CIM block read from flash space" },
5391 { F_SGLWRFLASHINT, "CIM single write to flash space" },
5392 { F_SGLRDFLASHINT,
5393 "CIM single read from flash space with illegal BEs" },
5394 { F_BLKWRBOOTINT, "CIM block write to boot space" },
5395 { F_BLKRDBOOTINT, "CIM block read from boot space" },
5396 { F_SGLWRBOOTINT, "CIM single write to boot space" },
5397 { F_SGLRDBOOTINT,
5398 "CIM single read from boot space with illegal BEs" },
5399 { F_ILLWRBEINT, "CIM illegal write BEs" },
5400 { F_ILLRDBEINT, "CIM illegal read BEs" },
5401 { F_ILLRDINT, "CIM illegal read" },
5402 { F_ILLWRINT, "CIM illegal write" },
5403 { F_ILLTRANSINT, "CIM illegal transaction" },
5404 { F_RSVDSPACEINT, "CIM reserved space access" },
5405 {0}
5406 };
5407 static const struct intr_info cim_host_upacc_intr_info = {
5408 .name = "CIM_HOST_UPACC_INT_CAUSE",
5409 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
5410 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
5411 .fatal = 0x3fffeeff,
5412 .flags = IHF_FATAL_IFF_ENABLED,
5413 .details = cim_host_upacc_intr_details,
5414 .actions = NULL,
5415 };
5416 static const struct intr_info cim_pf_host_intr_info = {
5417 .name = "CIM_PF_HOST_INT_CAUSE",
5418 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
5419 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
5420 .fatal = 0,
5421 .flags = 0,
5422 .details = NULL,
5423 .actions = NULL,
5424 };
5425 static const struct intr_info cim_perr_cause = {
5426 .name = "CIM_PERR_CAUSE",
5427 .cause_reg = A_CIM_PERR_CAUSE,
5428 .enable_reg = A_CIM_PERR_ENABLE,
5429 .fatal = 0xffffffff,
5430 .flags = IHF_FATAL_IFF_ENABLED,
5431 .details = NULL,
5432 .actions = NULL,
5433 };
5434 u32 val, fw_err;
5435 bool fatal;
5436
5437 /*
5438 * When the Firmware detects an internal error which normally wouldn't
5439 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
5440 * to make sure the Host sees the Firmware Crash. So if we have a
5441 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
5442 * interrupt.
5443 */
5444 fw_err = t4_read_reg(adap, A_PCIE_FW);
5445 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
5446 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
5447 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
5448 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
5449 }
5450
5451 fatal = (fw_err & F_PCIE_FW_ERR) != 0;
5452 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, flags);
5453 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, flags);
5454 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, flags);
5455 if (chip_id(adap) > CHELSIO_T6)
5456 fatal |= t4_handle_intr(adap, &cim_perr_cause, 0, flags);
5457 if (fatal)
5458 t4_os_cim_err(adap);
5459
5460 return (fatal);
5461 }
5462
5463 /*
5464 * ULP RX interrupt handler.
5465 */
ulprx_intr_handler(struct adapter * adap,int arg,int flags)5466 static bool ulprx_intr_handler(struct adapter *adap, int arg, int flags)
5467 {
5468 static const struct intr_details ulprx_intr_details[] = {
5469 /* T5+ */
5470 { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
5471 { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },
5472
5473 /* T4+ */
5474 { F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
5475 { F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
5476 { 0x007fffff, "ULPRX parity error" },
5477 { 0 }
5478 };
5479 static const struct intr_info ulprx_intr_info = {
5480 .name = "ULP_RX_INT_CAUSE",
5481 .cause_reg = A_ULP_RX_INT_CAUSE,
5482 .enable_reg = A_ULP_RX_INT_ENABLE,
5483 .fatal = 0x07ffffff,
5484 .flags = IHF_FATAL_IFF_ENABLED,
5485 .details = ulprx_intr_details,
5486 .actions = NULL,
5487 };
5488 static const struct intr_info ulprx_intr2_info = {
5489 .name = "ULP_RX_INT_CAUSE_2",
5490 .cause_reg = A_ULP_RX_INT_CAUSE_2,
5491 .enable_reg = A_ULP_RX_INT_ENABLE_2,
5492 .fatal = 0,
5493 .flags = 0,
5494 .details = NULL,
5495 .actions = NULL,
5496 };
5497 static const struct intr_info ulprx_int_cause_pcmd = {
5498 .name = "ULP_RX_INT_CAUSE_PCMD",
5499 .cause_reg = A_ULP_RX_INT_CAUSE_PCMD,
5500 .enable_reg = A_ULP_RX_INT_ENABLE_PCMD,
5501 .fatal = 0,
5502 .flags = 0,
5503 .details = NULL,
5504 .actions = NULL,
5505 };
5506 static const struct intr_info ulprx_int_cause_data = {
5507 .name = "ULP_RX_INT_CAUSE_DATA",
5508 .cause_reg = A_ULP_RX_INT_CAUSE_DATA,
5509 .enable_reg = A_ULP_RX_INT_ENABLE_DATA,
5510 .fatal = 0,
5511 .flags = 0,
5512 .details = NULL,
5513 .actions = NULL,
5514 };
5515 static const struct intr_info ulprx_int_cause_arb = {
5516 .name = "ULP_RX_INT_CAUSE_ARB",
5517 .cause_reg = A_ULP_RX_INT_CAUSE_ARB,
5518 .enable_reg = A_ULP_RX_INT_ENABLE_ARB,
5519 .fatal = 0,
5520 .flags = 0,
5521 .details = NULL,
5522 .actions = NULL,
5523 };
5524 static const struct intr_info ulprx_int_cause_intf = {
5525 .name = "ULP_RX_INT_CAUSE_INTERFACE",
5526 .cause_reg = A_ULP_RX_INT_CAUSE_INTERFACE,
5527 .enable_reg = A_ULP_RX_INT_ENABLE_INTERFACE,
5528 .fatal = 0,
5529 .flags = 0,
5530 .details = NULL,
5531 .actions = NULL,
5532 };
5533 bool fatal = false;
5534
5535 fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, flags);
5536 if (chip_id(adap) < CHELSIO_T7)
5537 fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, flags);
5538 else {
5539 fatal |= t4_handle_intr(adap, &ulprx_int_cause_pcmd, 0, flags);
5540 fatal |= t4_handle_intr(adap, &ulprx_int_cause_data, 0, flags);
5541 fatal |= t4_handle_intr(adap, &ulprx_int_cause_arb, 0, flags);
5542 fatal |= t4_handle_intr(adap, &ulprx_int_cause_intf, 0, flags);
5543 }
5544
5545 return (fatal);
5546 }
5547
5548 /*
5549 * ULP TX interrupt handler.
5550 */
ulptx_intr_handler(struct adapter * adap,int arg,int flags)5551 static bool ulptx_intr_handler(struct adapter *adap, int arg, int flags)
5552 {
5553 static const struct intr_details ulptx_intr_details[] = {
5554 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
5555 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
5556 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
5557 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
5558 { 0x0fffffff, "ULPTX parity error" },
5559 { 0 }
5560 };
5561 static const struct intr_info ulptx_intr_info = {
5562 .name = "ULP_TX_INT_CAUSE",
5563 .cause_reg = A_ULP_TX_INT_CAUSE,
5564 .enable_reg = A_ULP_TX_INT_ENABLE,
5565 .fatal = 0x0fffffff,
5566 .flags = IHF_FATAL_IFF_ENABLED,
5567 .details = ulptx_intr_details,
5568 .actions = NULL,
5569 };
5570 static const struct intr_info ulptx_intr_info2 = {
5571 .name = "ULP_TX_INT_CAUSE_2",
5572 .cause_reg = A_ULP_TX_INT_CAUSE_2,
5573 .enable_reg = A_ULP_TX_INT_ENABLE_2,
5574 .fatal = 0xffffffff,
5575 .flags = IHF_FATAL_IFF_ENABLED,
5576 .details = NULL,
5577 .actions = NULL,
5578 };
5579 static const struct intr_info ulptx_intr_info3 = {
5580 .name = "ULP_TX_INT_CAUSE_3",
5581 .cause_reg = A_ULP_TX_INT_CAUSE_3,
5582 .enable_reg = A_ULP_TX_INT_ENABLE_3,
5583 .fatal = 0xffffffff,
5584 .flags = IHF_FATAL_IFF_ENABLED,
5585 .details = NULL,
5586 .actions = NULL,
5587 };
5588 static const struct intr_info ulptx_intr_info4 = {
5589 .name = "ULP_TX_INT_CAUSE_4",
5590 .cause_reg = A_ULP_TX_INT_CAUSE_4,
5591 .enable_reg = A_ULP_TX_INT_ENABLE_4,
5592 .fatal = 0xffffffff,
5593 .flags = IHF_FATAL_IFF_ENABLED,
5594 .details = NULL,
5595 .actions = NULL,
5596 };
5597 static const struct intr_info ulptx_intr_info5 = {
5598 .name = "ULP_TX_INT_CAUSE_5",
5599 .cause_reg = A_ULP_TX_INT_CAUSE_5,
5600 .enable_reg = A_ULP_TX_INT_ENABLE_5,
5601 .fatal = 0xffffffff,
5602 .flags = IHF_FATAL_IFF_ENABLED,
5603 .details = NULL,
5604 .actions = NULL,
5605 };
5606 static const struct intr_info ulptx_intr_info6 = {
5607 .name = "ULP_TX_INT_CAUSE_6",
5608 .cause_reg = A_ULP_TX_INT_CAUSE_6,
5609 .enable_reg = A_ULP_TX_INT_ENABLE_6,
5610 .fatal = 0xffffffff,
5611 .flags = IHF_FATAL_IFF_ENABLED,
5612 .details = NULL,
5613 .actions = NULL,
5614 };
5615 static const struct intr_info ulptx_intr_info7 = {
5616 .name = "ULP_TX_INT_CAUSE_7",
5617 .cause_reg = A_ULP_TX_INT_CAUSE_7,
5618 .enable_reg = A_ULP_TX_INT_ENABLE_7,
5619 .fatal = 0,
5620 .flags = 0,
5621 .details = NULL,
5622 .actions = NULL,
5623 };
5624 static const struct intr_info ulptx_intr_info8 = {
5625 .name = "ULP_TX_INT_CAUSE_8",
5626 .cause_reg = A_ULP_TX_INT_CAUSE_8,
5627 .enable_reg = A_ULP_TX_INT_ENABLE_8,
5628 .fatal = 0,
5629 .flags = 0,
5630 .details = NULL,
5631 .actions = NULL,
5632 };
5633 bool fatal = false;
5634
5635 fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, flags);
5636 if (chip_id(adap) > CHELSIO_T4)
5637 fatal |= t4_handle_intr(adap, &ulptx_intr_info2, 0, flags);
5638 if (chip_id(adap) > CHELSIO_T6) {
5639 fatal |= t4_handle_intr(adap, &ulptx_intr_info3, 0, flags);
5640 fatal |= t4_handle_intr(adap, &ulptx_intr_info4, 0, flags);
5641 fatal |= t4_handle_intr(adap, &ulptx_intr_info5, 0, flags);
5642 fatal |= t4_handle_intr(adap, &ulptx_intr_info6, 0, flags);
5643 fatal |= t4_handle_intr(adap, &ulptx_intr_info7, 0, flags);
5644 fatal |= t4_handle_intr(adap, &ulptx_intr_info8, 0, flags);
5645 }
5646
5647 return (fatal);
5648 }
5649
pmtx_dump_dbg_stats(struct adapter * adap,int arg,int flags)5650 static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, int flags)
5651 {
5652 int i;
5653 u32 data[17];
5654
5655 if (flags & IHF_NO_SHOW)
5656 return (false);
5657
5658 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
5659 ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
5660 for (i = 0; i < ARRAY_SIZE(data); i++) {
5661 CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
5662 A_PM_TX_DBG_STAT0 + i, data[i]);
5663 }
5664
5665 return (false);
5666 }
5667
5668 /*
5669 * PM TX interrupt handler.
5670 */
pmtx_intr_handler(struct adapter * adap,int arg,int flags)5671 static bool pmtx_intr_handler(struct adapter *adap, int arg, int flags)
5672 {
5673 static const struct intr_details pmtx_int_cause_fields[] = {
5674 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
5675 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
5676 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
5677 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
5678 { 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
5679 { 0x00f00000, "PMTX icspi FIFO Rx framing error" },
5680 { 0x000f0000, "PMTX icspi FIFO Tx framing error" },
5681 { 0x0000f000, "PMTX oespi FIFO Rx framing error" },
5682 { 0x00000f00, "PMTX oespi FIFO Tx framing error" },
5683 { 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
5684 { F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
5685 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
5686 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
5687 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
5688 { 0 }
5689 };
5690 static const struct intr_action pmtx_int_cause_actions[] = {
5691 { 0xffffffff, -1, pmtx_dump_dbg_stats },
5692 { 0 },
5693 };
5694 static const struct intr_info pmtx_int_cause = {
5695 .name = "PM_TX_INT_CAUSE",
5696 .cause_reg = A_PM_TX_INT_CAUSE,
5697 .enable_reg = A_PM_TX_INT_ENABLE,
5698 .fatal = 0xffffffff,
5699 .flags = 0,
5700 .details = pmtx_int_cause_fields,
5701 .actions = pmtx_int_cause_actions,
5702 };
5703
5704 return (t4_handle_intr(adap, &pmtx_int_cause, 0, flags));
5705 }
5706
5707 /*
5708 * PM RX interrupt handler.
5709 */
pmrx_intr_handler(struct adapter * adap,int arg,int flags)5710 static bool pmrx_intr_handler(struct adapter *adap, int arg, int flags)
5711 {
5712 static const struct intr_details pmrx_int_cause_fields[] = {
5713 /* T6+ */
5714 { 0x18000000, "PMRX ospi overflow" },
5715 { F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
5716 { F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
5717 { F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
5718 { F_SDC_ERR, "PMRX SDC error" },
5719
5720 /* T4+ */
5721 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
5722 { 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
5723 { 0x0003c000, "PMRX iespi Rx framing error" },
5724 { 0x00003c00, "PMRX iespi Tx framing error" },
5725 { 0x00000300, "PMRX ocspi Rx framing error" },
5726 { 0x000000c0, "PMRX ocspi Tx framing error" },
5727 { 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
5728 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
5729 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
5730 { F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
5731 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
5732 { 0 }
5733 };
5734 static const struct intr_info pmrx_int_cause = {
5735 .name = "PM_RX_INT_CAUSE",
5736 .cause_reg = A_PM_RX_INT_CAUSE,
5737 .enable_reg = A_PM_RX_INT_ENABLE,
5738 .fatal = 0x1fffffff,
5739 .flags = IHF_FATAL_IFF_ENABLED,
5740 .details = pmrx_int_cause_fields,
5741 .actions = NULL,
5742 };
5743
5744 return (t4_handle_intr(adap, &pmrx_int_cause, 0, flags));
5745 }
5746
5747 /*
5748 * CPL switch interrupt handler.
5749 */
cplsw_intr_handler(struct adapter * adap,int arg,int flags)5750 static bool cplsw_intr_handler(struct adapter *adap, int arg, int flags)
5751 {
5752 static const struct intr_details cplsw_int_cause_fields[] = {
5753 /* T5+ */
5754 { F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
5755 { F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
5756
5757 /* T4+ */
5758 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
5759 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
5760 { F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
5761 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
5762 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
5763 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
5764 { 0 }
5765 };
5766 static const struct intr_info cplsw_int_cause = {
5767 .name = "CPL_INTR_CAUSE",
5768 .cause_reg = A_CPL_INTR_CAUSE,
5769 .enable_reg = A_CPL_INTR_ENABLE,
5770 .fatal = 0xffffffff,
5771 .flags = IHF_FATAL_IFF_ENABLED,
5772 .details = cplsw_int_cause_fields,
5773 .actions = NULL,
5774 };
5775
5776 return (t4_handle_intr(adap, &cplsw_int_cause, 0, flags));
5777 }
5778
5779 #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
5780 #define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
5781 #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
5782 F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
5783 F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
5784 F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
5785 #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
5786 F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
5787 F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
5788 #define T7_LE_FATAL_MASK (T6_LE_FATAL_MASK | F_CACHESRAMPERR | F_CACHEINTPERR)
5789
5790 /*
5791 * LE interrupt handler.
5792 */
le_intr_handler(struct adapter * adap,int arg,int flags)5793 static bool le_intr_handler(struct adapter *adap, int arg, int flags)
5794 {
5795 static const struct intr_details le_intr_details[] = {
5796 { F_REQQPARERR, "LE request queue parity error" },
5797 { F_UNKNOWNCMD, "LE unknown command" },
5798 { F_ACTRGNFULL, "LE active region full" },
5799 { F_PARITYERR, "LE parity error" },
5800 { F_LIPMISS, "LE LIP miss" },
5801 { F_LIP0, "LE 0 LIP error" },
5802 { 0 }
5803 };
5804 static const struct intr_details t6_le_intr_details[] = {
5805 { F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
5806 { F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
5807 { F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
5808 { F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
5809 { F_TOTCNTERR, "LE total active < TCAM count" },
5810 { F_CMDPRSRINTERR, "LE internal error in parser" },
5811 { F_CMDTIDERR, "Incorrect tid in LE command" },
5812 { F_T6_ACTRGNFULL, "LE active region full" },
5813 { F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
5814 { F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
5815 { F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
5816 { F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
5817 { F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
5818 { F_TCAMACCFAIL, "LE TCAM access failure" },
5819 { F_T6_UNKNOWNCMD, "LE unknown command" },
5820 { F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
5821 { F_T6_LIPMISS, "LE CLIP lookup miss" },
5822 { T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
5823 { 0 }
5824 };
5825 struct intr_info le_intr_info = {
5826 .name = "LE_DB_INT_CAUSE",
5827 .cause_reg = A_LE_DB_INT_CAUSE,
5828 .enable_reg = A_LE_DB_INT_ENABLE,
5829 .fatal = 0,
5830 .flags = IHF_FATAL_IFF_ENABLED,
5831 .details = NULL,
5832 .actions = NULL,
5833 };
5834
5835 if (chip_id(adap) <= CHELSIO_T5) {
5836 le_intr_info.details = le_intr_details;
5837 le_intr_info.fatal = T5_LE_FATAL_MASK;
5838 } else {
5839 le_intr_info.details = t6_le_intr_details;
5840 if (chip_id(adap) < CHELSIO_T7)
5841 le_intr_info.fatal = T6_LE_FATAL_MASK;
5842 else
5843 le_intr_info.fatal = T7_LE_FATAL_MASK;
5844 }
5845
5846 return (t4_handle_intr(adap, &le_intr_info, 0, flags));
5847 }
5848
5849 /*
5850 * MPS interrupt handler.
5851 */
static bool mps_intr_handler(struct adapter *adap, int arg, int flags)
{
	/* MPS Rx parity: CAUSE exists on all chips; CAUSE2-6 are T7+ only. */
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_info mps_rx_perr_intr_info2 = {
		.name = "MPS_RX_PERR_INT_CAUSE2",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE2,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_rx_perr_intr_info3 = {
		.name = "MPS_RX_PERR_INT_CAUSE3",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE3,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE3,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_rx_perr_intr_info4 = {
		.name = "MPS_RX_PERR_INT_CAUSE4",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE4,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE4,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_rx_perr_intr_info5 = {
		.name = "MPS_RX_PERR_INT_CAUSE5",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE5,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_rx_perr_intr_info6 = {
		.name = "MPS_RX_PERR_INT_CAUSE6",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE6,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE6,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* MPS Tx: INT_CAUSE on all chips; INT2-4_CAUSE are T7+ only. */
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_info mps_tx_intr_info2 = {
		.name = "MPS_TX_INT2_CAUSE",
		.cause_reg = A_MPS_TX_INT2_CAUSE,
		.enable_reg = A_MPS_TX_INT2_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_tx_intr_info3 = {
		.name = "MPS_TX_INT3_CAUSE",
		.cause_reg = A_MPS_TX_INT3_CAUSE,
		.enable_reg = A_MPS_TX_INT3_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mps_tx_intr_info4 = {
		.name = "MPS_TX_INT4_CAUSE",
		.cause_reg = A_MPS_TX_INT4_CAUSE,
		.enable_reg = A_MPS_TX_INT4_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* Trace engine: register addresses moved on T7, hence two infos. */
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_info t7_mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_T7_MPS_TRC_INT_CAUSE,
		.enable_reg = A_T7_MPS_TRC_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_info t7_mps_trc_intr_info2 = {
		.name = "MPS_TRC_INT_CAUSE2",
		.cause_reg = A_MPS_TRC_INT_CAUSE2,
		.enable_reg = A_MPS_TRC_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* Statistics SRAM / Tx FIFO / Rx FIFO parity causes. */
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal = 0xffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal = 0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	/* Classification SRAM/TCAM parity causes. */
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	/* SRAM1 statistics cause exists on T5 and later only. */
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, flags);
	if (chip_id(adap) > CHELSIO_T6) {
		fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info2, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info3, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info4, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info5, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info6, 0, flags);
	}
	fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, flags);
	if (chip_id(adap) > CHELSIO_T6) {
		fatal |= t4_handle_intr(adap, &mps_tx_intr_info2, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_tx_intr_info3, 0, flags);
		fatal |= t4_handle_intr(adap, &mps_tx_intr_info4, 0, flags);
		fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, flags);
		fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info2, 0, flags);
	} else
		fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, flags);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, flags);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, flags);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, flags);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, flags);
	if (chip_id(adap) > CHELSIO_T4)
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0, flags);

	/* Clear the top-level MPS cause; T5+ requires writing 1s to clear. */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */

	return (fatal);

}
6091
6092 /*
6093 * EDC/MC interrupt handler.
6094 */
mem_intr_handler(struct adapter * adap,int idx,int flags)6095 static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
6096 {
6097 static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
6098 unsigned int count_reg, v;
6099 static const struct intr_details mem_intr_details[] = {
6100 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
6101 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
6102 { F_PERR_INT_CAUSE, "FIFO parity error" },
6103 { 0 }
6104 };
6105 char rname[32];
6106 struct intr_info ii = {
6107 .name = &rname[0],
6108 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
6109 .details = mem_intr_details,
6110 .flags = 0,
6111 .actions = NULL,
6112 };
6113 bool fatal = false;
6114 int i = 0;
6115
6116 switch (idx) {
6117 case MEM_EDC1: i = 1;
6118 /* fall through */
6119 case MEM_EDC0:
6120 snprintf(rname, sizeof(rname), "EDC%u_INT_CAUSE", i);
6121 if (is_t4(adap)) {
6122 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, i);
6123 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, i);
6124 count_reg = EDC_REG(A_EDC_ECC_STATUS, i);
6125 } else {
6126 ii.cause_reg = EDC_T5_REG(A_EDC_H_INT_CAUSE, i);
6127 ii.enable_reg = EDC_T5_REG(A_EDC_H_INT_ENABLE, i);
6128 count_reg = EDC_T5_REG(A_EDC_H_ECC_STATUS, i);
6129 }
6130 fatal |= t4_handle_intr(adap, &ii, 0, flags);
6131 if (chip_id(adap) > CHELSIO_T6) {
6132 snprintf(rname, sizeof(rname), "EDC%u_PAR_CAUSE", i);
6133 ii.cause_reg = EDC_T5_REG(A_EDC_H_PAR_CAUSE, i);
6134 ii.enable_reg = EDC_T5_REG(A_EDC_H_PAR_ENABLE, i);
6135 ii.fatal = 0xffffffff;
6136 ii.details = NULL;
6137 ii.flags = IHF_FATAL_IFF_ENABLED;
6138 fatal |= t4_handle_intr(adap, &ii, 0, flags);
6139 }
6140 break;
6141 case MEM_MC1:
6142 if (is_t4(adap) || is_t6(adap))
6143 return (false);
6144 i = 1;
6145 /* fall through */
6146 case MEM_MC0:
6147 snprintf(rname, sizeof(rname), "MC%u_INT_CAUSE", i);
6148 if (is_t4(adap)) {
6149 ii.cause_reg = A_MC_INT_CAUSE;
6150 ii.enable_reg = A_MC_INT_ENABLE;
6151 count_reg = A_MC_ECC_STATUS;
6152 } else if (chip_id(adap) < CHELSIO_T7) {
6153 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, i);
6154 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, i);
6155 count_reg = MC_REG(A_MC_P_ECC_STATUS, i);
6156 } else {
6157 ii.cause_reg = MC_T7_REG(A_T7_MC_P_INT_CAUSE, i);
6158 ii.enable_reg = MC_T7_REG(A_T7_MC_P_INT_ENABLE, i);
6159 count_reg = MC_T7_REG(A_T7_MC_P_ECC_STATUS, i);
6160 }
6161 fatal |= t4_handle_intr(adap, &ii, 0, flags);
6162
6163 snprintf(rname, sizeof(rname), "MC%u_PAR_CAUSE", i);
6164 if (is_t4(adap)) {
6165 ii.cause_reg = A_MC_PAR_CAUSE;
6166 ii.enable_reg = A_MC_PAR_ENABLE;
6167 } else if (chip_id(adap) < CHELSIO_T7) {
6168 ii.cause_reg = MC_REG(A_MC_P_PAR_CAUSE, i);
6169 ii.enable_reg = MC_REG(A_MC_P_PAR_ENABLE, i);
6170 } else {
6171 ii.cause_reg = MC_T7_REG(A_T7_MC_P_PAR_CAUSE, i);
6172 ii.enable_reg = MC_T7_REG(A_T7_MC_P_PAR_ENABLE, i);
6173 }
6174 ii.fatal = 0xffffffff;
6175 ii.details = NULL;
6176 ii.flags = IHF_FATAL_IFF_ENABLED;
6177 fatal |= t4_handle_intr(adap, &ii, 0, flags);
6178
6179 if (chip_id(adap) > CHELSIO_T6) {
6180 snprintf(rname, sizeof(rname), "MC%u_DDRCTL_INT_CAUSE", i);
6181 ii.cause_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_CAUSE, i);
6182 ii.enable_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_ENABLE, i);
6183 fatal |= t4_handle_intr(adap, &ii, 0, flags);
6184 }
6185 break;
6186 }
6187
6188 v = t4_read_reg(adap, count_reg);
6189 if (v != 0) {
6190 if (G_ECC_UECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
6191 CH_ALERT(adap,
6192 " %s: %u uncorrectable ECC data error(s)\n",
6193 name[idx], G_ECC_UECNT(v));
6194 }
6195 if (G_ECC_CECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
6196 if (idx <= MEM_EDC1)
6197 t4_edc_err_read(adap, idx);
6198 CH_WARN_RATELIMIT(adap,
6199 " %s: %u correctable ECC data error(s)\n",
6200 name[idx], G_ECC_CECNT(v));
6201 }
6202 t4_write_reg(adap, count_reg, 0xffffffff);
6203 }
6204
6205 return (fatal);
6206 }
6207
ma_wrap_status(struct adapter * adap,int arg,int flags)6208 static bool ma_wrap_status(struct adapter *adap, int arg, int flags)
6209 {
6210 u32 v;
6211
6212 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
6213 if (!(flags & IHF_NO_SHOW)) {
6214 CH_ALERT(adap,
6215 " MA address wrap-around by client %u to address %#x\n",
6216 G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
6217 }
6218 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
6219
6220 return (false);
6221 }
6222
6223
6224 /*
6225 * MA interrupt handler.
6226 */
ma_intr_handler(struct adapter * adap,int arg,int flags)6227 static bool ma_intr_handler(struct adapter *adap, int arg, int flags)
6228 {
6229 static const struct intr_action ma_intr_actions[] = {
6230 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
6231 { 0 },
6232 };
6233 static const struct intr_info ma_intr_info = {
6234 .name = "MA_INT_CAUSE",
6235 .cause_reg = A_MA_INT_CAUSE,
6236 .enable_reg = A_MA_INT_ENABLE,
6237 .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
6238 .flags = IHF_FATAL_IFF_ENABLED,
6239 .details = NULL,
6240 .actions = ma_intr_actions,
6241 };
6242 static const struct intr_info ma_perr_status1 = {
6243 .name = "MA_PARITY_ERROR_STATUS1",
6244 .cause_reg = A_MA_PARITY_ERROR_STATUS1,
6245 .enable_reg = A_MA_PARITY_ERROR_ENABLE1,
6246 .fatal = 0xffffffff,
6247 .flags = 0,
6248 .details = NULL,
6249 .actions = NULL,
6250 };
6251 static const struct intr_info ma_perr_status2 = {
6252 .name = "MA_PARITY_ERROR_STATUS2",
6253 .cause_reg = A_MA_PARITY_ERROR_STATUS2,
6254 .enable_reg = A_MA_PARITY_ERROR_ENABLE2,
6255 .fatal = 0xffffffff,
6256 .flags = 0,
6257 .details = NULL,
6258 .actions = NULL,
6259 };
6260 bool fatal;
6261
6262 fatal = false;
6263 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, flags);
6264 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, flags);
6265 if (chip_id(adap) > CHELSIO_T4)
6266 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, flags);
6267
6268 return (fatal);
6269 }
6270
6271 /*
6272 * SMB interrupt handler.
6273 */
smb_intr_handler(struct adapter * adap,int arg,int flags)6274 static bool smb_intr_handler(struct adapter *adap, int arg, int flags)
6275 {
6276 static const struct intr_details smb_int_cause_fields[] = {
6277 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
6278 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
6279 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
6280 { 0 }
6281 };
6282 static const struct intr_info smb_int_cause = {
6283 .name = "SMB_INT_CAUSE",
6284 .cause_reg = A_SMB_INT_CAUSE,
6285 .enable_reg = A_SMB_INT_ENABLE,
6286 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
6287 .flags = 0,
6288 .details = smb_int_cause_fields,
6289 .actions = NULL,
6290 };
6291 return (t4_handle_intr(adap, &smb_int_cause, 0, flags));
6292 }
6293
6294 /*
6295 * NC-SI interrupt handler.
6296 */
static bool ncsi_intr_handler(struct adapter *adap, int arg, int flags)
{
	/* Decode table for the NCSI_INT_CAUSE bits. */
	static const struct intr_details ncsi_int_cause_fields[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info ncsi_int_cause = {
		.name = "NCSI_INT_CAUSE",
		.cause_reg = A_NCSI_INT_CAUSE,
		.enable_reg = A_NCSI_INT_ENABLE,
		/* All four parity errors are fatal. */
		.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
		    F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
		.flags = 0,
		.details = ncsi_int_cause_fields,
		.actions = NULL,
	};
	/* XGMAC0 cause register within the NC-SI block (read on T7+ only). */
	static const struct intr_info ncsi_xgmac0_int_cause = {
		.name = "NCSI_XGMAC0_INT_CAUSE",
		.cause_reg = A_NCSI_XGMAC0_INT_CAUSE,
		.enable_reg = A_NCSI_XGMAC0_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ncsi_int_cause, 0, flags);
	if (chip_id(adap) > CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &ncsi_xgmac0_int_cause, 0, flags);
	return (fatal);
}
6332
6333 /*
6334 * MAC interrupt handler.
6335 */
static bool mac_intr_handler(struct adapter *adap, int port, int flags)
{
	/* T7-only common MAC interrupt registers. */
	static const struct intr_info mac_int_cause_cmn = {
		.name = "MAC_INT_CAUSE_CMN",
		.cause_reg = A_MAC_INT_CAUSE_CMN,
		.enable_reg = A_MAC_INT_EN_CMN,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mac_perr_cause_mtip = {
		.name = "MAC_PERR_INT_CAUSE_MTIP",
		.cause_reg = A_MAC_PERR_INT_CAUSE_MTIP,
		.enable_reg = A_MAC_PERR_INT_EN_MTIP,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mac_cerr_cause_mtip = {
		.name = "MAC_CERR_INT_CAUSE_MTIP",
		.cause_reg = A_MAC_CERR_INT_CAUSE_MTIP,
		.enable_reg = A_MAC_CERR_INT_EN_MTIP,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mac_ios_int_cause_quad0 = {
		.name = "MAC_IOS_INTR_CAUSE_QUAD0",
		.cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD0,
		.enable_reg = A_MAC_IOS_INTR_EN_QUAD0,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info mac_ios_int_cause_quad1 = {
		.name = "MAC_IOS_INTR_CAUSE_QUAD1",
		.cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD1,
		.enable_reg = A_MAC_IOS_INTR_EN_QUAD1,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_details mac_intr_details[] = {
		{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
		{ 0 }
	};
	char name[32];
	struct intr_info ii;	/* filled per chip generation below */
	bool fatal = false;

	/* T6 has only two MAC ports; ignore requests for ports 2/3. */
	if (port > 1 && is_t6(adap))
		return (false);

	/*
	 * Per-port MAC interrupt cause.  The register block and layout
	 * differ by chip generation: T4 (XGMAC), T5/T6, and T7+.
	 */
	if (is_t4(adap)) {
		snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
		ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else if (chip_id(adap) < CHELSIO_T7) {
		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else {
		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
		ii.fatal = 0xffffffff;
		ii.flags = IHF_FATAL_IFF_ENABLED;
		ii.details = NULL;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, flags);
	/* T4 has no per-port PERR registers; done. */
	if (is_t4(adap))
		return (fatal);

	/* Per-port parity-error cause (T5 and later). */
	MPASS(chip_id(adap) >= CHELSIO_T5);
	snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
	if (chip_id(adap) > CHELSIO_T6) {
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0xffffffff;
		ii.flags = IHF_FATAL_IFF_ENABLED;
		ii.details = NULL;
		ii.actions = NULL;
	} else {
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0xffffffff;
		ii.flags = IHF_FATAL_IFF_ENABLED;
		ii.details = NULL;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, flags);
	if (is_t5(adap))
		return (fatal);

	/* 100G parity-error cause (T6 and later). */
	MPASS(chip_id(adap) >= CHELSIO_T6);
	snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
	if (chip_id(adap) > CHELSIO_T6) {
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0xffffffff;
		ii.flags = IHF_FATAL_IFF_ENABLED;
		ii.details = NULL;
		ii.actions = NULL;
	} else {
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0xffffffff;
		ii.flags = IHF_FATAL_IFF_ENABLED;
		ii.details = NULL;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, flags);
	if (is_t6(adap))
		return (fatal);

	/* T7-only common (non per-port) MAC interrupt registers. */
	MPASS(chip_id(adap) >= CHELSIO_T7);
	fatal |= t4_handle_intr(adap, &mac_int_cause_cmn, 0, flags);
	fatal |= t4_handle_intr(adap, &mac_perr_cause_mtip, 0, flags);
	fatal |= t4_handle_intr(adap, &mac_cerr_cause_mtip, 0, flags);
	fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad0, 0, flags);
	fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad1, 0, flags);

	return (fatal);
}
6482
pl_timeout_status(struct adapter * adap,int arg,int flags)6483 static bool pl_timeout_status(struct adapter *adap, int arg, int flags)
6484 {
6485 if (flags & IHF_NO_SHOW)
6486 return (false);
6487
6488 CH_ALERT(adap, " PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
6489 t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
6490 t4_read_reg(adap, A_PL_TIMEOUT_STATUS1));
6491
6492 return (false);
6493 }
6494
/* PL (bus fabric) interrupt handler for the PL_PL_INT_CAUSE register. */
static bool plpl_intr_handler(struct adapter *adap, int arg, int flags)
{
	static const struct intr_details plpl_int_cause_fields[] = {
		{ F_PL_BUSPERR, "Bus parity error" },
		{ F_FATALPERR, "Fatal parity error" },
		{ F_INVALIDACCESS, "Global reserved memory access" },
		{ F_TIMEOUT, "Bus timeout" },
		{ F_PLERR, "Module reserved access" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	/* A bus timeout additionally dumps PL_TIMEOUT_STATUS0/1. */
	static const struct intr_action plpl_int_cause_actions[] = {
		{ F_TIMEOUT, -1, pl_timeout_status },
		{ 0 },
	};
	static const struct intr_info plpl_int_cause = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		/* Only the parity errors are fatal, and only if enabled. */
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
		.details = plpl_int_cause_fields,
		.actions = plpl_int_cause_actions,
	};

	return (t4_handle_intr(adap, &plpl_int_cause, 0, flags));
}
6522
6523 /* similar to t4_port_reg */
6524 static inline u32
t7_tlstx_reg(u8 instance,u8 channel,u32 reg)6525 t7_tlstx_reg(u8 instance, u8 channel, u32 reg)
6526 {
6527 MPASS(instance <= 1);
6528 MPASS(channel < NUM_TLS_TX_CH_INSTANCES);
6529 return (instance * (CRYPTO_1_BASE_ADDR - CRYPTO_0_BASE_ADDR) +
6530 TLS_TX_CH_REG(reg, channel));
6531 }
6532
6533 /*
6534 * CRYPTO (aka TLS_TX) interrupt handler.
6535 */
static bool tlstx_intr_handler(struct adapter *adap, int idx, int flags)
{
	static const struct intr_details tlstx_int_cause_fields[] = {
		{ F_KEX_CERR, "KEX SRAM Correctable error" },
		{ F_KEYLENERR, "IPsec Key length error" },
		{ F_INTF1_PERR, "Input Interface1 parity error" },
		{ F_INTF0_PERR, "Input Interface0 parity error" },
		{ F_KEX_PERR, "KEX SRAM Parity error" },
		{ 0 }
	};
	/* name/cause_reg/enable_reg are filled in per channel below. */
	struct intr_info ii = {
		.fatal = F_KEX_PERR | F_INTF0_PERR | F_INTF1_PERR,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = tlstx_int_cause_fields,
		.actions = NULL,
	};
	char name[32];
	int ch;
	bool fatal = false;

	/* idx selects the crypto instance; walk all of its channels. */
	for (ch = 0; ch < NUM_TLS_TX_CH_INSTANCES; ch++) {
		snprintf(name, sizeof(name), "TLSTX%u_CH%u_INT_CAUSE", idx, ch);
		ii.name = &name[0];
		ii.cause_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_CAUSE);
		ii.enable_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_ENABLE);
		fatal |= t4_handle_intr(adap, &ii, 0, flags);
	}

	return (fatal);
}
6566
6567 /*
6568 * HMA interrupt handler.
6569 */
static bool hma_intr_handler(struct adapter *adap, int idx, int flags)
{
	static const struct intr_details hma_int_cause_fields[] = {
		{ F_GK_UF_INT_CAUSE, "Gatekeeper underflow" },
		{ F_IDTF_INT_CAUSE, "Invalid descriptor fault" },
		{ F_OTF_INT_CAUSE, "Offset translation fault" },
		{ F_RTF_INT_CAUSE, "Region translation fault" },
		{ F_PCIEMST_INT_CAUSE, "PCIe master access error" },
		{ F_MAMST_INT_CAUSE, "MA master access error" },
		{ 1, "FIFO parity error" },	/* bit 0 */
		{ 0 }
	};
	static const struct intr_info hma_int_cause = {
		.name = "HMA_INT_CAUSE",
		.cause_reg = A_HMA_INT_CAUSE,
		.enable_reg = A_HMA_INT_ENABLE,
		/*
		 * Bits 2:0 are fatal.  NOTE(review): only bit 0 is decoded
		 * above; presumably bits 1-2 are also parity-related —
		 * confirm against the register specification.
		 */
		.fatal = 7,
		.flags = 0,
		.details = hma_int_cause_fields,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &hma_int_cause, 0, flags));
}
6594
6595 /*
6596 * CRYPTO_KEY interrupt handler.
6597 */
static bool cryptokey_intr_handler(struct adapter *adap, int idx, int flags)
{
	/* Decode table for the CRYPTO_KEY_INT_CAUSE bits. */
	static const struct intr_details cryptokey_int_cause_fields[] = {
		{ F_MA_FIFO_PERR, "MA arbiter FIFO parity error" },
		{ F_MA_RSP_PERR, "MA response IF parity error" },
		{ F_ING_CACHE_DATA_PERR, "Ingress key cache data parity error" },
		{ F_ING_CACHE_TAG_PERR, "Ingress key cache tag parity error" },
		{ F_LKP_KEY_REQ_PERR, "Ingress key req parity error" },
		{ F_LKP_CLIP_TCAM_PERR, "Ingress LKP CLIP TCAM parity error" },
		{ F_LKP_MAIN_TCAM_PERR, "Ingress LKP main TCAM parity error" },
		{ F_EGR_KEY_REQ_PERR, "Egress key req or FIFO3 parity error" },
		{ F_EGR_CACHE_DATA_PERR, "Egress key cache data parity error" },
		{ F_EGR_CACHE_TAG_PERR, "Egress key cache tag parity error" },
		{ F_CIM_PERR, "CIM interface parity error" },
		{ F_MA_INV_RSP_TAG, "MA invalid response tag" },
		{ F_ING_KEY_RANGE_ERR, "Ingress key range error" },
		{ F_ING_MFIFO_OVFL, "Ingress MFIFO overflow" },
		{ F_LKP_REQ_OVFL, "Ingress lookup FIFO overflow" },
		{ F_EOK_WAIT_ERR, "EOK wait error" },
		{ F_EGR_KEY_RANGE_ERR, "Egress key range error" },
		{ F_EGR_MFIFO_OVFL, "Egress MFIFO overflow" },
		{ F_SEQ_WRAP_HP_OVFL, "Sequence wrap (hi-pri)" },
		{ F_SEQ_WRAP_LP_OVFL, "Sequence wrap (lo-pri)" },
		{ F_EGR_SEQ_WRAP_HP, "Egress sequence wrap (hi-pri)" },
		{ F_EGR_SEQ_WRAP_LP, "Egress sequence wrap (lo-pri)" },
		{ 0 }
	};
	static const struct intr_info cryptokey_int_cause = {
		.name = "CRYPTO_KEY_INT_CAUSE",
		.cause_reg = A_CRYPTO_KEY_INT_CAUSE,
		.enable_reg = A_CRYPTO_KEY_INT_ENABLE,
		/* Any enabled cause bit is considered fatal. */
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = cryptokey_int_cause_fields,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cryptokey_int_cause, 0, flags));
}
6637
6638 /*
6639 * GCACHE interrupt handler.
6640 */
static bool gcache_intr_handler(struct adapter *adap, int idx, int flags)
{
	/* Decode table for GCACHE_INT_CAUSE (both GC0 and GC1 instances). */
	static const struct intr_details gcache_int_cause_fields[] = {
		{ F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC1 SRAM rsp dataq perr" },
		{ F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC0 SRAM rsp dataq perr" },
		{ F_GC1_WQDATA_FIFO_PERR_INT_CAUSE, "GC1 wqdata FIFO perr" },
		{ F_GC0_WQDATA_FIFO_PERR_INT_CAUSE, "GC0 wqdata FIFO perr" },
		{ F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 rdtag queue perr" },
		{ F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 rdtag queue perr" },
		{ F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 SRAM rdtag queue perr" },
		{ F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 SRAM rdtag queue perr" },
		{ F_GC1_RSP_PERR_INT_CAUSE, "GC1 rsp perr" },
		{ F_GC0_RSP_PERR_INT_CAUSE, "GC0 rsp perr" },
		{ F_GC1_LRU_UERR_INT_CAUSE, "GC1 lru uerr" },
		{ F_GC0_LRU_UERR_INT_CAUSE, "GC0 lru uerr" },
		{ F_GC1_TAG_UERR_INT_CAUSE, "GC1 tag uerr" },
		{ F_GC0_TAG_UERR_INT_CAUSE, "GC0 tag uerr" },
		{ F_GC1_LRU_CERR_INT_CAUSE, "GC1 lru cerr" },
		{ F_GC0_LRU_CERR_INT_CAUSE, "GC0 lru cerr" },
		{ F_GC1_TAG_CERR_INT_CAUSE, "GC1 tag cerr" },
		{ F_GC0_TAG_CERR_INT_CAUSE, "GC0 tag cerr" },
		{ F_GC1_CE_INT_CAUSE, "GC1 correctable error" },
		{ F_GC0_CE_INT_CAUSE, "GC0 correctable error" },
		{ F_GC1_UE_INT_CAUSE, "GC1 uncorrectable error" },
		{ F_GC0_UE_INT_CAUSE, "GC0 uncorrectable error" },
		{ F_GC1_CMD_PAR_INT_CAUSE, "GC1 cmd perr" },
		{ F_GC1_DATA_PAR_INT_CAUSE, "GC1 data perr" },
		{ F_GC0_CMD_PAR_INT_CAUSE, "GC0 cmd perr" },
		{ F_GC0_DATA_PAR_INT_CAUSE, "GC0 data perr" },
		{ F_ILLADDRACCESS1_INT_CAUSE, "GC1 illegal address access" },
		{ F_ILLADDRACCESS0_INT_CAUSE, "GC0 illegal address access" },
		{ 0 }
	};
	/* Parity-error cause: any enabled bit is fatal. */
	static const struct intr_info gcache_perr_cause = {
		.name = "GCACHE_PAR_CAUSE",
		.cause_reg = A_GCACHE_PAR_CAUSE,
		.enable_reg = A_GCACHE_PAR_ENABLE,
		.fatal = 0xffffffff,
		.flags = IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* General cause: informational only, never fatal. */
	static const struct intr_info gcache_int_cause = {
		.name = "GCACHE_INT_CAUSE",
		.cause_reg = A_GCACHE_INT_CAUSE,
		.enable_reg = A_GCACHE_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = gcache_int_cause_fields,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &gcache_int_cause, 0, flags);
	fatal |= t4_handle_intr(adap, &gcache_perr_cause, 0, flags);

	return (fatal);
}
6699
6700 /*
6701 * ARM interrupt handler.
6702 */
static bool arm_intr_handler(struct adapter *adap, int idx, int flags)
{
	/*
	 * All ARM cause registers are processed the same way: ignore if the
	 * interrupt is disabled, and treat any enabled cause bit as fatal
	 * only when it is also enabled (IHF_FATAL_IFF_ENABLED).
	 */
	static const struct intr_info arm_perr_cause0 = {
		.name = "ARM_PERR_INT_CAUSE0",
		.cause_reg = A_ARM_PERR_INT_CAUSE0,
		.enable_reg = A_ARM_PERR_INT_ENB0,
		.fatal = 0xffffffff,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info arm_perr_cause1 = {
		.name = "ARM_PERR_INT_CAUSE1",
		.cause_reg = A_ARM_PERR_INT_CAUSE1,
		.enable_reg = A_ARM_PERR_INT_ENB1,
		.fatal = 0xffffffff,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info arm_perr_cause2 = {
		.name = "ARM_PERR_INT_CAUSE2",
		.cause_reg = A_ARM_PERR_INT_CAUSE2,
		.enable_reg = A_ARM_PERR_INT_ENB2,
		.fatal = 0xffffffff,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* NOTE(review): name lacks the trailing 0 of the register — confirm. */
	static const struct intr_info arm_cerr_cause0 = {
		.name = "ARM_CERR_INT_CAUSE",
		.cause_reg = A_ARM_CERR_INT_CAUSE0,
		.enable_reg = A_ARM_CERR_INT_ENB0,
		.fatal = 0,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info arm_err_cause0 = {
		.name = "ARM_ERR_INT_CAUSE",
		.cause_reg = A_ARM_ERR_INT_CAUSE0,
		.enable_reg = A_ARM_ERR_INT_ENB0,
		.fatal = 0,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info arm_periph_cause = {
		.name = "ARM_PERIPHERAL_INT_CAUSE",
		.cause_reg = A_ARM_PERIPHERAL_INT_CAUSE,
		.enable_reg = A_ARM_PERIPHERAL_INT_ENB,
		.fatal = 0,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info arm_nvme_db_emu_cause = {
		.name = "ARM_NVME_DB_EMU_INT_CAUSE",
		.cause_reg = A_ARM_NVME_DB_EMU_INT_CAUSE,
		.enable_reg = A_ARM_NVME_DB_EMU_INT_ENABLE,
		.fatal = 0,
		.flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &arm_perr_cause0, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_perr_cause1, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_perr_cause2, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_cerr_cause0, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_err_cause0, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_periph_cause, 0, flags);
	fatal |= t4_handle_intr(adap, &arm_nvme_db_emu_cause, 0, flags);

	return (fatal);
}
6780
6781 static inline uint32_t
get_perr_ucause(struct adapter * sc,const struct intr_info * ii)6782 get_perr_ucause(struct adapter *sc, const struct intr_info *ii)
6783 {
6784 uint32_t cause;
6785
6786 cause = t4_read_reg(sc, ii->cause_reg);
6787 if (ii->flags & IHF_IGNORE_IF_DISABLED)
6788 cause &= t4_read_reg(sc, ii->enable_reg);
6789 return (cause);
6790 }
6791
6792 static uint32_t
t4_perr_to_ic(struct adapter * adap,uint32_t perr)6793 t4_perr_to_ic(struct adapter *adap, uint32_t perr)
6794 {
6795 uint32_t mask;
6796
6797 if (adap->chip_params->nchan > 2)
6798 mask = F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
6799 else
6800 mask = F_MAC0 | F_MAC1;
6801 return (perr & mask ? perr | mask : perr);
6802 }
6803
6804 static uint32_t
t7_perr_to_ic1(uint32_t perr)6805 t7_perr_to_ic1(uint32_t perr)
6806 {
6807 uint32_t cause = 0;
6808
6809 if (perr & F_T7_PL_PERR_ULP_TX)
6810 cause |= F_T7_ULP_TX;
6811 if (perr & F_T7_PL_PERR_SGE)
6812 cause |= F_T7_SGE;
6813 if (perr & F_T7_PL_PERR_HMA)
6814 cause |= F_T7_HMA;
6815 if (perr & F_T7_PL_PERR_CPL_SWITCH)
6816 cause |= F_T7_CPL_SWITCH;
6817 if (perr & F_T7_PL_PERR_ULP_RX)
6818 cause |= F_T7_ULP_RX;
6819 if (perr & F_T7_PL_PERR_PM_RX)
6820 cause |= F_T7_PM_RX;
6821 if (perr & F_T7_PL_PERR_PM_TX)
6822 cause |= F_T7_PM_TX;
6823 if (perr & F_T7_PL_PERR_MA)
6824 cause |= F_T7_MA;
6825 if (perr & F_T7_PL_PERR_TP)
6826 cause |= F_T7_TP;
6827 if (perr & F_T7_PL_PERR_LE)
6828 cause |= F_T7_LE;
6829 if (perr & F_T7_PL_PERR_EDC1)
6830 cause |= F_T7_EDC1;
6831 if (perr & F_T7_PL_PERR_EDC0)
6832 cause |= F_T7_EDC0;
6833 if (perr & F_T7_PL_PERR_MC1)
6834 cause |= F_T7_MC1;
6835 if (perr & F_T7_PL_PERR_MC0)
6836 cause |= F_T7_MC0;
6837 if (perr & F_T7_PL_PERR_PCIE)
6838 cause |= F_T7_PCIE;
6839 if (perr & F_T7_PL_PERR_UART)
6840 cause |= F_T7_UART;
6841 if (perr & F_T7_PL_PERR_PMU)
6842 cause |= F_PMU;
6843 if (perr & F_T7_PL_PERR_MAC)
6844 cause |= F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
6845 if (perr & F_T7_PL_PERR_SMB)
6846 cause |= F_SMB;
6847 if (perr & F_T7_PL_PERR_SF)
6848 cause |= F_SF;
6849 if (perr & F_T7_PL_PERR_PL)
6850 cause |= F_PL;
6851 if (perr & F_T7_PL_PERR_NCSI)
6852 cause |= F_NCSI;
6853 if (perr & F_T7_PL_PERR_MPS)
6854 cause |= F_MPS;
6855 if (perr & F_T7_PL_PERR_MI)
6856 cause |= F_MI;
6857 if (perr & F_T7_PL_PERR_DBG)
6858 cause |= F_DBG;
6859 if (perr & F_T7_PL_PERR_I2CM)
6860 cause |= F_I2CM;
6861 if (perr & F_T7_PL_PERR_CIM)
6862 cause |= F_CIM;
6863
6864 return (cause);
6865 }
6866
6867 static uint32_t
t7_perr_to_ic2(uint32_t perr)6868 t7_perr_to_ic2(uint32_t perr)
6869 {
6870 uint32_t cause = 0;
6871
6872 if (perr & F_T7_PL_PERR_CRYPTO_KEY)
6873 cause |= F_CRYPTO_KEY;
6874 if (perr & F_T7_PL_PERR_CRYPTO1)
6875 cause |= F_CRYPTO1;
6876 if (perr & F_T7_PL_PERR_CRYPTO0)
6877 cause |= F_CRYPTO0;
6878 if (perr & F_T7_PL_PERR_GCACHE)
6879 cause |= F_GCACHE;
6880 if (perr & F_T7_PL_PERR_ARM)
6881 cause |= F_ARM;
6882
6883 return (cause);
6884 }
6885
6886 /**
6887 * t4_slow_intr_handler - control path interrupt handler
6888 * @adap: the adapter
6889 *
6890 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
6891 * The designation 'slow' is because it involves register reads, while
6892 * data interrupts typically don't involve any MMIOs.
6893 */
t4_slow_intr_handler(struct adapter * adap,int flags)6894 bool t4_slow_intr_handler(struct adapter *adap, int flags)
6895 {
6896 static const struct intr_details pl_int_cause_fields[] = {
6897 { F_MC1, "MC1" },
6898 { F_UART, "UART" },
6899 { F_ULP_TX, "ULP TX" },
6900 { F_SGE, "SGE" },
6901 { F_HMA, "HMA" },
6902 { F_CPL_SWITCH, "CPL Switch" },
6903 { F_ULP_RX, "ULP RX" },
6904 { F_PM_RX, "PM RX" },
6905 { F_PM_TX, "PM TX" },
6906 { F_MA, "MA" },
6907 { F_TP, "TP" },
6908 { F_LE, "LE" },
6909 { F_EDC1, "EDC1" },
6910 { F_EDC0, "EDC0" },
6911 { F_MC, "MC0" },
6912 { F_PCIE, "PCIE" },
6913 { F_PMU, "PMU" },
6914 { F_MAC3, "MAC3" },
6915 { F_MAC2, "MAC2" },
6916 { F_MAC1, "MAC1" },
6917 { F_MAC0, "MAC0" },
6918 { F_SMB, "SMB" },
6919 { F_SF, "SF" },
6920 { F_PL, "PL" },
6921 { F_NCSI, "NC-SI" },
6922 { F_MPS, "MPS" },
6923 { F_MI, "MI" },
6924 { F_DBG, "DBG" },
6925 { F_I2CM, "I2CM" },
6926 { F_CIM, "CIM" },
6927 { 0 }
6928 };
6929 static const struct intr_action pl_int_cause_actions[] = {
6930 { F_ULP_TX, -1, ulptx_intr_handler },
6931 { F_SGE, -1, sge_intr_handler },
6932 { F_CPL_SWITCH, -1, cplsw_intr_handler },
6933 { F_ULP_RX, -1, ulprx_intr_handler },
6934 { F_PM_RX, -1, pmtx_intr_handler },
6935 { F_PM_TX, -1, pmtx_intr_handler },
6936 { F_MA, -1, ma_intr_handler },
6937 { F_TP, -1, tp_intr_handler },
6938 { F_LE, -1, le_intr_handler },
6939 { F_EDC0, MEM_EDC0, mem_intr_handler },
6940 { F_EDC1, MEM_EDC1, mem_intr_handler },
6941 { F_MC0, MEM_MC0, mem_intr_handler },
6942 { F_MC1, MEM_MC1, mem_intr_handler },
6943 { F_PCIE, -1, pcie_intr_handler },
6944 { F_MAC0, 0, mac_intr_handler },
6945 { F_MAC1, 1, mac_intr_handler },
6946 { F_MAC2, 2, mac_intr_handler },
6947 { F_MAC3, 3, mac_intr_handler },
6948 { F_SMB, -1, smb_intr_handler },
6949 { F_PL, -1, plpl_intr_handler },
6950 { F_NCSI, -1, ncsi_intr_handler },
6951 { F_MPS, -1, mps_intr_handler },
6952 { F_CIM, -1, cim_intr_handler },
6953 { 0 }
6954 };
6955 static const struct intr_info pl_int_cause = {
6956 .name = "PL_INT_CAUSE",
6957 .cause_reg = A_PL_INT_CAUSE,
6958 .enable_reg = A_PL_INT_ENABLE,
6959 .fatal = 0,
6960 .flags = IHF_IGNORE_IF_DISABLED,
6961 .details = pl_int_cause_fields,
6962 .actions = pl_int_cause_actions,
6963 };
6964 static const struct intr_info pl_perr_cause = {
6965 .name = "PL_PERR_CAUSE",
6966 .cause_reg = A_PL_PERR_CAUSE,
6967 .enable_reg = A_PL_PERR_ENABLE,
6968 .fatal = 0xffffffff,
6969 .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
6970 .details = pl_int_cause_fields,
6971 .actions = NULL,
6972 };
6973 static const struct intr_details t7_pl_int_cause_fields[] = {
6974 { F_T7_FLR, "FLR" },
6975 { F_T7_SW_CIM, "SW CIM" },
6976 { F_T7_ULP_TX, "ULP TX" },
6977 { F_T7_SGE, "SGE" },
6978 { F_T7_HMA, "HMA" },
6979 { F_T7_CPL_SWITCH, "CPL Switch" },
6980 { F_T7_ULP_RX, "ULP RX" },
6981 { F_T7_PM_RX, "PM RX" },
6982 { F_T7_PM_TX, "PM TX" },
6983 { F_T7_MA, "MA" },
6984 { F_T7_TP, "TP" },
6985 { F_T7_LE, "LE" },
6986 { F_T7_EDC1, "EDC1" },
6987 { F_T7_EDC0, "EDC0" },
6988 { F_T7_MC1, "MC1" },
6989 { F_T7_MC0, "MC0" },
6990 { F_T7_PCIE, "PCIE" },
6991 { F_T7_UART, "UART" },
6992 { F_PMU, "PMU" },
6993 { F_MAC3, "MAC3" },
6994 { F_MAC2, "MAC2" },
6995 { F_MAC1, "MAC1" },
6996 { F_MAC0, "MAC0" },
6997 { F_SMB, "SMB" },
6998 { F_SF, "SF" },
6999 { F_PL, "PL" },
7000 { F_NCSI, "NC-SI" },
7001 { F_MPS, "MPS" },
7002 { F_MI, "MI" },
7003 { F_DBG, "DBG" },
7004 { F_I2CM, "I2CM" },
7005 { F_CIM, "CIM" },
7006 { 0 }
7007 };
7008 static const struct intr_action t7_pl_int_cause_actions[] = {
7009 { F_T7_ULP_TX, -1, ulptx_intr_handler },
7010 { F_T7_SGE, -1, sge_intr_handler },
7011 { F_T7_HMA, -1, hma_intr_handler },
7012 { F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
7013 { F_T7_ULP_RX, -1, ulprx_intr_handler },
7014 { F_T7_PM_RX, -1, pmrx_intr_handler },
7015 { F_T7_PM_TX, -1, pmtx_intr_handler },
7016 { F_T7_MA, -1, ma_intr_handler },
7017 { F_T7_TP, -1, tp_intr_handler },
7018 { F_T7_LE, -1, le_intr_handler },
7019 { F_T7_EDC0, MEM_EDC0, mem_intr_handler },
7020 { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
7021 { F_T7_MC0, MEM_MC0, mem_intr_handler },
7022 { F_T7_MC1, MEM_MC1, mem_intr_handler },
7023 { F_T7_PCIE, -1, pcie_intr_handler },
7024 { F_MAC0, 0, mac_intr_handler },
7025 { F_MAC1, 1, mac_intr_handler },
7026 { F_MAC2, 2, mac_intr_handler },
7027 { F_MAC3, 3, mac_intr_handler },
7028 { F_SMB, -1, smb_intr_handler },
7029 { F_PL, -1, plpl_intr_handler },
7030 { F_NCSI, -1, ncsi_intr_handler },
7031 { F_MPS, -1, mps_intr_handler },
7032 { F_CIM, -1, cim_intr_handler },
7033 { 0 }
7034 };
7035 static const struct intr_info t7_pl_int_cause = {
7036 .name = "PL_INT_CAUSE",
7037 .cause_reg = A_PL_INT_CAUSE,
7038 .enable_reg = A_PL_INT_ENABLE,
7039 .fatal = 0,
7040 .flags = IHF_IGNORE_IF_DISABLED,
7041 .details = t7_pl_int_cause_fields,
7042 .actions = t7_pl_int_cause_actions,
7043 };
7044 static const struct intr_details t7_pl_int_cause2_fields[] = {
7045 { F_CRYPTO_KEY, "CRYPTO KEY" },
7046 { F_CRYPTO1, "CRYPTO1" },
7047 { F_CRYPTO0, "CRYPTO0" },
7048 { F_GCACHE, "GCACHE" },
7049 { F_ARM, "ARM" },
7050 { 0 }
7051 };
7052 static const struct intr_action t7_pl_int_cause2_actions[] = {
7053 { F_CRYPTO_KEY, -1, cryptokey_intr_handler },
7054 { F_CRYPTO1, 1, tlstx_intr_handler },
7055 { F_CRYPTO0, 0, tlstx_intr_handler },
7056 { F_GCACHE, -1, gcache_intr_handler },
7057 { F_ARM, -1, arm_intr_handler },
7058 { 0 }
7059 };
7060 static const struct intr_info t7_pl_int_cause2 = {
7061 .name = "PL_INT_CAUSE2",
7062 .cause_reg = A_PL_INT_CAUSE2,
7063 .enable_reg = A_PL_INT_ENABLE2,
7064 .fatal = 0,
7065 .flags = IHF_IGNORE_IF_DISABLED,
7066 .details = t7_pl_int_cause2_fields,
7067 .actions = t7_pl_int_cause2_actions,
7068 };
7069 static const struct intr_details t7_pl_perr_cause_fields[] = {
7070 { F_T7_PL_PERR_CRYPTO_KEY, "CRYPTO KEY" },
7071 { F_T7_PL_PERR_CRYPTO1, "CRYPTO1" },
7072 { F_T7_PL_PERR_CRYPTO0, "CRYPTO0" },
7073 { F_T7_PL_PERR_GCACHE, "GCACHE" },
7074 { F_T7_PL_PERR_ARM, "ARM" },
7075 { F_T7_PL_PERR_ULP_TX, "ULP TX" },
7076 { F_T7_PL_PERR_SGE, "SGE" },
7077 { F_T7_PL_PERR_HMA, "HMA" },
7078 { F_T7_PL_PERR_CPL_SWITCH, "CPL Switch" },
7079 { F_T7_PL_PERR_ULP_RX, "ULP RX" },
7080 { F_T7_PL_PERR_PM_RX, "PM RX" },
7081 { F_T7_PL_PERR_PM_TX, "PM TX" },
7082 { F_T7_PL_PERR_MA, "MA" },
7083 { F_T7_PL_PERR_TP, "TP" },
7084 { F_T7_PL_PERR_LE, "LE" },
7085 { F_T7_PL_PERR_EDC1, "EDC1" },
7086 { F_T7_PL_PERR_EDC0, "EDC0" },
7087 { F_T7_PL_PERR_MC1, "MC1" },
7088 { F_T7_PL_PERR_MC0, "MC0" },
7089 { F_T7_PL_PERR_PCIE, "PCIE" },
7090 { F_T7_PL_PERR_UART, "UART" },
7091 { F_T7_PL_PERR_PMU, "PMU" },
7092 { F_T7_PL_PERR_MAC, "MAC" },
7093 { F_T7_PL_PERR_SMB, "SMB" },
7094 { F_T7_PL_PERR_SF, "SF" },
7095 { F_T7_PL_PERR_PL, "PL" },
7096 { F_T7_PL_PERR_NCSI, "NC-SI" },
7097 { F_T7_PL_PERR_MPS, "MPS" },
7098 { F_T7_PL_PERR_MI, "MI" },
7099 { F_T7_PL_PERR_DBG, "DBG" },
7100 { F_T7_PL_PERR_I2CM, "I2CM" },
7101 { F_T7_PL_PERR_CIM, "CIM" },
7102 { 0 }
7103 };
7104 static const struct intr_info t7_pl_perr_cause = {
7105 .name = "PL_PERR_CAUSE",
7106 .cause_reg = A_PL_PERR_CAUSE,
7107 .enable_reg = A_PL_PERR_ENABLE,
7108 .fatal = 0xffffffff,
7109 .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
7110 .details = t7_pl_perr_cause_fields,
7111 .actions = NULL,
7112 };
7113 bool fatal = false;
7114 uint32_t perr;
7115
7116 if (chip_id(adap) < CHELSIO_T7) {
7117 perr = get_perr_ucause(adap, &pl_perr_cause);
7118 fatal |= t4_handle_intr(adap, &pl_perr_cause, 0,
7119 flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
7120 fatal |= t4_handle_intr(adap, &pl_int_cause,
7121 t4_perr_to_ic(adap, perr), flags);
7122 t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
7123 (void)t4_read_reg(adap, pl_perr_cause.cause_reg);
7124 } else {
7125 perr = get_perr_ucause(adap, &t7_pl_perr_cause);
7126 fatal |= t4_handle_intr(adap, &t7_pl_perr_cause, 0,
7127 flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
7128 fatal |= t4_handle_intr(adap, &t7_pl_int_cause,
7129 t7_perr_to_ic1(perr), flags);
7130 fatal |= t4_handle_intr(adap, &t7_pl_int_cause2,
7131 t7_perr_to_ic2(perr), flags);
7132 t4_write_reg(adap, t7_pl_perr_cause.cause_reg, perr);
7133 (void)t4_read_reg(adap, t7_pl_perr_cause.cause_reg);
7134 }
7135 return (fatal);
7136 }
7137
/* Clear all pending interrupt causes throughout the interrupt hierarchy. */
void t4_intr_clear(struct adapter *adap)
{
#if 1
	/* NOTE(review): unconditional clear of SGE_INT_CAUSE8 on T7+,
	 * kept under "#if 1" so it is easy to disable — confirm intent. */
	if (chip_id(adap) >= CHELSIO_T7)
		t4_write_reg(adap, A_SGE_INT_CAUSE8, 0xffffffff);
#endif
	/* Walk the entire hierarchy silently, running all actions and
	 * clearing every cause bit that is set. */
	(void)t4_slow_intr_handler(adap,
	    IHF_NO_SHOW | IHF_RUN_ALL_ACTIONS | IHF_CLR_ALL_SET);
}
7147
7148 /**
7149 * t4_intr_enable - enable interrupts
7150 * @adapter: the adapter whose interrupts should be enabled
7151 *
7152 * Enable PF-specific interrupts for the calling function and the top-level
7153 * interrupt concentrator for global interrupts. Interrupts are already
7154 * enabled at each module, here we just enable the roots of the interrupt
7155 * hierarchies.
7156 *
7157 * Note: this function should be called only when the driver manages
7158 * non PF-specific interrupts from the various HW modules. Only one PCI
7159 * function at a time should be doing this.
7160 */
t4_intr_enable(struct adapter * adap)7161 void t4_intr_enable(struct adapter *adap)
7162 {
7163 u32 mask, val;
7164
7165 if (adap->intr_flags & IHF_INTR_CLEAR_ON_INIT)
7166 t4_intr_clear(adap);
7167 if (chip_id(adap) <= CHELSIO_T5)
7168 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
7169 F_DBFIFO_LP_INT;
7170 else
7171 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
7172 val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
7173 F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
7174 F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
7175 F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
7176 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
7177 mask = val;
7178 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
7179 if (chip_id(adap) >= CHELSIO_T7)
7180 t4_write_reg(adap, A_SGE_INT_ENABLE4, 0xffffffff);
7181 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), F_PFSW | F_PFCIM);
7182 t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
7183 #if 1
7184 if (chip_id(adap) >= CHELSIO_T7)
7185 t4_set_reg_field(adap, A_PL_INT_ENABLE, F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3, 0);
7186 #endif
7187 t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
7188 }
7189
7190 /**
7191 * t4_intr_disable - disable interrupts
7192 * @adap: the adapter whose interrupts should be disabled
7193 *
7194 * Disable interrupts. We only disable the top-level interrupt
7195 * concentrators. The caller must be a PCI function managing global
7196 * interrupts.
7197 */
t4_intr_disable(struct adapter * adap)7198 void t4_intr_disable(struct adapter *adap)
7199 {
7200
7201 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
7202 t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
7203 }
7204
7205 /**
7206 * hash_mac_addr - return the hash value of a MAC address
7207 * @addr: the 48-bit Ethernet MAC address
7208 *
7209 * Hashes a MAC address according to the hash function used by HW inexact
7210 * (hash) address matching.
7211 */
hash_mac_addr(const u8 * addr)7212 static int hash_mac_addr(const u8 *addr)
7213 {
7214 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
7215 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
7216 a ^= b;
7217 a ^= (a >> 12);
7218 a ^= (a >> 6);
7219 return a & 0x3f;
7220 }
7221
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.  Returns 0 on success or a firmware error code.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
		    int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for reusing @rspq */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;	/* unused slots read as 0 */
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* wrap: reuse supplied values */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
7317
7318 /**
7319 * t4_config_glbl_rss - configure the global RSS mode
7320 * @adapter: the adapter
7321 * @mbox: mbox to use for the FW command
7322 * @mode: global RSS mode
7323 * @flags: mode-specific flags
7324 *
7325 * Sets the global RSS mode.
7326 */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)7327 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
7328 unsigned int flags)
7329 {
7330 struct fw_rss_glb_config_cmd c;
7331
7332 memset(&c, 0, sizeof(c));
7333 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
7334 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
7335 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7336 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
7337 c.u.manual.mode_pkd =
7338 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
7339 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
7340 c.u.basicvirtual.mode_keymode =
7341 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
7342 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
7343 } else
7344 return -EINVAL;
7345 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
7346 }
7347
/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties via a firmware command.
 *	Returns 0 on success or a firmware error code.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		 unsigned int flags, unsigned int defq, unsigned int skeyidx,
		 unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	/* Caller-supplied flags are merged with the default-queue field. */
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
7379
/*
 * Read an RSS table row into *val.  Returns 0 on success or a negative
 * value if the hardware did not complete the read in time.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	if (chip_id(adap) < CHELSIO_T7) {
		/* Request the row, then poll for the row-valid bit. */
		t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
		return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE,
					   F_LKPTBLROWVLD, 1, 5, 0, val);
	} else {
		/* T7+: the lookup table is reached via the TP SRAM interface. */
		t4_write_reg(adap, A_TP_RSS_CONFIG_SRAM, 0xB0000 | row);
		return t7_wait_sram_done(adap, A_TP_RSS_CONFIG_SRAM,
		    A_TP_RSS_LKP_TABLE, 5, 0, val);
	}
}
7393
7394 /**
7395 * t4_read_rss - read the contents of the RSS mapping table
7396 * @adapter: the adapter
7397 * @map: holds the contents of the RSS mapping table
7398 *
7399 * Reads the contents of the RSS hash->queue mapping table.
7400 */
t4_read_rss(struct adapter * adapter,u16 * map)7401 int t4_read_rss(struct adapter *adapter, u16 *map)
7402 {
7403 u32 val;
7404 int i, ret;
7405 int rss_nentries = adapter->chip_params->rss_nentries;
7406
7407 for (i = 0; i < rss_nentries / 2; ++i) {
7408 ret = rd_rss_row(adapter, i, &val);
7409 if (ret)
7410 return ret;
7411 *map++ = G_LKPTBLQUEUE0(val);
7412 *map++ = G_LKPTBLQUEUE1(val);
7413 }
7414 return 0;
7415 }
7416
/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST, one firmware command per
 *	register.  Returns 0 on success or the first mailbox error.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* On a read the value field is ignored; send 0. */
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}
7458
/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access (direct address/data register pairs).
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* stays nonzero unless LDST succeeds */
	int cmd;

	/* Map the address register to the firmware LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST address space for this register: use the backdoor. */
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct indirect-register access if LDST was not
	 * attempted or failed. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
7509
/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP PIO Registers via LDST or the backdoor window.
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
7526
/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers via LDST or the backdoor window.  @buff is
 *	not modified; the const is cast away only to share the helper.
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}
7543
/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers via LDST or the backdoor window.
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}
7560
/**
 *	t4_tp_mib_read - Read TP MIB registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP MIB Registers via LDST or the backdoor window.
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
7577
/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key from the TP secret-key registers.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}
7590
/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the 320-bit key into the secret-key registers first. */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* Then commit it to the selected key-table slot (if any). */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}
7629
/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	/* The per-PF config registers are laid out consecutively from PF0. */
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}
7645
/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	/* The per-PF config registers are laid out consecutively from PF0. */
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}
7662
/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* T6+ moved/widened the VF address field in TP_RSS_CONFIG_VRT. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}
7700
7701 /**
7702 * t4_write_rss_vf_config - write VF RSS Configuration Table
7703 *
7704 * @adapter: the adapter
7705 * @index: the entry in the VF RSS table to write
7706 * @vfl: the VFL to store
7707 * @vfh: the VFH to store
7708 *
7709 * Writes the VF RSS Configuration Table at the specified index with the
7710 * specified (VFL, VFH) values.
7711 */
t4_write_rss_vf_config(struct adapter * adapter,unsigned int index,u32 vfl,u32 vfh,bool sleep_ok)7712 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
7713 u32 vfl, u32 vfh, bool sleep_ok)
7714 {
7715 u32 vrt, mask, data;
7716
7717 if (chip_id(adapter) <= CHELSIO_T5) {
7718 mask = V_VFWRADDR(M_VFWRADDR);
7719 data = V_VFWRADDR(index);
7720 } else {
7721 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
7722 data = V_T6_VFWRADDR(index);
7723 }
7724
7725 /*
7726 * Load up VFL/VFH with the values to be written ...
7727 */
7728 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
7729 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
7730
7731 /*
7732 * Write the VFL/VFH into the VF Table at index'th location.
7733 */
7734 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
7735 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
7736 vrt |= data | F_VFRDEN;
7737 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
7738 }
7739
/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Map register and returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmap;

	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);

	return pfmap;
}
7755
/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Map register.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
}
7767
/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);

	return pfmask;
}
7783
/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}
7795
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Buffer covering the contiguous MIB range OUT_RST..RXT_SEG_LO. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/* Index a counter within val[] by its offset from the range base. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
	/* Combine the HI/LO halves of a 64-bit counter. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The IPv6 counters form an identically-shaped range. */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
7835
/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters, one value per channel
 *	for each counter group.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ARP-drop counters: two consecutive MIB entries. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
7876
/**
 *	t4_tp_get_tnl_stats - read TP's tunnel MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's tunnel packet counters, one value per
 *	channel in each direction.
 */
void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0,
		       sleep_ok);
	t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0,
		       sleep_ok);
}
7895
/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's proxy counters, one value per channel.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
			   bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}
7910
/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL request/response counters, one value
 *	per channel.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}
7928
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.  On T7 and later the
 *	per-port packet in/out counters are also read.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* RQE deferred-request/invalidation counters: two MIB entries. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);

	if (chip_id(adap) >= CHELSIO_T7)
		/* read RDMA stats IN and OUT for all ports at once */
		t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
			       sleep_ok);
}
7947
/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* Byte counter is 64 bits, stored HI then LO per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
7973
/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/* PKTS, DROP, BYTE_HI, BYTE_LO are consecutive MIB entries. */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}
7993
/**
 *	t4_tp_get_tid_stats - read TP's tid MIB counters.
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for tids (four consecutive MIB
 *	entries starting at the delete counter).
 */
void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
			 bool sleep_ok)
{

	t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok);
}
8008
/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* Index 0xff selects read access; the entry number is
		 * carried in the value field — TODO confirm against the
		 * register description. */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}
8031
/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* ROWINDEX 0xffff selects a read of entry (mtu, w). */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			/* Increment occupies the low 13 bits of the entry. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}
8052
/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value using a
 *	read-modify-write through the TP PIO address/data window.  NOTE(review):
 *	the sequence is not atomic; presumably callers serialize access to the
 *	TP PIO window — confirm locking at the call sites.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}
8069
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Both arrays receive
 *	32 entries (one per congestion-control window).
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha[i];
		b[i] = beta[i];
	}
}
8113
8114 /* The minimum additive increment value for the congestion control table */
8115 #define CC_MIN_INCR 2U
8116
/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Expected average packet count per congestion window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round log2(mtu) to the nearest rather than the next
		 * power of two. */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment scales with the per-window
			 * alpha and the MTU payload (mtu - 40 header
			 * bytes), floored at CC_MIN_INCR. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack (mtu index, window, beta, increment) into
			 * one congestion-control table entry. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
8160
8161 /**
8162 * t4_set_pace_tbl - set the pace table
8163 * @adap: the adapter
8164 * @pace_vals: the pace values in microseconds
8165 * @start: index of the first entry in the HW pace table to set
8166 * @n: how many entries to set
8167 *
8168 * Sets (a subset of the) HW pace table.
8169 */
t4_set_pace_tbl(struct adapter * adap,const unsigned int * pace_vals,unsigned int start,unsigned int n)8170 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
8171 unsigned int start, unsigned int n)
8172 {
8173 unsigned int vals[NTX_SCHED], i;
8174 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
8175
8176 if (n > NTX_SCHED)
8177 return -ERANGE;
8178
8179 /* convert values from us to dack ticks, rounding to closest value */
8180 for (i = 0; i < n; i++, pace_vals++) {
8181 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
8182 if (vals[i] > 0x7ff)
8183 return -ERANGE;
8184 if (*pace_vals && vals[i] == 0)
8185 return -ERANGE;
8186 }
8187 for (i = 0; i < n; i++, start++)
8188 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
8189 return 0;
8190 }
8191
/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* cclk kHz -> Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbits/s -> bytes/s (1000 / 8) */
		/*
		 * Search all clocks-per-tick dividers for the (cpt, bpt)
		 * pair whose effective rate bpt * tps is closest to the
		 * target.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;		/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;		/* achievable rate */
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/* bpt left [1, 255] and we already have a
				 * candidate; larger cpt won't improve it. */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Each rate-limit register holds two schedulers: even index in the
	 * low 16 bits, odd index in the high 16 bits. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
8235
/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	/* Two schedulers share one separator register; sched / 2 picks it. */
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;	/* tenths-of-ns * ticks/us -> ticks */
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	/* Read-modify-write: odd schedulers occupy the Q1 field, even
	 * schedulers the Q0 field; the other field is preserved. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	t4_read_reg(adap, A_TP_TM_PIO_DATA);	/* read back, presumably to
						 * flush the posted write */
	return 0;
}
8264
/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	/* cclk appears to be in kHz (cf. t4_set_sched_bps) — i.e. ClkFreq_ms */
	u64 v = (u64)bytes256 * adap->params.vpd.cclk;

	/* v * 62 + v / 2 == 62.5 * v, computed without floating point. */
	return v * 62 + v / 2;
}
8281
/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.  Only entries 0 and 1 are filled on adapters with
 *	two channels.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	/* Tunneled (NIC) traffic: one rate field per channel. */
	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	if (adap->chip_params->nchan > 2) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	/* Offloaded traffic rates. */
	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->chip_params->nchan > 2) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}
8311
/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW. If @tp is %NULL
 *	it indicates that the filter is already written in the register and it
 *	just needs to be enabled or disabled.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst;
	u32 match_ctl_a, match_ctl_b;
	u32 data_reg, mask_reg, cfg;
	/* The enable bit moved between T4 and T5+. */
	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;

	if (idx < 0 || idx >= NTRACE)
		return -EINVAL;

	/* The match-control registers moved on T7. */
	if (chip_id(adap) >= CHELSIO_T7) {
		match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
		match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
	} else {
		match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
		match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
	}

	/* Enable/disable only; the programmed filter is left untouched. */
	if (tp == NULL || !enable) {
		t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate the remaining parameters against the field widths. */
	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_set_reg_field(adap, match_ctl_a, en, 0);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* The HW stores "don't care" bits, hence the inverted mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Writing CTL_A with @en set re-enables the tracer. */
	t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
		     V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
8399
/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst;
	u32 data_reg, mask_reg;

	/* The match-control registers moved on T7. */
	if (chip_id(adap) >= CHELSIO_T7) {
		ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
		ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
	} else {
		ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
		ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
	}

	/* Enable/port/invert fields are laid out differently on T4. */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* HW stores "don't care" bits; invert to recover the match mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
8447
8448 /**
8449 * t4_set_trace_rss_control - configure the trace rss control register
8450 * @adap: the adapter
8451 * @chan: the channel number for RSS control
8452 * @qid: queue number
8453 *
8454 * Configures the MPS tracing RSS control parameter for specified
8455 * @chan channel and @qid queue number.
8456 */
t4_set_trace_rss_control(struct adapter * adap,u8 chan,u16 qid)8457 void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
8458 {
8459 u32 mps_trc_rss_control;
8460
8461 switch (chip_id(adap)) {
8462 case CHELSIO_T4:
8463 mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
8464 break;
8465 case CHELSIO_T5:
8466 case CHELSIO_T6:
8467 mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
8468 break;
8469 case CHELSIO_T7:
8470 default:
8471 mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
8472 break;
8473 }
8474
8475 t4_write_reg(adap, mps_trc_rss_control,
8476 V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
8477 }
8478
/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i (the config register is 1-based). */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: the 64-bit cycle count comes through the
			 * debug window; data[0] = MSBs, data[1] = LSBs.
			 * The window register itself moved on T7. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 chip_id(adap) >= CHELSIO_T7 ?
					 A_T7_PM_TX_DBG_STAT_MSB :
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
8507
8508 /**
8509 * t4_pmrx_get_stats - returns the HW stats from PMRX
8510 * @adap: the adapter
8511 * @cnt: where to store the count statistics
8512 * @cycles: where to store the cycle statistics
8513 *
8514 * Returns performance statistics from PMRX.
8515 */
t4_pmrx_get_stats(struct adapter * adap,u32 cnt[],u64 cycles[])8516 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
8517 {
8518 int i;
8519 u32 data[2];
8520
8521 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
8522 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
8523 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
8524 if (is_t4(adap)) {
8525 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
8526 } else {
8527 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
8528 A_PM_RX_DBG_DATA, data, 2,
8529 A_PM_RX_DBG_STAT_MSB);
8530 cycles[i] = (((u64)data[0] << 32) | data[1]);
8531 }
8532 }
8533 }
8534
/**
 *	t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
 *	@adap: the adapter
 *	@stats: where to store the statistics
 *
 *	Returns performance statistics of PMRX cache.
 */
void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
{
	u8 i, j;

	/* Each cache stat index (selected at 0x100 + i) yields three words:
	 * one count plus two words read through the debug window. */
	for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
		stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
				 &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
	}
}
8553
8554 /**
8555 * t4_get_mps_bg_map - return the buffer groups associated with a port
8556 * @adap: the adapter
8557 * @idx: the port index
8558 *
8559 * Returns a bitmap indicating which MPS buffer groups are associated
8560 * with the given port. Bit i is set if buffer group i is used by the
8561 * port.
8562 */
t4_get_mps_bg_map(struct adapter * adap,int idx)8563 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
8564 {
8565 u32 n;
8566
8567 if (adap->params.mps_bg_map != UINT32_MAX)
8568 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
8569
8570 n = adap->params.nports;
8571 MPASS(n > 0 && n <= MAX_NPORTS);
8572 if (n == 1)
8573 return idx == 0 ? 0xf : 0;
8574 if (n == 2 && chip_id(adap) <= CHELSIO_T5)
8575 return idx < 2 ? (3 << (2 * idx)) : 0;
8576 return 1 << idx;
8577 }
8578
/*
 * TP RX e-channels associated with the port.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	const u32 n = adap->params.nports;
	const u32 all_chan = (1 << adap->chip_params->nchan) - 1;

	switch (adap->params.tp.lb_mode) {
	case 0:
		/* No loopback: a single port owns every channel, a 2-port
		 * T4/T5 splits them in pairs, otherwise one per port. */
		if (n == 1)
			return (all_chan);
		if (n == 2 && chip_id(adap) <= CHELSIO_T5)
			return (3 << (2 * idx));
		return (1 << idx);
	case 1:
		/* lb_mode 1: only valid with one port, which owns all. */
		MPASS(n == 1);
		return (all_chan);
	case 2:
		/* lb_mode 2: up to two ports, two channels each. */
		MPASS(n <= 2);
		return (3 << (2 * idx));
	default:
		CH_ERR(adap, "Unsupported LB mode %d\n",
		    adap->params.tp.lb_mode);
		return (0);
	}
}
8606
8607 /*
8608 * TP RX c-channel associated with the port.
8609 */
t4_get_rx_c_chan(struct adapter * adap,int idx)8610 static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
8611 {
8612 if (adap->params.tp_ch_map != UINT32_MAX)
8613 return (adap->params.tp_ch_map >> (8 * idx)) & 0xff;
8614 return 0;
8615 }
8616
8617 /*
8618 * TP TX c-channel associated with the port.
8619 */
t4_get_tx_c_chan(struct adapter * adap,int idx)8620 static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
8621 {
8622 if (adap->params.tx_tp_ch_map != UINT32_MAX)
8623 return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
8624 return idx;
8625 }
8626
8627 /**
8628 * t4_get_port_type_description - return Port Type string description
8629 * @port_type: firmware Port Type enumeration
8630 */
t4_get_port_type_description(enum fw_port_type port_type)8631 const char *t4_get_port_type_description(enum fw_port_type port_type)
8632 {
8633 static const char *const port_type_description[] = {
8634 "Fiber_XFI",
8635 "Fiber_XAUI",
8636 "BT_SGMII",
8637 "BT_XFI",
8638 "BT_XAUI",
8639 "KX4",
8640 "CX4",
8641 "KX",
8642 "KR",
8643 "SFP",
8644 "BP_AP",
8645 "BP4_AP",
8646 "QSFP_10G",
8647 "QSA",
8648 "QSFP",
8649 "BP40_BA",
8650 "KR4_100G",
8651 "CR4_QSFP",
8652 "CR_QSFP",
8653 "CR2_QSFP",
8654 "SFP28",
8655 "KR_SFP28",
8656 "KR_XLAUI",
8657 };
8658
8659 if (port_type < ARRAY_SIZE(port_type_description))
8660 return port_type_description[port_type];
8661 return "UNKNOWN";
8662 }
8663
8664 /**
8665 * t4_get_port_stats_offset - collect port stats relative to a previous
8666 * snapshot
8667 * @adap: The adapter
8668 * @idx: The port
8669 * @stats: Current stats to fill
8670 * @offset: Previous stats snapshot
8671 */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)8672 void t4_get_port_stats_offset(struct adapter *adap, int idx,
8673 struct port_stats *stats,
8674 struct port_stats *offset)
8675 {
8676 u64 *s, *o;
8677 int i;
8678
8679 t4_get_port_stats(adap, idx, stats);
8680 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
8681 i < (sizeof(struct port_stats)/sizeof(u64)) ;
8682 i++, s++, o++)
8683 *s -= *o;
8684 }
8685
8686 /**
8687 * t4_get_port_stats - collect port statistics
8688 * @adap: the adapter
8689 * @idx: the port index
8690 * @p: the stats structure to fill
8691 *
8692 * Collect statistics related to the given port from HW.
8693 */
t4_get_port_stats(struct adapter * adap,int idx,struct port_stats * p)8694 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
8695 {
8696 struct port_info *pi;
8697 int port_id, tx_chan;
8698 u32 bgmap, stat_ctl;
8699
8700 port_id = adap->port_map[idx];
8701 MPASS(port_id >= 0 && port_id <= adap->params.nports);
8702 pi = adap->port[port_id];
8703
8704 #define GET_STAT(name) \
8705 t4_read_reg64(adap, \
8706 t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
8707 memset(p, 0, sizeof(*p));
8708 for (tx_chan = pi->tx_chan;
8709 tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
8710 p->tx_pause += GET_STAT(TX_PORT_PAUSE);
8711 p->tx_octets += GET_STAT(TX_PORT_BYTES);
8712 p->tx_frames += GET_STAT(TX_PORT_FRAMES);
8713 p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
8714 p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
8715 p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
8716 p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
8717 p->tx_frames_64 += GET_STAT(TX_PORT_64B);
8718 p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
8719 p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
8720 p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
8721 p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
8722 p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
8723 p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
8724 p->tx_drop += GET_STAT(TX_PORT_DROP);
8725 p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
8726 p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
8727 p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
8728 p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
8729 p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
8730 p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
8731 p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
8732 p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);
8733
8734 p->rx_pause += GET_STAT(RX_PORT_PAUSE);
8735 p->rx_octets += GET_STAT(RX_PORT_BYTES);
8736 p->rx_frames += GET_STAT(RX_PORT_FRAMES);
8737 p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
8738 p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
8739 p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
8740 p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
8741 p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
8742 p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
8743 p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
8744 p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
8745 p->rx_frames_64 += GET_STAT(RX_PORT_64B);
8746 p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
8747 p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
8748 p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
8749 p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
8750 p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
8751 p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
8752 p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
8753 p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
8754 p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
8755 p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
8756 p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
8757 p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
8758 p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
8759 p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
8760 if (!is_t6(adap)) {
8761 MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
8762 p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
8763 }
8764 }
8765 #undef GET_STAT
8766
8767 if (is_t6(adap) && pi->fcs_reg != -1)
8768 p->rx_fcs_err = t4_read_reg64(adap,
8769 t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;
8770
8771 if (chip_id(adap) >= CHELSIO_T5) {
8772 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
8773 if (stat_ctl & F_COUNTPAUSESTATTX) {
8774 p->tx_frames -= p->tx_pause;
8775 p->tx_octets -= p->tx_pause * 64;
8776 }
8777 if (stat_ctl & F_COUNTPAUSEMCTX)
8778 p->tx_mcast_frames -= p->tx_pause;
8779 if (stat_ctl & F_COUNTPAUSESTATRX) {
8780 p->rx_frames -= p->rx_pause;
8781 p->rx_octets -= p->rx_pause * 64;
8782 }
8783 if (stat_ctl & F_COUNTPAUSEMCRX)
8784 p->rx_mcast_frames -= p->rx_pause;
8785 }
8786
8787 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
8788 bgmap = pi->mps_bg_map;
8789 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
8790 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
8791 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
8792 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
8793 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
8794 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
8795 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
8796 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
8797 #undef GET_STAT_COM
8798 }
8799
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{

	/* Per-loopback-port 64-bit stat, addressed via the port map. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	t4_port_reg(adap, idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
	/* Common (non-per-port) MPS stat. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Buffer-group counters only exist for real (non-loopback-only)
	 * port indices; gate on the port's buffer-group map. */
	if (idx < adap->params.nports) {
		u32 bg = adap2pinfo(adap, idx)->mps_bg_map;

		p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
		p->ovflow1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
		p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
		p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
		p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
		p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
		p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
		p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
	}

#undef GET_STAT
#undef GET_STAT_COM
}
8848
/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The MAC-port register block moved with each chip generation. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else if (chip_id(adap) < CHELSIO_T7) {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* LO register: last four MAC bytes; HI: first two. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	/* Enable iff a MAC address was supplied. */
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}
8886
/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc. If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else if (chip_id(adap) < CHELSIO_T7)
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	else
		port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* @map has one bit per pattern filter; only 8 are addressable. */
	if (map > 0xff)
		return -EINVAL;

	/*
	 * NOTE(review): only T4- and T5-style EPIO addressing is handled
	 * here, while port_cfg_reg above has a dedicated T7 branch —
	 * confirm the EPIO register layout for T7 parts.
	 */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* The upper mask words are shared by all patterns written below. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}
8953
8954 /* t4_mk_filtdelwr - create a delete filter WR
8955 * @ftid: the filter ID
8956 * @wr: the filter work request to populate
8957 * @qid: ingress queue to receive the delete notification
8958 *
8959 * Creates a filter work request to delete the supplied filter. If @qid is
8960 * negative the delete notification is suppressed.
8961 */
t4_mk_filtdelwr(unsigned int ftid,struct fw_filter_wr * wr,int qid)8962 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
8963 {
8964 memset(wr, 0, sizeof(*wr));
8965 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
8966 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
8967 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
8968 V_FW_FILTER_WR_NOREPLY(qid < 0));
8969 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
8970 if (qid >= 0)
8971 wr->rx_chan_rx_rpl_iq =
8972 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
8973 }
8974
/* Populate the common header of FW command @var: opcode FW_<cmd>_CMD, the
 * request flag, READ or WRITE per @rd_wr, and the length in 16-byte units. */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
8981
/*
 * Write @val to @addr in the firmware's address space via an LDST command
 * issued through mailbox @mbox.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.addrval.addr = cpu_to_be32(addr);
	cmd.u.addrval.val = cpu_to_be32(val);

	/* No reply payload is needed for a write. */
	return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
9000
9001 /**
9002 * t4_mdio_rd - read a PHY register through MDIO
9003 * @adap: the adapter
9004 * @mbox: mailbox to use for the FW command
9005 * @phy_addr: the PHY address
9006 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
9007 * @reg: the register to read
9008 * @valp: where to store the value
9009 *
9010 * Issues a FW command through the given mailbox to read a PHY register.
9011 */
t4_mdio_rd(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int * valp)9012 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
9013 unsigned int mmd, unsigned int reg, unsigned int *valp)
9014 {
9015 int ret;
9016 u32 ldst_addrspace;
9017 struct fw_ldst_cmd c;
9018
9019 memset(&c, 0, sizeof(c));
9020 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
9021 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9022 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9023 ldst_addrspace);
9024 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9025 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
9026 V_FW_LDST_CMD_MMD(mmd));
9027 c.u.mdio.raddr = cpu_to_be16(reg);
9028
9029 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9030 if (ret == 0)
9031 *valp = be16_to_cpu(c.u.mdio.rval);
9032 return ret;
9033 }
9034
9035 /**
9036 * t4_mdio_wr - write a PHY register through MDIO
9037 * @adap: the adapter
9038 * @mbox: mailbox to use for the FW command
9039 * @phy_addr: the PHY address
9040 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
9041 * @reg: the register to write
9042 * @valp: value to write
9043 *
9044 * Issues a FW command through the given mailbox to write a PHY register.
9045 */
t4_mdio_wr(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int val)9046 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
9047 unsigned int mmd, unsigned int reg, unsigned int val)
9048 {
9049 u32 ldst_addrspace;
9050 struct fw_ldst_cmd c;
9051
9052 memset(&c, 0, sizeof(c));
9053 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
9054 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9055 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9056 ldst_addrspace);
9057 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9058 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
9059 V_FW_LDST_CMD_MMD(mmd));
9060 c.u.mdio.raddr = cpu_to_be16(reg);
9061 c.u.mdio.rval = cpu_to_be16(val);
9062
9063 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9064 }
9065
9066 /**
9067 *
9068 * t4_sge_decode_idma_state - decode the idma state
9069 * @adap: the adapter
9070 * @state: the state idma is stuck in
9071 */
t4_sge_decode_idma_state(struct adapter * adapter,int state)9072 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
9073 {
9074 static const char * const t4_decode[] = {
9075 "IDMA_IDLE",
9076 "IDMA_PUSH_MORE_CPL_FIFO",
9077 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
9078 "Not used",
9079 "IDMA_PHYSADDR_SEND_PCIEHDR",
9080 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
9081 "IDMA_PHYSADDR_SEND_PAYLOAD",
9082 "IDMA_SEND_FIFO_TO_IMSG",
9083 "IDMA_FL_REQ_DATA_FL_PREP",
9084 "IDMA_FL_REQ_DATA_FL",
9085 "IDMA_FL_DROP",
9086 "IDMA_FL_H_REQ_HEADER_FL",
9087 "IDMA_FL_H_SEND_PCIEHDR",
9088 "IDMA_FL_H_PUSH_CPL_FIFO",
9089 "IDMA_FL_H_SEND_CPL",
9090 "IDMA_FL_H_SEND_IP_HDR_FIRST",
9091 "IDMA_FL_H_SEND_IP_HDR",
9092 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
9093 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
9094 "IDMA_FL_H_SEND_IP_HDR_PADDING",
9095 "IDMA_FL_D_SEND_PCIEHDR",
9096 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
9097 "IDMA_FL_D_REQ_NEXT_DATA_FL",
9098 "IDMA_FL_SEND_PCIEHDR",
9099 "IDMA_FL_PUSH_CPL_FIFO",
9100 "IDMA_FL_SEND_CPL",
9101 "IDMA_FL_SEND_PAYLOAD_FIRST",
9102 "IDMA_FL_SEND_PAYLOAD",
9103 "IDMA_FL_REQ_NEXT_DATA_FL",
9104 "IDMA_FL_SEND_NEXT_PCIEHDR",
9105 "IDMA_FL_SEND_PADDING",
9106 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
9107 "IDMA_FL_SEND_FIFO_TO_IMSG",
9108 "IDMA_FL_REQ_DATAFL_DONE",
9109 "IDMA_FL_REQ_HEADERFL_DONE",
9110 };
9111 static const char * const t5_decode[] = {
9112 "IDMA_IDLE",
9113 "IDMA_ALMOST_IDLE",
9114 "IDMA_PUSH_MORE_CPL_FIFO",
9115 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
9116 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
9117 "IDMA_PHYSADDR_SEND_PCIEHDR",
9118 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
9119 "IDMA_PHYSADDR_SEND_PAYLOAD",
9120 "IDMA_SEND_FIFO_TO_IMSG",
9121 "IDMA_FL_REQ_DATA_FL",
9122 "IDMA_FL_DROP",
9123 "IDMA_FL_DROP_SEND_INC",
9124 "IDMA_FL_H_REQ_HEADER_FL",
9125 "IDMA_FL_H_SEND_PCIEHDR",
9126 "IDMA_FL_H_PUSH_CPL_FIFO",
9127 "IDMA_FL_H_SEND_CPL",
9128 "IDMA_FL_H_SEND_IP_HDR_FIRST",
9129 "IDMA_FL_H_SEND_IP_HDR",
9130 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
9131 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
9132 "IDMA_FL_H_SEND_IP_HDR_PADDING",
9133 "IDMA_FL_D_SEND_PCIEHDR",
9134 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
9135 "IDMA_FL_D_REQ_NEXT_DATA_FL",
9136 "IDMA_FL_SEND_PCIEHDR",
9137 "IDMA_FL_PUSH_CPL_FIFO",
9138 "IDMA_FL_SEND_CPL",
9139 "IDMA_FL_SEND_PAYLOAD_FIRST",
9140 "IDMA_FL_SEND_PAYLOAD",
9141 "IDMA_FL_REQ_NEXT_DATA_FL",
9142 "IDMA_FL_SEND_NEXT_PCIEHDR",
9143 "IDMA_FL_SEND_PADDING",
9144 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
9145 };
9146 static const char * const t6_decode[] = {
9147 "IDMA_IDLE",
9148 "IDMA_PUSH_MORE_CPL_FIFO",
9149 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
9150 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
9151 "IDMA_PHYSADDR_SEND_PCIEHDR",
9152 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
9153 "IDMA_PHYSADDR_SEND_PAYLOAD",
9154 "IDMA_FL_REQ_DATA_FL",
9155 "IDMA_FL_DROP",
9156 "IDMA_FL_DROP_SEND_INC",
9157 "IDMA_FL_H_REQ_HEADER_FL",
9158 "IDMA_FL_H_SEND_PCIEHDR",
9159 "IDMA_FL_H_PUSH_CPL_FIFO",
9160 "IDMA_FL_H_SEND_CPL",
9161 "IDMA_FL_H_SEND_IP_HDR_FIRST",
9162 "IDMA_FL_H_SEND_IP_HDR",
9163 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
9164 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
9165 "IDMA_FL_H_SEND_IP_HDR_PADDING",
9166 "IDMA_FL_D_SEND_PCIEHDR",
9167 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
9168 "IDMA_FL_D_REQ_NEXT_DATA_FL",
9169 "IDMA_FL_SEND_PCIEHDR",
9170 "IDMA_FL_PUSH_CPL_FIFO",
9171 "IDMA_FL_SEND_CPL",
9172 "IDMA_FL_SEND_PAYLOAD_FIRST",
9173 "IDMA_FL_SEND_PAYLOAD",
9174 "IDMA_FL_REQ_NEXT_DATA_FL",
9175 "IDMA_FL_SEND_NEXT_PCIEHDR",
9176 "IDMA_FL_SEND_PADDING",
9177 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
9178 };
9179 static const u32 sge_regs[] = {
9180 A_SGE_DEBUG_DATA_LOW_INDEX_2,
9181 A_SGE_DEBUG_DATA_LOW_INDEX_3,
9182 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
9183 };
9184 const char * const *sge_idma_decode;
9185 int sge_idma_decode_nstates;
9186 int i;
9187 unsigned int chip_version = chip_id(adapter);
9188
9189 /* Select the right set of decode strings to dump depending on the
9190 * adapter chip type.
9191 */
9192 switch (chip_version) {
9193 case CHELSIO_T4:
9194 sge_idma_decode = (const char * const *)t4_decode;
9195 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
9196 break;
9197
9198 case CHELSIO_T5:
9199 sge_idma_decode = (const char * const *)t5_decode;
9200 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
9201 break;
9202
9203 case CHELSIO_T6:
9204 case CHELSIO_T7:
9205 sge_idma_decode = (const char * const *)t6_decode;
9206 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
9207 break;
9208
9209 default:
9210 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
9211 return;
9212 }
9213
9214 if (state < sge_idma_decode_nstates)
9215 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
9216 else
9217 CH_WARN(adapter, "idma state %d unknown\n", state);
9218
9219 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
9220 CH_WARN(adapter, "SGE register %#x value %#x\n",
9221 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
9222 }
9223
9224 /**
9225 * t4_sge_ctxt_flush - flush the SGE context cache
9226 * @adap: the adapter
9227 * @mbox: mailbox to use for the FW command
9228 *
9229 * Issues a FW command through the given mailbox to flush the
9230 * SGE context cache.
9231 */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)9232 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
9233 {
9234 int ret;
9235 u32 ldst_addrspace;
9236 struct fw_ldst_cmd c;
9237
9238 memset(&c, 0, sizeof(c));
9239 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
9240 FW_LDST_ADDRSPC_SGE_EGRC :
9241 FW_LDST_ADDRSPC_SGE_INGC);
9242 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9243 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9244 ldst_addrspace);
9245 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9246 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
9247
9248 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9249 return ret;
9250 }
9251
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 *	Retries on -EBUSY/-ETIMEDOUT up to FW_CMD_HELLO_RETRIES times.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * Encode the caller's mastership preference: MASTERDIS forbids,
	 * MASTERFORCE demands, and MBMASTER names our mailbox only when
	 * mastership is being forced.
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	/* Decode the firmware's reply: master mailbox and device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll the PCIE_FW register every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	/* Success: return the Master PF's mailbox (non-negative). */
	return master_mbox;
}
9379
9380 /**
9381 * t4_fw_bye - end communication with FW
9382 * @adap: the adapter
9383 * @mbox: mailbox to use for the FW command
9384 *
9385 * Issues a command to terminate communication with FW.
9386 */
t4_fw_bye(struct adapter * adap,unsigned int mbox)9387 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
9388 {
9389 struct fw_bye_cmd c;
9390
9391 memset(&c, 0, sizeof(c));
9392 INIT_CMD(c, BYE, WRITE);
9393 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9394 }
9395
9396 /**
9397 * t4_fw_reset - issue a reset to FW
9398 * @adap: the adapter
9399 * @mbox: mailbox to use for the FW command
9400 * @reset: specifies the type of reset to perform
9401 *
9402 * Issues a reset command of the specified type to FW.
9403 */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)9404 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
9405 {
9406 struct fw_reset_cmd c;
9407
9408 memset(&c, 0, sizeof(c));
9409 INIT_CMD(c, RESET, WRITE);
9410 c.val = cpu_to_be32(reset);
9411 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9412 }
9413
9414 /**
9415 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
9416 * @adap: the adapter
9417 * @mbox: mailbox to use for the FW RESET command (if desired)
9418 * @force: force uP into RESET even if FW RESET command fails
9419 *
9420 * Issues a RESET command to firmware (if desired) with a HALT indication
9421 * and then puts the microprocessor into RESET state. The RESET command
9422 * will only be issued if a legitimate mailbox is provided (mbox <=
9423 * M_PCIE_FW_MASTER).
9424 *
9425 * This is generally used in order for the host to safely manipulate the
9426 * adapter without fear of conflicting with whatever the firmware might
9427 * be doing. The only way out of this state is to RESTART the firmware
9428 * ...
9429 */
t4_fw_halt(struct adapter * adap,unsigned int mbox,int force)9430 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
9431 {
9432 int ret = 0;
9433
9434 /*
9435 * If a legitimate mailbox is provided, issue a RESET command
9436 * with a HALT indication.
9437 */
9438 if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
9439 struct fw_reset_cmd c;
9440
9441 memset(&c, 0, sizeof(c));
9442 INIT_CMD(c, RESET, WRITE);
9443 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
9444 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
9445 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9446 }
9447
9448 /*
9449 * Normally we won't complete the operation if the firmware RESET
9450 * command fails but if our caller insists we'll go ahead and put the
9451 * uP into RESET. This can be useful if the firmware is hung or even
9452 * missing ... We'll have to take the risk of putting the uP into
9453 * RESET without the cooperation of firmware in that case.
9454 *
9455 * We also force the firmware's HALT flag to be on in case we bypassed
9456 * the firmware RESET command above or we're dealing with old firmware
9457 * which doesn't have the HALT capability. This will serve as a flag
9458 * for the incoming firmware to know that it's coming out of a HALT
9459 * rather than a RESET ... if it's new enough to understand that ...
9460 */
9461 if (ret == 0 || force) {
9462 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
9463 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
9464 F_PCIE_FW_HALT);
9465 }
9466
9467 /*
9468 * And we always return the result of the firmware RESET command
9469 * even when we force the uP into RESET ...
9470 */
9471 return ret;
9472 }
9473
9474 /**
9475 * t4_fw_restart - restart the firmware by taking the uP out of RESET
9476 * @adap: the adapter
9477 *
9478 * Restart firmware previously halted by t4_fw_halt(). On successful
9479 * return the previous PF Master remains as the new PF Master and there
9480 * is no need to issue a new HELLO command, etc.
9481 */
t4_fw_restart(struct adapter * adap,unsigned int mbox)9482 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
9483 {
9484 int ms;
9485
9486 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
9487 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
9488 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
9489 return FW_SUCCESS;
9490 msleep(100);
9491 ms += 100;
9492 }
9493
9494 return -ETIMEDOUT;
9495 }
9496
9497 /**
9498 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
9499 * @adap: the adapter
9500 * @mbox: mailbox to use for the FW RESET command (if desired)
9501 * @fw_data: the firmware image to write
9502 * @size: image size
9503 * @force: force upgrade even if firmware doesn't cooperate
9504 *
9505 * Perform all of the steps necessary for upgrading an adapter's
9506 * firmware image. Normally this requires the cooperation of the
9507 * existing firmware in order to halt all existing activities
9508 * but if an invalid mailbox token is passed in we skip that step
9509 * (though we'll still put the adapter microprocessor into RESET in
9510 * that case).
9511 *
9512 * On successful return the new firmware will have been loaded and
9513 * the adapter will have been fully RESET losing all previous setup
9514 * state. On unsuccessful return the adapter may be completely hosed ...
9515 * positive errno indicates that the adapter is ~probably~ intact, a
9516 * negative errno indicates that things are looking bad ...
9517 */
t4_fw_upgrade(struct adapter * adap,unsigned int mbox,const u8 * fw_data,unsigned int size,int force)9518 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
9519 const u8 *fw_data, unsigned int size, int force)
9520 {
9521 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
9522 unsigned int bootstrap =
9523 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
9524 int ret;
9525
9526 if (!t4_fw_matches_chip(adap, fw_hdr))
9527 return -EINVAL;
9528
9529 if (!bootstrap) {
9530 ret = t4_fw_halt(adap, mbox, force);
9531 if (ret < 0 && !force)
9532 return ret;
9533 }
9534
9535 ret = t4_load_fw(adap, fw_data, size);
9536 if (ret < 0 || bootstrap)
9537 return ret;
9538
9539 return t4_fw_restart(adap, mbox);
9540 }
9541
9542 /**
9543 * t4_fw_initialize - ask FW to initialize the device
9544 * @adap: the adapter
9545 * @mbox: mailbox to use for the FW command
9546 *
9547 * Issues a command to FW to partially initialize the device. This
9548 * performs initialization that generally doesn't depend on user input.
9549 */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)9550 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
9551 {
9552 struct fw_initialize_cmd c;
9553
9554 memset(&c, 0, sizeof(c));
9555 INIT_CMD(c, INITIALIZE, WRITE);
9556 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9557 }
9558
9559 /**
9560 * t4_query_params_rw - query FW or device parameters
9561 * @adap: the adapter
9562 * @mbox: mailbox to use for the FW command
9563 * @pf: the PF
9564 * @vf: the VF
9565 * @nparams: the number of parameters
9566 * @params: the parameter names
9567 * @val: the parameter values
9568 * @rw: Write and read flag
9569 *
9570 * Reads the value of FW or device parameters. Up to 7 parameters can be
9571 * queried at once.
9572 */
t4_query_params_rw(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val,int rw)9573 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
9574 unsigned int vf, unsigned int nparams, const u32 *params,
9575 u32 *val, int rw)
9576 {
9577 int i, ret;
9578 struct fw_params_cmd c;
9579 __be32 *p = &c.param[0].mnem;
9580
9581 if (nparams > 7)
9582 return -EINVAL;
9583
9584 memset(&c, 0, sizeof(c));
9585 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
9586 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9587 V_FW_PARAMS_CMD_PFN(pf) |
9588 V_FW_PARAMS_CMD_VFN(vf));
9589 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
9590
9591 for (i = 0; i < nparams; i++) {
9592 *p++ = cpu_to_be32(*params++);
9593 if (rw)
9594 *p = cpu_to_be32(*(val + i));
9595 p++;
9596 }
9597
9598 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9599
9600 /*
9601 * We always copy back the results, even if there's an error. We'll
9602 * get an error if any of the parameters was unknown to the Firmware,
9603 * but there will be results for the others ... (Older Firmware
9604 * stopped at the first unknown parameter; newer Firmware processes
9605 * them all and flags the unknown parameters with a return value of
9606 * ~0UL.)
9607 */
9608 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
9609 *val++ = be32_to_cpu(*p);
9610
9611 return ret;
9612 }
9613
/*
 * t4_query_params - read-only convenience wrapper around
 * t4_query_params_rw() (rw = 0); queries up to 7 FW or device parameters
 * without writing new values.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
9620
9621 /**
9622 * t4_set_params_timeout - sets FW or device parameters
9623 * @adap: the adapter
9624 * @mbox: mailbox to use for the FW command
9625 * @pf: the PF
9626 * @vf: the VF
9627 * @nparams: the number of parameters
9628 * @params: the parameter names
9629 * @val: the parameter values
9630 * @timeout: the timeout time
9631 *
9632 * Sets the value of FW or device parameters. Up to 7 parameters can be
9633 * specified at once.
9634 */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)9635 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
9636 unsigned int pf, unsigned int vf,
9637 unsigned int nparams, const u32 *params,
9638 const u32 *val, int timeout)
9639 {
9640 struct fw_params_cmd c;
9641 __be32 *p = &c.param[0].mnem;
9642
9643 if (nparams > 7)
9644 return -EINVAL;
9645
9646 memset(&c, 0, sizeof(c));
9647 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
9648 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9649 V_FW_PARAMS_CMD_PFN(pf) |
9650 V_FW_PARAMS_CMD_VFN(vf));
9651 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
9652
9653 while (nparams--) {
9654 *p++ = cpu_to_be32(*params++);
9655 *p++ = cpu_to_be32(*val++);
9656 }
9657
9658 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
9659 }
9660
9661 /**
9662 * t4_set_params - sets FW or device parameters
9663 * @adap: the adapter
9664 * @mbox: mailbox to use for the FW command
9665 * @pf: the PF
9666 * @vf: the VF
9667 * @nparams: the number of parameters
9668 * @params: the parameter names
9669 * @val: the parameter values
9670 *
9671 * Sets the value of FW or device parameters. Up to 7 parameters can be
9672 * specified at once.
9673 */
t4_set_params(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val)9674 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
9675 unsigned int vf, unsigned int nparams, const u32 *params,
9676 const u32 *val)
9677 {
9678 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
9679 FW_CMD_MAX_TIMEOUT);
9680 }
9681
9682 /**
9683 * t4_cfg_pfvf - configure PF/VF resource limits
9684 * @adap: the adapter
9685 * @mbox: mailbox to use for the FW command
9686 * @pf: the PF being configured
9687 * @vf: the VF being configured
9688 * @txq: the max number of egress queues
9689 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
9690 * @rxqi: the max number of interrupt-capable ingress queues
9691 * @rxq: the max number of interruptless ingress queues
9692 * @tc: the PCI traffic class
9693 * @vi: the max number of virtual interfaces
9694 * @cmask: the channel access rights mask for the PF/VF
9695 * @pmask: the port access rights mask for the PF/VF
9696 * @nexact: the maximum number of exact MPS filters
9697 * @rcaps: read capabilities
9698 * @wxcaps: write/execute capabilities
9699 *
9700 * Configures resource limits and capabilities for a physical or virtual
9701 * function.
9702 */
t4_cfg_pfvf(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int txq,unsigned int txq_eth_ctrl,unsigned int rxqi,unsigned int rxq,unsigned int tc,unsigned int vi,unsigned int cmask,unsigned int pmask,unsigned int nexact,unsigned int rcaps,unsigned int wxcaps)9703 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
9704 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
9705 unsigned int rxqi, unsigned int rxq, unsigned int tc,
9706 unsigned int vi, unsigned int cmask, unsigned int pmask,
9707 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
9708 {
9709 struct fw_pfvf_cmd c;
9710
9711 memset(&c, 0, sizeof(c));
9712 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
9713 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
9714 V_FW_PFVF_CMD_VFN(vf));
9715 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
9716 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
9717 V_FW_PFVF_CMD_NIQ(rxq));
9718 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
9719 V_FW_PFVF_CMD_PMASK(pmask) |
9720 V_FW_PFVF_CMD_NEQ(txq));
9721 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
9722 V_FW_PFVF_CMD_NVI(vi) |
9723 V_FW_PFVF_CMD_NEXACTF(nexact));
9724 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
9725 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
9726 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
9727 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9728 }
9729
/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@vfvld: where to store the VF-valid bit (may be NULL)
 *	@vin: where to store the VI number (may be NULL)
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     uint8_t *vfvld, uint16_t *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* Firmware encodes the MAC count as (count - 1). */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	/* The reply's VIID field is the (non-negative) return value. */
	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/*
		 * Copy the additional MAC addresses; each case deliberately
		 * falls through to also copy the lower-numbered ones.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/*
	 * Newer firmware reports VFVLD/VIN explicitly in the command reply;
	 * otherwise extract them from the VIID encoding.
	 */
	if (vfvld) {
		*vfvld = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIVLD(ret);
	}
	if (vin) {
		*vin = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIN(ret);
	}

	return ret;
}
9804
9805 /**
9806 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
9807 * @adap: the adapter
9808 * @mbox: mailbox to use for the FW command
9809 * @port: physical port associated with the VI
9810 * @pf: the PF owning the VI
9811 * @vf: the VF owning the VI
9812 * @nmac: number of MAC addresses needed (1 to 5)
9813 * @mac: the MAC addresses of the VI
9814 * @rss_size: size of RSS table slice associated with this VI
9815 *
9816 * backwards compatible and convieniance routine to allocate a Virtual
9817 * Interface with a Ethernet Port Application Function and Intrustion
9818 * Detection System disabled.
9819 */
t4_alloc_vi(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,u16 * rss_size,uint8_t * vfvld,uint16_t * vin)9820 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
9821 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
9822 u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
9823 {
9824 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
9825 vfvld, vin, FW_VI_FUNC_ETH, 0);
9826 }
9827
9828 /**
9829 * t4_free_vi - free a virtual interface
9830 * @adap: the adapter
9831 * @mbox: mailbox to use for the FW command
9832 * @pf: the PF owning the VI
9833 * @vf: the VF owning the VI
9834 * @viid: virtual interface identifiler
9835 *
9836 * Free a previously allocated virtual interface.
9837 */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)9838 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
9839 unsigned int vf, unsigned int viid)
9840 {
9841 struct fw_vi_cmd c;
9842
9843 memset(&c, 0, sizeof(c));
9844 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
9845 F_FW_CMD_REQUEST |
9846 F_FW_CMD_EXEC |
9847 V_FW_VI_CMD_PFN(pf) |
9848 V_FW_VI_CMD_VFN(vf));
9849 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
9850 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
9851
9852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9853 }
9854
9855 /**
9856 * t4_set_rxmode - set Rx properties of a virtual interface
9857 * @adap: the adapter
9858 * @mbox: mailbox to use for the FW command
9859 * @viid: the VI id
9860 * @mtu: the new MTU or -1
9861 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
9862 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
9863 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
9864 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
9865 * @sleep_ok: if true we may sleep while awaiting command completion
9866 *
9867 * Sets Rx properties of a virtual interface.
9868 */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)9869 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
9870 int mtu, int promisc, int all_multi, int bcast, int vlanex,
9871 bool sleep_ok)
9872 {
9873 struct fw_vi_rxmode_cmd c;
9874
9875 /* convert to FW values */
9876 if (mtu < 0)
9877 mtu = M_FW_VI_RXMODE_CMD_MTU;
9878 if (promisc < 0)
9879 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
9880 if (all_multi < 0)
9881 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
9882 if (bcast < 0)
9883 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
9884 if (vlanex < 0)
9885 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
9886
9887 memset(&c, 0, sizeof(c));
9888 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
9889 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9890 V_FW_VI_RXMODE_CMD_VIID(viid));
9891 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
9892 c.mtu_to_vlanexen =
9893 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
9894 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
9895 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
9896 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
9897 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
9898 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
9899 }
9900
9901 /**
9902 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
9903 * @adap: the adapter
9904 * @viid: the VI id
9905 * @mac: the MAC address
9906 * @mask: the mask
9907 * @vni: the VNI id for the tunnel protocol
9908 * @vni_mask: mask for the VNI id
9909 * @dip_hit: to enable DIP match for the MPS entry
9910 * @lookup_type: MAC address for inner (1) or outer (0) header
9911 * @sleep_ok: call is allowed to sleep
9912 *
9913 * Allocates an MPS entry with specified MAC address and VNI value.
9914 *
9915 * Returns a negative error number or the allocated index for this mac.
9916 */
t4_alloc_encap_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int vni,unsigned int vni_mask,u8 dip_hit,u8 lookup_type,bool sleep_ok)9917 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
9918 const u8 *addr, const u8 *mask, unsigned int vni,
9919 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
9920 bool sleep_ok)
9921 {
9922 struct fw_vi_mac_cmd c;
9923 struct fw_vi_mac_vni *p = c.u.exact_vni;
9924 int ret = 0;
9925 u32 val;
9926
9927 memset(&c, 0, sizeof(c));
9928 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9929 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9930 V_FW_VI_MAC_CMD_VIID(viid));
9931 val = V_FW_CMD_LEN16(1) |
9932 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
9933 c.freemacs_to_len16 = cpu_to_be32(val);
9934 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
9935 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
9936 memcpy(p->macaddr, addr, sizeof(p->macaddr));
9937 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
9938
9939 p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
9940 V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
9941 V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
9942 p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));
9943
9944 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9945 if (ret == 0)
9946 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
9947 return ret;
9948 }
9949
9950 /**
9951 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
9952 * @adap: the adapter
9953 * @viid: the VI id
9954 * @mac: the MAC address
9955 * @mask: the mask
9956 * @idx: index at which to add this entry
9957 * @port_id: the port index
9958 * @lookup_type: MAC address for inner (1) or outer (0) header
9959 * @sleep_ok: call is allowed to sleep
9960 *
9961 * Adds the mac entry at the specified index using raw mac interface.
9962 *
9963 * Returns a negative error number or the allocated index for this mac.
9964 */
t4_alloc_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)9965 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
9966 const u8 *addr, const u8 *mask, unsigned int idx,
9967 u8 lookup_type, u8 port_id, bool sleep_ok)
9968 {
9969 int ret = 0;
9970 struct fw_vi_mac_cmd c;
9971 struct fw_vi_mac_raw *p = &c.u.raw;
9972 u32 val;
9973
9974 memset(&c, 0, sizeof(c));
9975 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9976 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9977 V_FW_VI_MAC_CMD_VIID(viid));
9978 val = V_FW_CMD_LEN16(1) |
9979 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
9980 c.freemacs_to_len16 = cpu_to_be32(val);
9981
9982 /* Specify that this is an inner mac address */
9983 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
9984
9985 /* Lookup Type. Outer header: 0, Inner header: 1 */
9986 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
9987 V_DATAPORTNUM(port_id));
9988 /* Lookup mask and port mask */
9989 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
9990 V_DATAPORTNUM(M_DATAPORTNUM));
9991
9992 /* Copy the address and the mask */
9993 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
9994 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
9995
9996 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9997 if (ret == 0) {
9998 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
9999 if (ret != idx)
10000 ret = -ENOMEM;
10001 }
10002
10003 return ret;
10004 }
10005
10006 /**
10007 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
10008 * @adap: the adapter
10009 * @mbox: mailbox to use for the FW command
10010 * @viid: the VI id
10011 * @free: if true any existing filters for this VI id are first removed
10012 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
10013 * @addr: the MAC address(es)
10014 * @idx: where to store the index of each allocated filter
10015 * @hash: pointer to hash address filter bitmap
10016 * @sleep_ok: call is allowed to sleep
10017 *
10018 * Allocates an exact-match filter for each of the supplied addresses and
10019 * sets it to the corresponding address. If @idx is not %NULL it should
10020 * have at least @naddr entries, each of which will be set to the index of
10021 * the filter allocated for the corresponding MAC address. If a filter
10022 * could not be allocated for an address its index is set to 0xffff.
10023 * If @hash is not %NULL addresses that fail to allocate an exact filter
10024 * are hashed and update the hash filter bitmap pointed at by @hash.
10025 *
10026 * Returns a negative error number or the number of filters allocated.
10027 */
t4_alloc_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,bool free,unsigned int naddr,const u8 ** addr,u16 * idx,u64 * hash,bool sleep_ok)10028 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
10029 unsigned int viid, bool free, unsigned int naddr,
10030 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
10031 {
10032 int offset, ret = 0;
10033 struct fw_vi_mac_cmd c;
10034 unsigned int nfilters = 0;
10035 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
10036 unsigned int rem = naddr;
10037
10038 if (naddr > max_naddr)
10039 return -EINVAL;
10040
10041 for (offset = 0; offset < naddr ; /**/) {
10042 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
10043 ? rem
10044 : ARRAY_SIZE(c.u.exact));
10045 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
10046 u.exact[fw_naddr]), 16);
10047 struct fw_vi_mac_exact *p;
10048 int i;
10049
10050 memset(&c, 0, sizeof(c));
10051 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10052 F_FW_CMD_REQUEST |
10053 F_FW_CMD_WRITE |
10054 V_FW_CMD_EXEC(free) |
10055 V_FW_VI_MAC_CMD_VIID(viid));
10056 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
10057 V_FW_CMD_LEN16(len16));
10058
10059 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
10060 p->valid_to_idx =
10061 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
10062 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
10063 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
10064 }
10065
10066 /*
10067 * It's okay if we run out of space in our MAC address arena.
10068 * Some of the addresses we submit may get stored so we need
10069 * to run through the reply to see what the results were ...
10070 */
10071 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
10072 if (ret && ret != -FW_ENOMEM)
10073 break;
10074
10075 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
10076 u16 index = G_FW_VI_MAC_CMD_IDX(
10077 be16_to_cpu(p->valid_to_idx));
10078
10079 if (idx)
10080 idx[offset+i] = (index >= max_naddr
10081 ? 0xffff
10082 : index);
10083 if (index < max_naddr)
10084 nfilters++;
10085 else if (hash)
10086 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
10087 }
10088
10089 free = false;
10090 offset += fw_naddr;
10091 rem -= fw_naddr;
10092 }
10093
10094 if (ret == 0 || ret == -FW_ENOMEM)
10095 ret = nfilters;
10096 return ret;
10097 }
10098
10099 /**
10100 * t4_free_encap_mac_filt - frees MPS entry at given index
10101 * @adap: the adapter
10102 * @viid: the VI id
10103 * @idx: index of MPS entry to be freed
10104 * @sleep_ok: call is allowed to sleep
10105 *
10106 * Frees the MPS entry at supplied index
10107 *
10108 * Returns a negative error number or zero on success
10109 */
t4_free_encap_mac_filt(struct adapter * adap,unsigned int viid,int idx,bool sleep_ok)10110 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
10111 int idx, bool sleep_ok)
10112 {
10113 struct fw_vi_mac_exact *p;
10114 struct fw_vi_mac_cmd c;
10115 u8 addr[] = {0,0,0,0,0,0};
10116 int ret = 0;
10117 u32 exact;
10118
10119 memset(&c, 0, sizeof(c));
10120 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10121 F_FW_CMD_REQUEST |
10122 F_FW_CMD_WRITE |
10123 V_FW_CMD_EXEC(0) |
10124 V_FW_VI_MAC_CMD_VIID(viid));
10125 exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
10126 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
10127 exact |
10128 V_FW_CMD_LEN16(1));
10129 p = c.u.exact;
10130 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
10131 V_FW_VI_MAC_CMD_IDX(idx));
10132 memcpy(p->macaddr, addr, sizeof(p->macaddr));
10133
10134 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
10135 return ret;
10136 }
10137
10138 /**
10139 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
10140 * @adap: the adapter
10141 * @viid: the VI id
10142 * @addr: the MAC address
10143 * @mask: the mask
10144 * @idx: index of the entry in mps tcam
10145 * @lookup_type: MAC address for inner (1) or outer (0) header
10146 * @port_id: the port index
10147 * @sleep_ok: call is allowed to sleep
10148 *
10149 * Removes the mac entry at the specified index using raw mac interface.
10150 *
10151 * Returns a negative error number on failure.
10152 */
t4_free_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)10153 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
10154 const u8 *addr, const u8 *mask, unsigned int idx,
10155 u8 lookup_type, u8 port_id, bool sleep_ok)
10156 {
10157 struct fw_vi_mac_cmd c;
10158 struct fw_vi_mac_raw *p = &c.u.raw;
10159 u32 raw;
10160
10161 memset(&c, 0, sizeof(c));
10162 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10163 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
10164 V_FW_CMD_EXEC(0) |
10165 V_FW_VI_MAC_CMD_VIID(viid));
10166 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
10167 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
10168 raw |
10169 V_FW_CMD_LEN16(1));
10170
10171 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
10172 FW_VI_MAC_ID_BASED_FREE);
10173
10174 /* Lookup Type. Outer header: 0, Inner header: 1 */
10175 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
10176 V_DATAPORTNUM(port_id));
10177 /* Lookup mask and port mask */
10178 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
10179 V_DATAPORTNUM(M_DATAPORTNUM));
10180
10181 /* Copy the address and the mask */
10182 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
10183 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
10184
10185 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
10186 }
10187
10188 /**
10189 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
10190 * @adap: the adapter
10191 * @mbox: mailbox to use for the FW command
10192 * @viid: the VI id
10193 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
10194 * @addr: the MAC address(es)
10195 * @sleep_ok: call is allowed to sleep
10196 *
10197 * Frees the exact-match filter for each of the supplied addresses
10198 *
10199 * Returns a negative error number or the number of filters freed.
10200 */
t4_free_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int naddr,const u8 ** addr,bool sleep_ok)10201 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
10202 unsigned int viid, unsigned int naddr,
10203 const u8 **addr, bool sleep_ok)
10204 {
10205 int offset, ret = 0;
10206 struct fw_vi_mac_cmd c;
10207 unsigned int nfilters = 0;
10208 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
10209 unsigned int rem = naddr;
10210
10211 if (naddr > max_naddr)
10212 return -EINVAL;
10213
10214 for (offset = 0; offset < (int)naddr ; /**/) {
10215 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
10216 ? rem
10217 : ARRAY_SIZE(c.u.exact));
10218 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
10219 u.exact[fw_naddr]), 16);
10220 struct fw_vi_mac_exact *p;
10221 int i;
10222
10223 memset(&c, 0, sizeof(c));
10224 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10225 F_FW_CMD_REQUEST |
10226 F_FW_CMD_WRITE |
10227 V_FW_CMD_EXEC(0) |
10228 V_FW_VI_MAC_CMD_VIID(viid));
10229 c.freemacs_to_len16 =
10230 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
10231 V_FW_CMD_LEN16(len16));
10232
10233 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
10234 p->valid_to_idx = cpu_to_be16(
10235 F_FW_VI_MAC_CMD_VALID |
10236 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
10237 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
10238 }
10239
10240 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
10241 if (ret)
10242 break;
10243
10244 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
10245 u16 index = G_FW_VI_MAC_CMD_IDX(
10246 be16_to_cpu(p->valid_to_idx));
10247
10248 if (index < max_naddr)
10249 nfilters++;
10250 }
10251
10252 offset += fw_naddr;
10253 rem -= fw_naddr;
10254 }
10255
10256 if (ret == 0)
10257 ret = nfilters;
10258 return ret;
10259 }
10260
10261 /**
10262 * t4_change_mac - modifies the exact-match filter for a MAC address
10263 * @adap: the adapter
10264 * @mbox: mailbox to use for the FW command
10265 * @viid: the VI id
10266 * @idx: index of existing filter for old value of MAC address, or -1
10267 * @addr: the new MAC address value
10268 * @persist: whether a new MAC allocation should be persistent
10269 * @smt_idx: add MAC to SMT and return its index, or NULL
10270 *
10271 * Modifies an exact-match filter and sets it to the new MAC address if
10272 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
10273 * latter case the address is added persistently if @persist is %true.
10274 *
10275 * Note that in general it is not possible to modify the value of a given
10276 * filter so the generic way to modify an address filter is to free the one
10277 * being used by the old address value and allocate a new filter for the
10278 * new address value.
10279 *
10280 * Returns a negative error number or the index of the filter with the new
10281 * MAC value. Note that this index may differ from @idx.
10282 */
t4_change_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,uint16_t * smt_idx)10283 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
10284 int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
10285 {
10286 int ret, mode;
10287 struct fw_vi_mac_cmd c;
10288 struct fw_vi_mac_exact *p = c.u.exact;
10289 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
10290
10291 if (idx < 0) /* new allocation */
10292 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
10293 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
10294
10295 memset(&c, 0, sizeof(c));
10296 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10297 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
10298 V_FW_VI_MAC_CMD_VIID(viid));
10299 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
10300 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
10301 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
10302 V_FW_VI_MAC_CMD_IDX(idx));
10303 memcpy(p->macaddr, addr, sizeof(p->macaddr));
10304
10305 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10306 if (ret == 0) {
10307 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
10308 if (ret >= max_mac_addr)
10309 ret = -ENOMEM;
10310 if (smt_idx) {
10311 if (adap->params.viid_smt_extn_support)
10312 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
10313 else {
10314 if (chip_id(adap) <= CHELSIO_T5)
10315 *smt_idx = (viid & M_FW_VIID_VIN) << 1;
10316 else
10317 *smt_idx = viid & M_FW_VIID_VIN;
10318 }
10319 }
10320 }
10321 return ret;
10322 }
10323
10324 /**
10325 * t4_set_addr_hash - program the MAC inexact-match hash filter
10326 * @adap: the adapter
10327 * @mbox: mailbox to use for the FW command
10328 * @viid: the VI id
10329 * @ucast: whether the hash filter should also match unicast addresses
10330 * @vec: the value to be written to the hash filter
10331 * @sleep_ok: call is allowed to sleep
10332 *
10333 * Sets the 64-bit inexact-match hash filter for a virtual interface.
10334 */
t4_set_addr_hash(struct adapter * adap,unsigned int mbox,unsigned int viid,bool ucast,u64 vec,bool sleep_ok)10335 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
10336 bool ucast, u64 vec, bool sleep_ok)
10337 {
10338 struct fw_vi_mac_cmd c;
10339 u32 val;
10340
10341 memset(&c, 0, sizeof(c));
10342 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
10343 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
10344 V_FW_VI_ENABLE_CMD_VIID(viid));
10345 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
10346 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
10347 c.freemacs_to_len16 = cpu_to_be32(val);
10348 c.u.hash.hashvec = cpu_to_be64(vec);
10349 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
10350 }
10351
10352 /**
10353 * t4_enable_vi_params - enable/disable a virtual interface
10354 * @adap: the adapter
10355 * @mbox: mailbox to use for the FW command
10356 * @viid: the VI id
10357 * @rx_en: 1=enable Rx, 0=disable Rx
10358 * @tx_en: 1=enable Tx, 0=disable Tx
10359 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
10360 *
10361 * Enables/disables a virtual interface. Note that setting DCB Enable
10362 * only makes sense when enabling a Virtual Interface ...
10363 */
t4_enable_vi_params(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en,bool dcb_en)10364 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
10365 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
10366 {
10367 struct fw_vi_enable_cmd c;
10368
10369 memset(&c, 0, sizeof(c));
10370 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
10371 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10372 V_FW_VI_ENABLE_CMD_VIID(viid));
10373 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
10374 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
10375 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
10376 FW_LEN16(c));
10377 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
10378 }
10379
10380 /**
10381 * t4_enable_vi - enable/disable a virtual interface
10382 * @adap: the adapter
10383 * @mbox: mailbox to use for the FW command
10384 * @viid: the VI id
10385 * @rx_en: 1=enable Rx, 0=disable Rx
10386 * @tx_en: 1=enable Tx, 0=disable Tx
10387 *
10388 * Enables/disables a virtual interface. Note that setting DCB Enable
10389 * only makes sense when enabling a Virtual Interface ...
10390 */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)10391 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
10392 bool rx_en, bool tx_en)
10393 {
10394 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
10395 }
10396
10397 /**
10398 * t4_identify_port - identify a VI's port by blinking its LED
10399 * @adap: the adapter
10400 * @mbox: mailbox to use for the FW command
10401 * @viid: the VI id
10402 * @nblinks: how many times to blink LED at 2.5 Hz
10403 *
10404 * Identifies a VI's port by blinking its LED.
10405 */
t4_identify_port(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int nblinks)10406 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
10407 unsigned int nblinks)
10408 {
10409 struct fw_vi_enable_cmd c;
10410
10411 memset(&c, 0, sizeof(c));
10412 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
10413 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10414 V_FW_VI_ENABLE_CMD_VIID(viid));
10415 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
10416 c.blinkdur = cpu_to_be16(nblinks);
10417 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10418 }
10419
10420 /**
10421 * t4_iq_stop - stop an ingress queue and its FLs
10422 * @adap: the adapter
10423 * @mbox: mailbox to use for the FW command
10424 * @pf: the PF owning the queues
10425 * @vf: the VF owning the queues
10426 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
10427 * @iqid: ingress queue id
10428 * @fl0id: FL0 queue id or 0xffff if no attached FL0
10429 * @fl1id: FL1 queue id or 0xffff if no attached FL1
10430 *
10431 * Stops an ingress queue and its associated FLs, if any. This causes
10432 * any current or future data/messages destined for these queues to be
10433 * tossed.
10434 */
t4_iq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)10435 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
10436 unsigned int vf, unsigned int iqtype, unsigned int iqid,
10437 unsigned int fl0id, unsigned int fl1id)
10438 {
10439 struct fw_iq_cmd c;
10440
10441 memset(&c, 0, sizeof(c));
10442 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
10443 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
10444 V_FW_IQ_CMD_VFN(vf));
10445 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
10446 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
10447 c.iqid = cpu_to_be16(iqid);
10448 c.fl0id = cpu_to_be16(fl0id);
10449 c.fl1id = cpu_to_be16(fl1id);
10450 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10451 }
10452
10453 /**
10454 * t4_iq_free - free an ingress queue and its FLs
10455 * @adap: the adapter
10456 * @mbox: mailbox to use for the FW command
10457 * @pf: the PF owning the queues
10458 * @vf: the VF owning the queues
10459 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
10460 * @iqid: ingress queue id
10461 * @fl0id: FL0 queue id or 0xffff if no attached FL0
10462 * @fl1id: FL1 queue id or 0xffff if no attached FL1
10463 *
10464 * Frees an ingress queue and its associated FLs, if any.
10465 */
t4_iq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)10466 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
10467 unsigned int vf, unsigned int iqtype, unsigned int iqid,
10468 unsigned int fl0id, unsigned int fl1id)
10469 {
10470 struct fw_iq_cmd c;
10471
10472 memset(&c, 0, sizeof(c));
10473 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
10474 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
10475 V_FW_IQ_CMD_VFN(vf));
10476 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
10477 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
10478 c.iqid = cpu_to_be16(iqid);
10479 c.fl0id = cpu_to_be16(fl0id);
10480 c.fl1id = cpu_to_be16(fl1id);
10481 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10482 }
10483
10484 /**
10485 * t4_eth_eq_stop - stop an Ethernet egress queue
10486 * @adap: the adapter
10487 * @mbox: mailbox to use for the FW command
10488 * @pf: the PF owning the queues
10489 * @vf: the VF owning the queues
10490 * @eqid: egress queue id
10491 *
10492 * Stops an Ethernet egress queue. The queue can be reinitialized or
10493 * freed but is not otherwise functional after this call.
10494 */
t4_eth_eq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)10495 int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
10496 unsigned int vf, unsigned int eqid)
10497 {
10498 struct fw_eq_eth_cmd c;
10499
10500 memset(&c, 0, sizeof(c));
10501 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
10502 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10503 V_FW_EQ_ETH_CMD_PFN(pf) |
10504 V_FW_EQ_ETH_CMD_VFN(vf));
10505 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c));
10506 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
10507 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10508 }
10509
10510 /**
10511 * t4_eth_eq_free - free an Ethernet egress queue
10512 * @adap: the adapter
10513 * @mbox: mailbox to use for the FW command
10514 * @pf: the PF owning the queue
10515 * @vf: the VF owning the queue
10516 * @eqid: egress queue id
10517 *
10518 * Frees an Ethernet egress queue.
10519 */
t4_eth_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)10520 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
10521 unsigned int vf, unsigned int eqid)
10522 {
10523 struct fw_eq_eth_cmd c;
10524
10525 memset(&c, 0, sizeof(c));
10526 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
10527 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10528 V_FW_EQ_ETH_CMD_PFN(pf) |
10529 V_FW_EQ_ETH_CMD_VFN(vf));
10530 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
10531 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
10532 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10533 }
10534
10535 /**
10536 * t4_ctrl_eq_free - free a control egress queue
10537 * @adap: the adapter
10538 * @mbox: mailbox to use for the FW command
10539 * @pf: the PF owning the queue
10540 * @vf: the VF owning the queue
10541 * @eqid: egress queue id
10542 *
10543 * Frees a control egress queue.
10544 */
t4_ctrl_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)10545 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
10546 unsigned int vf, unsigned int eqid)
10547 {
10548 struct fw_eq_ctrl_cmd c;
10549
10550 memset(&c, 0, sizeof(c));
10551 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
10552 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10553 V_FW_EQ_CTRL_CMD_PFN(pf) |
10554 V_FW_EQ_CTRL_CMD_VFN(vf));
10555 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
10556 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
10557 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10558 }
10559
10560 /**
10561 * t4_ofld_eq_free - free an offload egress queue
10562 * @adap: the adapter
10563 * @mbox: mailbox to use for the FW command
10564 * @pf: the PF owning the queue
10565 * @vf: the VF owning the queue
10566 * @eqid: egress queue id
10567 *
10568 * Frees a control egress queue.
10569 */
t4_ofld_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)10570 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
10571 unsigned int vf, unsigned int eqid)
10572 {
10573 struct fw_eq_ofld_cmd c;
10574
10575 memset(&c, 0, sizeof(c));
10576 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
10577 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
10578 V_FW_EQ_OFLD_CMD_PFN(pf) |
10579 V_FW_EQ_OFLD_CMD_VFN(vf));
10580 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
10581 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
10582 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
10583 }
10584
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const rc_names[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	/* Codes beyond the table are reported as invalid. */
	return (link_down_rc < ARRAY_SIZE(rc_names) ?
	    rc_names[link_down_rc] : "Bad Reason Code");
}
10609
10610 /*
10611 * Return the highest speed set in the port capabilities, in Mb/s.
10612 */
fwcap_to_speed(uint32_t caps)10613 unsigned int fwcap_to_speed(uint32_t caps)
10614 {
10615 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
10616 do { \
10617 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
10618 return __speed; \
10619 } while (0)
10620
10621 TEST_SPEED_RETURN(400G, 400000);
10622 TEST_SPEED_RETURN(200G, 200000);
10623 TEST_SPEED_RETURN(100G, 100000);
10624 TEST_SPEED_RETURN(50G, 50000);
10625 TEST_SPEED_RETURN(40G, 40000);
10626 TEST_SPEED_RETURN(25G, 25000);
10627 TEST_SPEED_RETURN(10G, 10000);
10628 TEST_SPEED_RETURN(1G, 1000);
10629 TEST_SPEED_RETURN(100M, 100);
10630
10631 #undef TEST_SPEED_RETURN
10632
10633 return 0;
10634 }
10635
10636 /*
10637 * Return the port capabilities bit for the given speed, which is in Mb/s.
10638 */
speed_to_fwcap(unsigned int speed)10639 uint32_t speed_to_fwcap(unsigned int speed)
10640 {
10641 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
10642 do { \
10643 if (speed == __speed) \
10644 return FW_PORT_CAP32_SPEED_##__caps_speed; \
10645 } while (0)
10646
10647 TEST_SPEED_RETURN(400G, 400000);
10648 TEST_SPEED_RETURN(200G, 200000);
10649 TEST_SPEED_RETURN(100G, 100000);
10650 TEST_SPEED_RETURN(50G, 50000);
10651 TEST_SPEED_RETURN(40G, 40000);
10652 TEST_SPEED_RETURN(25G, 25000);
10653 TEST_SPEED_RETURN(10G, 10000);
10654 TEST_SPEED_RETURN(1G, 1000);
10655 TEST_SPEED_RETURN(100M, 100);
10656
10657 #undef TEST_SPEED_RETURN
10658
10659 return 0;
10660 }
10661
10662 /*
10663 * Return the port capabilities bit for the highest speed in the capabilities.
10664 */
fwcap_top_speed(uint32_t caps)10665 uint32_t fwcap_top_speed(uint32_t caps)
10666 {
10667 #define TEST_SPEED_RETURN(__caps_speed) \
10668 do { \
10669 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
10670 return FW_PORT_CAP32_SPEED_##__caps_speed; \
10671 } while (0)
10672
10673 TEST_SPEED_RETURN(400G);
10674 TEST_SPEED_RETURN(200G);
10675 TEST_SPEED_RETURN(100G);
10676 TEST_SPEED_RETURN(50G);
10677 TEST_SPEED_RETURN(40G);
10678 TEST_SPEED_RETURN(25G);
10679 TEST_SPEED_RETURN(10G);
10680 TEST_SPEED_RETURN(1G);
10681 TEST_SPEED_RETURN(100M);
10682
10683 #undef TEST_SPEED_RETURN
10684
10685 return 0;
10686 }
10687
10688 /**
10689 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
10690 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
10691 *
10692 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
10693 * 32-bit Port Capabilities value.
10694 */
lstatus_to_fwcap(u32 lstatus)10695 static uint32_t lstatus_to_fwcap(u32 lstatus)
10696 {
10697 uint32_t linkattr = 0;
10698
10699 /*
10700 * Unfortunately the format of the Link Status in the old
10701 * 16-bit Port Information message isn't the same as the
10702 * 16-bit Port Capabilities bitfield used everywhere else ...
10703 */
10704 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
10705 linkattr |= FW_PORT_CAP32_FC_RX;
10706 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
10707 linkattr |= FW_PORT_CAP32_FC_TX;
10708 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
10709 linkattr |= FW_PORT_CAP32_SPEED_100M;
10710 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
10711 linkattr |= FW_PORT_CAP32_SPEED_1G;
10712 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
10713 linkattr |= FW_PORT_CAP32_SPEED_10G;
10714 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
10715 linkattr |= FW_PORT_CAP32_SPEED_25G;
10716 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
10717 linkattr |= FW_PORT_CAP32_SPEED_40G;
10718 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
10719 linkattr |= FW_PORT_CAP32_SPEED_100G;
10720
10721 return linkattr;
10722 }
10723
10724 /*
10725 * Updates all fields owned by the common code in port_info and link_config
10726 * based on information provided by the firmware. Does not touch any
10727 * requested_* field.
10728 */
static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
	enum fw_port_action action, bool *mod_changed, bool *link_changed)
{
	struct link_config old_lc, *lc = &pi->link_cfg;
	unsigned char fc;
	u32 stat, linkattr;
	int old_ptype, old_mtype;

	/* Snapshot the current state so changes can be detected at the end. */
	old_ptype = pi->port_type;
	old_mtype = pi->mod_type;
	old_lc = *lc;
	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* Old 16-bit Port Information message. */
		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);

		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
		/* -1 indicates no MDIO access for this port. */
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
		    G_FW_PORT_CMD_MDIOADDR(stat) : -1;

		/* Widen the old 16-bit capabilities to the 32-bit format. */
		lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
		lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
		lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

		/* lstatus uses its own encoding; translate to 32-bit caps. */
		linkattr = lstatus_to_fwcap(stat);
	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
		/* New 32-bit Port Information message; caps come as-is. */
		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);

		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;

		lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
		lc->acaps = be32_to_cpu(p->u.info32.acaps32);
		lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);

		linkattr = be32_to_cpu(p->u.info32.linkattr32);
	} else {
		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
		return;
	}

	/* Derive the negotiated speed/FEC/pause settings from linkattr. */
	lc->speed = fwcap_to_speed(linkattr);
	lc->fec = fwcap_to_fec(linkattr, true);

	fc = 0;
	if (linkattr & FW_PORT_CAP32_FC_RX)
		fc |= PAUSE_RX;
	if (linkattr & FW_PORT_CAP32_FC_TX)
		fc |= PAUSE_TX;
	lc->fc = fc;

	/* Report what changed (if the caller asked). */
	if (mod_changed != NULL)
		*mod_changed = false;
	if (link_changed != NULL)
		*link_changed = false;
	/* A new port type, module type, or pcaps means the module changed. */
	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
	    old_lc.pcaps != lc->pcaps) {
		/* Refresh the FEC hint for the newly inserted module. */
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
			lc->fec_hint = fwcap_to_fec(lc->acaps, true);
		if (mod_changed != NULL)
			*mod_changed = true;
	}
	/* Any difference in up/down, speed, FEC, or pause is a link change. */
	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
		if (link_changed != NULL)
			*link_changed = true;
	}
}
10802
10803 /**
10804 * t4_update_port_info - retrieve and update port information if changed
10805 * @pi: the port_info
10806 *
10807 * We issue a Get Port Information Command to the Firmware and, if
10808 * successful, we check to see if anything is different from what we
10809 * last recorded and update things accordingly.
10810 */
t4_update_port_info(struct port_info * pi)10811 int t4_update_port_info(struct port_info *pi)
10812 {
10813 struct adapter *sc = pi->adapter;
10814 struct fw_port_cmd cmd;
10815 enum fw_port_action action;
10816 int ret;
10817
10818 memset(&cmd, 0, sizeof(cmd));
10819 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
10820 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10821 V_FW_PORT_CMD_PORTID(pi->hw_port));
10822 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
10823 FW_PORT_ACTION_GET_PORT_INFO;
10824 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
10825 FW_LEN16(cmd));
10826 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
10827 if (ret)
10828 return ret;
10829
10830 handle_port_info(pi, &cmd, action, NULL, NULL);
10831 return 0;
10832 }
10833
10834 /**
10835 * t4_handle_fw_rpl - process a FW reply message
10836 * @adap: the adapter
10837 * @rpl: start of the FW message
10838 *
10839 * Processes a FW message, such as link state change messages.
10840 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	enum fw_port_action action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
	bool mod_changed, link_changed;

	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	    action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		/* link/module state change message */
		int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		/* Map the hardware port number to the driver's port index. */
		int port_id = adap->port_map[hw_port];
		struct port_info *pi;

		MPASS(port_id >= 0 && port_id < adap->params.nports);
		pi = adap->port[port_id];
		/* Update the common port/link state under the port lock. */
		PORT_LOCK(pi);
		handle_port_info(pi, p, action, &mod_changed, &link_changed);
		PORT_UNLOCK(pi);
		/*
		 * NOTE(review): the module-change callback runs unlocked
		 * while the link-change callback is invoked with the port
		 * lock held -- presumably matching each OS callback's
		 * locking contract; confirm against t4_os_* implementations.
		 */
		if (mod_changed)
			t4_os_portmod_changed(pi);
		if (link_changed) {
			PORT_LOCK(pi);
			t4_os_link_changed(pi);
			PORT_UNLOCK(pi);
		}
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
10875
10876 /**
10877 * get_pci_mode - determine a card's PCI mode
10878 * @adapter: the adapter
10879 * @p: where to store the PCI settings
10880 *
10881 * Determines a card's PCI mode and associated parameters, such as speed
10882 * and width.
10883 */
get_pci_mode(struct adapter * adapter,struct pci_params * p)10884 static void get_pci_mode(struct adapter *adapter,
10885 struct pci_params *p)
10886 {
10887 u16 val;
10888 u32 pcie_cap;
10889
10890 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
10891 if (pcie_cap) {
10892 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
10893 p->speed = val & PCI_EXP_LNKSTA_CLS;
10894 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
10895 }
10896 }
10897
/* Describes one explicitly-supported serial Flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* reply to the Read ID Flash command */
	u32 size_mb;			/* total part size in bytes (see table entries) */
};
10902
/*
 * Identify the adapter's serial Flash part and record its size
 * (params.sf_size) and sector count (params.sf_nsec).  Returns 0 on
 * success or a negative error from the serial Flash access routines.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts. Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part. We decode supported
	 * Flash parts and their sizes from this. There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size. The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command. The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result. After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
	 * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
	 * size, depending on the board.
	 */
	if (size == 0) {
		size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
		CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
			flashid, size);
		size <<= 20;	/* MB -> bytes */
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use. So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
11041
/*
 * Program the PCIe Completion Timeout Value (the low 4 bits of the
 * Device Control 2 register) with the given range encoding.
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u32 cap;
	u16 devctl2;

	cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (cap == 0)
		return;

	t4_os_pci_read_cfg2(adapter, cap + PCI_EXP_DEVCTL2, &devctl2);
	devctl2 = (devctl2 & 0xfff0) | range;
	t4_os_pci_write_cfg2(adapter, cap + PCI_EXP_DEVCTL2, devctl2);
}
11056
/*
 * Return the constant per-chip parameter block for the given chip id
 * (CHELSIO_T4 .. CHELSIO_T7), or NULL for an unknown chip.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	/* Indexed by chipid - CHELSIO_T4; keep entries in chip order. */
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ,
			.filter_opt_len = FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = T6_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
		{
			/* T7 */
			.nchan = NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ_T7,
			.cim_num_obq = CIM_NUM_OBQ_T7,
			.filter_opt_len = T7_FILTER_OPT_LEN,
			.filter_num_opt = S_T7_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE_T7,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES * 3,
			.rss_nentries = T7_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
	};

	/* Rebase to a zero-origin table index and range check. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}
11140
11141 /**
11142 * t4_prep_adapter - prepare SW and HW for operation
11143 * @adapter: the adapter
11144 * @buf: temporary space of at least VPD_LEN size provided by the caller.
11145 *
11146 * Initialize adapter SW state for the various HW modules, set initial
11147 * values for some adapter tunables, take PHYs out of reset, and
11148 * initialize the MDIO interface.
11149 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Chip id and revision come from PL_REV; must be read first since
	 * nearly everything below is keyed off the chip id. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = adapter->chip_params->cim_la_size;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		/* NOTE(review): FPGAs use a doubled CIM LA -- confirm. */
		adapter->params.cim_la_size = 2 * adapter->chip_params->cim_la_size;
	}

	/* @buf provides the VPD_LEN scratch space promised by the caller. */
	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
11210
11211 /**
11212 * t4_shutdown_adapter - shut down adapter, host & wire
11213 * @adapter: the adapter
11214 *
11215 * Perform an emergency shutdown of the adapter and stop it from
11216 * continuing any further communication on the ports or DMA to the
11217 * host. This is typically used when the adapter and/or firmware
11218 * have crashed and we want to prevent any further accidental
11219 * communication with the rest of the world. This will also force
11220 * the port Link Status to go down -- if register writes work --
11221 * which should help our peers figure out that we're down.
11222 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;
	/* presumably set when BASE-T ports are present -- TODO confirm */
	const bool bt = adapter->bt_map != 0;

	t4_intr_disable(adapter);
	if (bt)
		t4_write_reg(adapter, A_DBG_GPIO_EN, 0xffff0000);
	for (port = 0; port < adapter->params.nports; port++) {
		/* T4 MACs live at different register offsets than T5+. */
		u32 a_port_cfg = is_t4(adapter) ?
		    t4_port_reg(adapter, port, A_XGMAC_PORT_CFG) :
		    t4_port_reg(adapter, port, A_MAC_PORT_CFG);

		/* Drop signal detect to force the link down on the wire. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
		if (!bt) {
			/* Power down / bypass the HSS PLLs as well. */
			u32 hss_cfg0 = is_t4(adapter) ?
			    t4_port_reg(adapter, port, A_XGMAC_PORT_HSS_CFG0) :
			    t4_port_reg(adapter, port, A_MAC_PORT_HSS_CFG0);
			t4_set_reg_field(adapter, hss_cfg0, F_HSSPDWNPLLB |
			    F_HSSPDWNPLLA | F_HSSPLLBYPB | F_HSSPLLBYPA,
			    F_HSSPDWNPLLB | F_HSSPDWNPLLA | F_HSSPLLBYPB |
			    F_HSSPLLBYPA);
		}
	}
	/* Stop all further SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}
11253
11254 /**
11255 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
11256 * @adapter: the adapter
11257 * @qid: the Queue ID
11258 * @qtype: the Ingress or Egress type for @qid
11259 * @user: true if this request is for a user mode queue
11260 * @pbar2_qoffset: BAR2 Queue Offset
11261 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
11262 *
11263 * Returns the BAR2 SGE Queue Registers information associated with the
11264 * indicated Absolute Queue ID. These are passed back in return value
11265 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
11266 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
11267 *
11268 * This may return an error which indicates that BAR2 SGE Queue
11269 * registers aren't available. If an error is not returned, then the
11270 * following values are returned:
11271 *
11272 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
11273 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
11274 *
11275 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
11276 * require the "Inferred Queue ID" ability may be used. E.g. the
11277 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
11278 * then these "Inferred Queue ID" register may not be used.
11279 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	unsigned int relqid, relqid_offset;
	u64 qoffset;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* SGE Page Size parameters. */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Queues-per-Page parameters for this Queue's type. */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
	    adapter->params.sge.eq_s_qpp : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Split the Absolute Queue ID into the BAR2 page holding the Queue's
	 * registers, the Queue ID relative to that page, and the byte offset
	 * of the Queue's register area within the page.
	 */
	qoffset = (u64)(qid >> qpp_shift) << page_shift;
	relqid = qid & qpp_mask;
	relqid_offset = relqid * SGE_UDB_SIZE;

	/* If the Queue's register area lies within the BAR2 page, the
	 * hardware infers the Absolute Queue ID purely from where the writes
	 * land, so a BAR2 Queue ID of 0 is used and the offset points
	 * directly at the Queue's area.  Otherwise writes go to the first
	 * Queue area of the page tagged with the relative Queue ID.
	 *
	 * One important consequence: registers which synthesize the Queue ID
	 * solely from the write location (e.g. the Write Combined Doorbell
	 * Buffer) are only usable when the returned BAR2 Queue ID is 0.
	 */
	if (relqid_offset < page_size) {
		qoffset += relqid_offset;
		relqid = 0;
	}

	*pbar2_qoffset = qoffset;
	*pbar2_qid = relqid;
	return 0;
}
11345
11346 /**
11347 * t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
11348 * @adap: the adapter
11349 * @fw_attach: whether we can talk to the firmware
11350 */
int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	/* 0 or all-ones means the register is unimplemented/unprogrammed. */
	if (pf_dparams && pf_dparams != UINT32_MAX) {
		unsigned int nentries, nentries128, ncore_shift;

		/* Core count is encoded as log2 split across two fields. */
		ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
		    G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
		adap->params.ncores = 1 << ncore_shift;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		/* Start address is stored in 16-byte units. */
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
		/* Entry count is stored in units of 128 entries, minus one. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	adap->params.ncores = 1;
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	/* Again, the address is in 16-byte units. */
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
11413
11414 /**
11415 * t4_init_sge_params - initialize adap->params.sge
11416 * @adapter: the adapter
11417 *
11418 * Initialize various fields of the adapter's SGE Parameters structure.
11419 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt holdoff packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ can scale the SGE timers; 0 means no scaling. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Interrupt holdoff timer values, converted to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing variant moved
	 * between chip generations. */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; field encodes log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Cache SGE_CONTROL and the settings derived from it. */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	/* Pad boundary encoding shifted between T5 and T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		/* 0 is a special encoding meaning a 16B pack boundary. */
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	/* Free-list buffer sizes, one register per size. */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
11503
11504 /* Convert the LE's hardware hash mask to a shorter filter mask. */
11505 static inline uint16_t
hashmask_to_filtermask(struct adapter * adap,uint64_t hashmask,uint16_t filter_mode)11506 hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
11507 {
11508 int first, last, i;
11509 uint16_t filter_mask;
11510 uint64_t mask; /* field mask */
11511
11512
11513 if (chip_id(adap) >= CHELSIO_T7) {
11514 first = S_T7_FT_FIRST;
11515 last = S_T7_FT_LAST;
11516 } else {
11517 first = S_FT_FIRST;
11518 last = S_FT_LAST;
11519 }
11520
11521 for (filter_mask = 0, i = first; i <= last; i++) {
11522 if ((filter_mode & (1 << i)) == 0)
11523 continue;
11524 mask = (1 << t4_filter_field_width(adap, i)) - 1;
11525 if ((hashmask & mask) == mask)
11526 filter_mask |= 1 << i;
11527 hashmask >>= t4_filter_field_width(adap, i);
11528 }
11529
11530 return (filter_mask);
11531 }
11532
11533 /*
11534 * Read and cache the adapter's compressed filter mode and ingress config.
11535 */
static void
read_filter_mode_and_ingress_config(struct adapter *adap)
{
	int rc;
	uint32_t v, param[2], val[2];
	struct tp_params *tpp = &adap->params.tp;
	uint64_t hash_mask;

	/* Ask the firmware for the filter mode/mask and VNIC mode. */
	param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
	param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
	rc = -t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val);
	if (rc == 0) {
		tpp->filter_mode = G_FW_PARAMS_PARAM_FILTER_MODE(val[0]);
		tpp->filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val[0]);
		tpp->vnic_mode = val[1];
	} else {
		/*
		 * Old firmware. Read filter mode/mask and ingress config
		 * straight from the hardware.
		 */
		t4_tp_pio_read(adap, &v, 1, A_TP_VLAN_PRI_MAP, true);
		tpp->filter_mode = v & 0xffff;

		/* The LE hash mask registers only exist on T5 and later. */
		hash_mask = 0;
		if (chip_id(adap) > CHELSIO_T4) {
			v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
			hash_mask = v;
			v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
			hash_mask |= (u64)v << 32;
		}
		if (chip_id(adap) >= CHELSIO_T7) {
			/*
			 * This param came before T7 so T7+ firmwares should
			 * always support this query.
			 */
			CH_WARN(adap, "query for filter mode/mask failed: %d\n",
			    rc);
		}
		/* Derive the filter mask from the raw LE hash mask. */
		tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
		    tpp->filter_mode);

		t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
		if (v & F_VNIC)
			tpp->vnic_mode = FW_VNIC_MODE_PF_VF;
		else
			tpp->vnic_mode = FW_VNIC_MODE_OUTER_VLAN;
	}

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	if (chip_id(adap) >= CHELSIO_T7) {
		/* T7 renumbered the filter fields (F_T7_*). */
		tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
		tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
		tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
		tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
		tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
		tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
		tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
		tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
		tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
		tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
		tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
		tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
		tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
		tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
	} else {
		/* -1 marks fields that don't exist before T7. */
		tpp->ipsecidx_shift = -1;
		tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
		tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
		tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
		tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
		tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
		tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
		tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
		tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
		tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
		tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
		tpp->roce_shift = -1;
		tpp->synonly_shift = -1;
		tpp->tcpflags_shift = -1;
	}
}
11625
11626 /**
11627 * t4_init_tp_params - initialize adap->params.tp
11628 * @adap: the adapter
11629 *
11630 * Initialize various fields of the adapter's TP Parameters structure.
11631 */
t4_init_tp_params(struct adapter * adap)11632 int t4_init_tp_params(struct adapter *adap)
11633 {
11634 u32 tx_len, rx_len, r, v;
11635 struct tp_params *tpp = &adap->params.tp;
11636
11637 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
11638 tpp->tre = G_TIMERRESOLUTION(v);
11639 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
11640
11641 read_filter_mode_and_ingress_config(adap);
11642
11643 tpp->rx_pkt_encap = false;
11644 tpp->lb_mode = 0;
11645 tpp->lb_nchan = 1;
11646 if (chip_id(adap) > CHELSIO_T5) {
11647 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
11648 tpp->rx_pkt_encap = v & F_CRXPKTENC;
11649 if (chip_id(adap) >= CHELSIO_T7) {
11650 t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
11651 tpp->lb_mode = G_T7_LB_MODE(v);
11652 if (tpp->lb_mode == 1)
11653 tpp->lb_nchan = 4;
11654 else if (tpp->lb_mode == 2)
11655 tpp->lb_nchan = 2;
11656 }
11657 }
11658
11659 rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
11660 tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
11661
11662 r = t4_read_reg(adap, A_TP_PARA_REG2);
11663 rx_len = min(rx_len, G_MAXRXDATA(r));
11664 tx_len = min(tx_len, G_MAXRXDATA(r));
11665
11666 r = t4_read_reg(adap, A_TP_PARA_REG7);
11667 v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
11668 rx_len = min(rx_len, v);
11669 tx_len = min(tx_len, v);
11670
11671 tpp->max_tx_pdu = tx_len;
11672 tpp->max_rx_pdu = rx_len;
11673
11674 return 0;
11675 }
11676
11677 /**
11678 * t4_filter_field_width - returns the width of a filter field
11679 * @adap: the adapter
11680 * @filter_field: the filter field whose width is being requested
11681 *
 *	Return the width (in bits) of a filter field within the Compressed
 *	Filter Tuple. The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
11685 */
t4_filter_field_width(const struct adapter * adap,int filter_field)11686 int t4_filter_field_width(const struct adapter *adap, int filter_field)
11687 {
11688 const int nopt = adap->chip_params->filter_num_opt;
11689 static const uint8_t width_t7[] = {
11690 W_FT_IPSECIDX,
11691 W_FT_FCOE,
11692 W_FT_PORT,
11693 W_FT_VNIC_ID,
11694 W_FT_VLAN,
11695 W_FT_TOS,
11696 W_FT_PROTOCOL,
11697 W_FT_ETHERTYPE,
11698 W_FT_MACMATCH,
11699 W_FT_MPSHITTYPE,
11700 W_FT_FRAGMENTATION,
11701 W_FT_ROCE,
11702 W_FT_SYNONLY,
11703 W_FT_TCPFLAGS
11704 };
11705 static const uint8_t width_t4[] = {
11706 W_FT_FCOE,
11707 W_FT_PORT,
11708 W_FT_VNIC_ID,
11709 W_FT_VLAN,
11710 W_FT_TOS,
11711 W_FT_PROTOCOL,
11712 W_FT_ETHERTYPE,
11713 W_FT_MACMATCH,
11714 W_FT_MPSHITTYPE,
11715 W_FT_FRAGMENTATION
11716 };
11717 const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
11718
11719 if (filter_field < 0 || filter_field >= nopt)
11720 return (0);
11721 return (width[filter_field]);
11722 }
11723
11724 /**
11725 * t4_filter_field_shift - calculate filter field shift
11726 * @adap: the adapter
11727 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
11728 *
11729 * Return the shift position of a filter field within the Compressed
11730 * Filter Tuple. The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
11732 */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)11733 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
11734 {
11735 const unsigned int filter_mode = adap->params.tp.filter_mode;
11736 unsigned int sel;
11737 int field_shift;
11738
11739 if ((filter_mode & filter_sel) == 0)
11740 return -1;
11741
11742 if (chip_id(adap) >= CHELSIO_T7) {
11743 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
11744 switch (filter_mode & sel) {
11745 case F_IPSECIDX:
11746 field_shift += W_FT_IPSECIDX;
11747 break;
11748 case F_T7_FCOE:
11749 field_shift += W_FT_FCOE;
11750 break;
11751 case F_T7_PORT:
11752 field_shift += W_FT_PORT;
11753 break;
11754 case F_T7_VNIC_ID:
11755 field_shift += W_FT_VNIC_ID;
11756 break;
11757 case F_T7_VLAN:
11758 field_shift += W_FT_VLAN;
11759 break;
11760 case F_T7_TOS:
11761 field_shift += W_FT_TOS;
11762 break;
11763 case F_T7_PROTOCOL:
11764 field_shift += W_FT_PROTOCOL;
11765 break;
11766 case F_T7_ETHERTYPE:
11767 field_shift += W_FT_ETHERTYPE;
11768 break;
11769 case F_T7_MACMATCH:
11770 field_shift += W_FT_MACMATCH;
11771 break;
11772 case F_T7_MPSHITTYPE:
11773 field_shift += W_FT_MPSHITTYPE;
11774 break;
11775 case F_T7_FRAGMENTATION:
11776 field_shift += W_FT_FRAGMENTATION;
11777 break;
11778 case F_ROCE:
11779 field_shift += W_FT_ROCE;
11780 break;
11781 case F_SYNONLY:
11782 field_shift += W_FT_SYNONLY;
11783 break;
11784 case F_TCPFLAGS:
11785 field_shift += W_FT_TCPFLAGS;
11786 break;
11787 }
11788 }
11789 return field_shift;
11790 }
11791
11792 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
11793 switch (filter_mode & sel) {
11794 case F_FCOE:
11795 field_shift += W_FT_FCOE;
11796 break;
11797 case F_PORT:
11798 field_shift += W_FT_PORT;
11799 break;
11800 case F_VNIC_ID:
11801 field_shift += W_FT_VNIC_ID;
11802 break;
11803 case F_VLAN:
11804 field_shift += W_FT_VLAN;
11805 break;
11806 case F_TOS:
11807 field_shift += W_FT_TOS;
11808 break;
11809 case F_PROTOCOL:
11810 field_shift += W_FT_PROTOCOL;
11811 break;
11812 case F_ETHERTYPE:
11813 field_shift += W_FT_ETHERTYPE;
11814 break;
11815 case F_MACMATCH:
11816 field_shift += W_FT_MACMATCH;
11817 break;
11818 case F_MPSHITTYPE:
11819 field_shift += W_FT_MPSHITTYPE;
11820 break;
11821 case F_FRAGMENTATION:
11822 field_shift += W_FT_FRAGMENTATION;
11823 break;
11824 }
11825 }
11826 return field_shift;
11827 }
11828
t4_port_init(struct adapter * adap,int mbox,int pf,int vf,int port_id)11829 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
11830 {
11831 u8 addr[6];
11832 int ret, i, j;
11833 struct port_info *p = adap2pinfo(adap, port_id);
11834 u32 param, val;
11835 struct vi_info *vi = &p->vi[0];
11836
11837 for (i = 0, j = -1; i <= p->port_id; i++) {
11838 do {
11839 j++;
11840 } while ((adap->params.portvec & (1 << j)) == 0);
11841 }
11842
11843 p->hw_port = j;
11844 p->tx_chan = t4_get_tx_c_chan(adap, j);
11845 p->rx_chan = t4_get_rx_c_chan(adap, j);
11846 p->mps_bg_map = t4_get_mps_bg_map(adap, j);
11847 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
11848
11849 if (!(adap->flags & IS_VF) ||
11850 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
11851 t4_update_port_info(p);
11852 }
11853
11854 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
11855 &vi->vfvld, &vi->vin);
11856 if (ret < 0)
11857 return ret;
11858
11859 vi->viid = ret;
11860 t4_os_set_hw_addr(p, addr);
11861
11862 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11863 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
11864 V_FW_PARAMS_PARAM_YZ(vi->viid);
11865 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
11866 if (ret)
11867 vi->rss_base = 0xffff;
11868 else {
11869 /* MPASS((val >> 16) == rss_size); */
11870 vi->rss_base = val & 0xffff;
11871 }
11872
11873 return 0;
11874 }
11875
/* Read base/size/threshold (in bytes) of one CIM IBQ on @coreid. */
static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
				      u16 *base, u16 *size, u16 *thres)
{
	unsigned int sel, unit;

	/*
	 * T7+ parts take the uP core id in the select register and report
	 * sizes in 512-byte units; older parts use 256-byte units and
	 * have no core selector.
	 */
	if (chip_id(adap) > CHELSIO_T6) {
		sel = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		unit = 512;
	} else {
		sel = F_IBQSELECT | V_QUENUMSELECT(qid);
		unit = 256;
	}

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, sel);
	sel = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
	if (base != NULL)
		*base = G_CIMQBASE(sel) * unit;
	if (size != NULL)
		*size = G_CIMQSIZE(sel) * unit;
	if (thres != NULL)
		*thres = G_QUEFULLTHRSH(sel) * 8;	/* 8-byte unit */
}
11901
/* Read base/size (in bytes) of one CIM OBQ on @coreid. */
static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
				      u16 *base, u16 *size)
{
	unsigned int sel, unit;

	/*
	 * T7+ parts take the uP core id in the select register and report
	 * sizes in 512-byte units; older parts use 256-byte units and
	 * have no core selector.
	 */
	if (chip_id(adap) > CHELSIO_T6) {
		sel = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		unit = 512;
	} else {
		sel = F_OBQSELECT | V_QUENUMSELECT(qid);
		unit = 256;
	}

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, sel);
	sel = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
	if (base != NULL)
		*base = G_CIMQBASE(sel) * unit;
	if (size != NULL)
		*size = G_CIMQSIZE(sel) * unit;
}
11925
11926 /**
11927 * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
11928 * @adap: the adapter
11929 * @coreid: the uP coreid
11930 * @base: holds the queue base addresses in bytes
11931 * @size: holds the queue sizes in bytes
11932 * @thres: holds the queue full thresholds in bytes
11933 *
11934 * Returns the current configuration of the CIM queues, starting with
11935 * the IBQs, then the OBQs, on a specific @coreid.
11936 */
t4_read_cimq_cfg_core(struct adapter * adap,u8 coreid,u16 * base,u16 * size,u16 * thres)11937 void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
11938 u16 *size, u16 *thres)
11939 {
11940 unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
11941 unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
11942 unsigned int i;
11943
11944 for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
11945 t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
11946
11947 for (i = 0; i < cim_num_obq; i++, base++, size++)
11948 t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
11949 }
11950
/* Read one 32-bit word at @addr from a CIM IBQ via the debug interface. */
static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
				     u32 *data)
{
	unsigned int cfg;
	int ret;
	/* It might take 3-10ms before the IBQ debug read access is
	 * allowed; poll for up to 1 second in 1 usec steps.
	 */
	const int attempts = 1000000;

	if (chip_id(adap) > CHELSIO_T6)
		cfg = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
	else
		cfg = V_IBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, cfg | F_IBQDBGEN);
	ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
	    attempts, 1);
	if (ret != 0)
		return ret;

	*data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	return 0;
}
11976
11977 /**
11978 * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
11979 * specific core
11980 * @adap: the adapter
11981 * @coreid: the uP coreid
11982 * @qid: the queue index
11983 * @data: where to store the queue contents
11984 * @n: capacity of @data in 32-bit words
11985 *
11986 * Reads the contents of the selected CIM queue starting at address 0 up
11987 * to the capacity of @data on a specific @coreid. @n must be a multiple
11988 * of 4. Returns < 0 on error and the number of 32-bit words actually
11989 * read on success.
11990 */
t4_read_cim_ibq_core(struct adapter * adap,u8 coreid,u32 qid,u32 * data,size_t n)11991 int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
11992 size_t n)
11993 {
11994 unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
11995 u16 i, addr, nwords;
11996 int ret;
11997
11998 if (qid > (cim_num_ibq - 1) || (n & 3))
11999 return -EINVAL;
12000
12001 t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
12002 addr >>= sizeof(u16);
12003 nwords >>= sizeof(u16);
12004 if (n > nwords)
12005 n = nwords;
12006
12007 for (i = 0; i < n; i++, addr++, data++) {
12008 ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
12009 if (ret < 0)
12010 return ret;
12011 }
12012
12013 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
12014 return i;
12015 }
12016
/* Read one 32-bit word at @addr from a CIM OBQ via the debug interface. */
static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
				     u32 *data)
{
	unsigned int cfg;
	int ret;

	if (chip_id(adap) > CHELSIO_T6)
		cfg = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
	else
		cfg = V_OBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, cfg | F_OBQDBGEN);
	ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
	if (ret != 0)
		return ret;

	*data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	return 0;
}
12036
12037 /**
12038 * t4_read_cim_obq_core - read the contents of a CIM outbound queue on
12039 * specific core
12040 * @adap: the adapter
12041 * @coreid: the uP coreid
12042 * @qid: the queue index
12043 * @data: where to store the queue contents
12044 * @n: capacity of @data in 32-bit words
12045 *
12046 * Reads the contents of the selected CIM queue starting at address 0 up
12047 * to the capacity of @data on specific @coreid. @n must be a multiple
12048 * of 4. Returns < 0 on error and the number of 32-bit words actually
12049 * read on success.
12050 */
t4_read_cim_obq_core(struct adapter * adap,u8 coreid,u32 qid,u32 * data,size_t n)12051 int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
12052 size_t n)
12053 {
12054 unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
12055 u16 i, addr, nwords;
12056 int ret;
12057
12058 if ((qid > (cim_num_obq - 1)) || (n & 3))
12059 return -EINVAL;
12060
12061 t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
12062 addr >>= sizeof(u16);
12063 nwords >>= sizeof(u16);
12064 if (n > nwords)
12065 n = nwords;
12066
12067 for (i = 0; i < n; i++, addr++, data++) {
12068 ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
12069 if (ret < 0)
12070 return ret;
12071 }
12072
12073 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
12074 return i;
12075 }
12076
12077 /**
12078 * t4_cim_read_core - read a block from CIM internal address space
12079 * of a control register group on specific core.
12080 * @adap: the adapter
12081 * @group: the control register group to select for read
12082 * @coreid: the uP coreid
12083 * @addr: the start address within the CIM address space
12084 * @n: number of words to read
12085 * @valp: where to store the result
12086 *
12087 * Reads a block of 4-byte words from the CIM intenal address space
12088 * of a control register @group on a specific @coreid.
12089 */
t4_cim_read_core(struct adapter * adap,u8 group,u8 coreid,unsigned int addr,unsigned int n,unsigned int * valp)12090 int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
12091 unsigned int addr, unsigned int n,
12092 unsigned int *valp)
12093 {
12094 unsigned int hostbusy, v = 0;
12095 int ret = 0;
12096
12097 if (chip_id(adap) > CHELSIO_T6) {
12098 hostbusy = F_T7_HOSTBUSY;
12099 v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
12100 } else {
12101 hostbusy = F_HOSTBUSY;
12102 }
12103
12104 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
12105 return -EBUSY;
12106
12107 for ( ; !ret && n--; addr += 4) {
12108 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
12109 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
12110 0, 5, 2);
12111 if (!ret)
12112 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
12113 }
12114
12115 return ret;
12116 }
12117
12118 /**
12119 * t4_cim_write_core - write a block into CIM internal address space
12120 * of a control register group on specific core.
12121 * @adap: the adapter
12122 * @group: the control register group to select for write
12123 * @coreid: the uP coreid
12124 * @addr: the start address within the CIM address space
12125 * @n: number of words to write
12126 * @valp: set of values to write
12127 *
12128 * Writes a block of 4-byte words into the CIM intenal address space
12129 * of a control register @group on a specific @coreid.
12130 */
t4_cim_write_core(struct adapter * adap,u8 group,u8 coreid,unsigned int addr,unsigned int n,const unsigned int * valp)12131 int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
12132 unsigned int addr, unsigned int n,
12133 const unsigned int *valp)
12134 {
12135 unsigned int hostbusy, v;
12136 int ret = 0;
12137
12138 if (chip_id(adap) > CHELSIO_T6) {
12139 hostbusy = F_T7_HOSTBUSY;
12140 v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
12141 V_HOSTCORESEL(coreid);
12142 } else {
12143 hostbusy = F_HOSTBUSY;
12144 v = F_HOSTWRITE;
12145 }
12146
12147 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
12148 return -EBUSY;
12149
12150 for ( ; !ret && n--; addr += 4) {
12151 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
12152 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
12153 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
12154 0, 5, 2);
12155 }
12156
12157 return ret;
12158 }
12159
12160 /**
12161 * t4_cim_read_la_core - read CIM LA capture buffer on specific core
12162 * @adap: the adapter
12163 * @coreid: uP coreid
12164 * @la_buf: where to store the LA data
12165 * @wrptr: the HW write pointer within the capture buffer
12166 *
12167 * Reads the contents of the CIM LA buffer on a specific @coreid
12168 * with the most recent entry at the end of the returned data
12169 * and with the entry at @wrptr first. We try to leave the LA
12170 * in the running state we find it in.
12171 */
t4_cim_read_la_core(struct adapter * adap,u8 coreid,u32 * la_buf,u32 * wrptr)12172 int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
12173 u32 *wrptr)
12174 {
12175 unsigned int cfg, val, idx;
12176 int i, ret;
12177
12178 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
12179 if (ret)
12180 return ret;
12181
12182 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
12183 val = 0;
12184 ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
12185 &val);
12186 if (ret)
12187 return ret;
12188 }
12189
12190 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
12191 if (ret)
12192 goto restart;
12193
12194 idx = G_UPDBGLAWRPTR(val);
12195 if (wrptr)
12196 *wrptr = idx;
12197
12198 for (i = 0; i < adap->params.cim_la_size; i++) {
12199 val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
12200 ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
12201 &val);
12202 if (ret)
12203 break;
12204 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
12205 &val);
12206 if (ret)
12207 break;
12208 if (val & F_UPDBGLARDEN) {
12209 ret = -ETIMEDOUT;
12210 break;
12211 }
12212 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
12213 &la_buf[i]);
12214 if (ret)
12215 break;
12216
12217 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
12218 * identify the 32-bit portion of the full 312-bit data
12219 */
12220 if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
12221 idx = (idx & 0xff0) + 0x10;
12222 else
12223 idx++;
12224 /* address can't exceed 0xfff */
12225 idx &= M_UPDBGLARDPTR;
12226 }
12227 restart:
12228 if (cfg & F_UPDBGLAEN) {
12229 int r;
12230
12231 val = cfg & ~F_UPDBGLARDEN;
12232 r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
12233 &val);
12234 if (!ret)
12235 ret = r;
12236 }
12237
12238 return ret;
12239 }
12240
12241 /**
12242 * t4_tp_read_la - read TP LA capture buffer
12243 * @adap: the adapter
12244 * @la_buf: where to store the LA data
12245 * @wrptr: the HW write pointer within the capture buffer
12246 *
12247 * Reads the contents of the TP LA buffer with the most recent entry at
12248 * the end of the returned data and with the entry at @wrptr first.
12249 * We leave the LA in the running state we find it in.
12250 */
t4_tp_read_la(struct adapter * adap,u64 * la_buf,unsigned int * wrptr)12251 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
12252 {
12253 bool last_incomplete;
12254 unsigned int i, cfg, val, idx;
12255
12256 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
12257 if (cfg & F_DBGLAENABLE) /* freeze LA */
12258 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
12259 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
12260
12261 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
12262 idx = G_DBGLAWPTR(val);
12263 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
12264 if (last_incomplete)
12265 idx = (idx + 1) & M_DBGLARPTR;
12266 if (wrptr)
12267 *wrptr = idx;
12268
12269 val &= 0xffff;
12270 val &= ~V_DBGLARPTR(M_DBGLARPTR);
12271 val |= adap->params.tp.la_mask;
12272
12273 for (i = 0; i < TPLA_SIZE; i++) {
12274 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
12275 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
12276 idx = (idx + 1) & M_DBGLARPTR;
12277 }
12278
12279 /* Wipe out last entry if it isn't valid */
12280 if (last_incomplete)
12281 la_buf[TPLA_SIZE - 1] = ~0ULL;
12282
12283 if (cfg & F_DBGLAENABLE) /* restore running state */
12284 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
12285 cfg | adap->params.tp.la_mask);
12286 }
12287
12288 /*
12289 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
12290 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
12291 * state for more than the Warning Threshold then we'll issue a warning about
12292 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
12293 * appears to be hung every Warning Repeat second till the situation clears.
12294 * If the situation clears, we'll note that as well.
12295 */
#define SGE_IDMA_WARN_THRESH 1		/* warning threshold, in seconds */
#define SGE_IDMA_WARN_REPEAT 300	/* warning repeat interval, in seconds */
12298
12299 /**
12300 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
12301 * @adapter: the adapter
12302 * @idma: the adapter IDMA Monitor state
12303 *
12304 * Initialize the state of an SGE Ingress DMA Monitor.
12305 */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)12306 void t4_idma_monitor_init(struct adapter *adapter,
12307 struct sge_idma_monitor_state *idma)
12308 {
12309 /* Initialize the state variables for detecting an SGE Ingress DMA
12310 * hang. The SGE has internal counters which count up on each clock
12311 * tick whenever the SGE finds its Ingress DMA State Engines in the
12312 * same state they were on the previous clock tick. The clock used is
12313 * the Core Clock so we have a limit on the maximum "time" they can
12314 * record; typically a very small number of seconds. For instance,
12315 * with a 600MHz Core Clock, we can only count up to a bit more than
12316 * 7s. So we'll synthesize a larger counter in order to not run the
12317 * risk of having the "timers" overflow and give us the flexibility to
12318 * maintain a Hung SGE State Machine of our own which operates across
12319 * a longer time frame.
12320 */
12321 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
12322 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
12323 }
12324
12325 /**
12326 * t4_idma_monitor - monitor SGE Ingress DMA state
12327 * @adapter: the adapter
12328 * @idma: the adapter IDMA Monitor state
12329 * @hz: number of ticks/second
12330 * @ticks: number of ticks since the last IDMA Monitor call
12331 */
t4_idma_monitor(struct adapter * adapter,struct sge_idma_monitor_state * idma,int hz,int ticks)12332 void t4_idma_monitor(struct adapter *adapter,
12333 struct sge_idma_monitor_state *idma,
12334 int hz, int ticks)
12335 {
12336 int i, idma_same_state_cnt[2];
12337
12338 /* Read the SGE Debug Ingress DMA Same State Count registers. These
12339 * are counters inside the SGE which count up on each clock when the
12340 * SGE finds its Ingress DMA State Engines in the same states they
12341 * were in the previous clock. The counters will peg out at
12342 * 0xffffffff without wrapping around so once they pass the 1s
12343 * threshold they'll stay above that till the IDMA state changes.
12344 */
12345 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
12346 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
12347 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
12348
12349 for (i = 0; i < 2; i++) {
12350 u32 debug0, debug11;
12351
12352 /* If the Ingress DMA Same State Counter ("timer") is less
12353 * than 1s, then we can reset our synthesized Stall Timer and
12354 * continue. If we have previously emitted warnings about a
12355 * potential stalled Ingress Queue, issue a note indicating
12356 * that the Ingress Queue has resumed forward progress.
12357 */
12358 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
12359 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
12360 CH_WARN(adapter, "SGE idma%d, queue %u, "
12361 "resumed after %d seconds\n",
12362 i, idma->idma_qid[i],
12363 idma->idma_stalled[i]/hz);
12364 idma->idma_stalled[i] = 0;
12365 continue;
12366 }
12367
12368 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
12369 * domain. The first time we get here it'll be because we
12370 * passed the 1s Threshold; each additional time it'll be
12371 * because the RX Timer Callback is being fired on its regular
12372 * schedule.
12373 *
12374 * If the stall is below our Potential Hung Ingress Queue
12375 * Warning Threshold, continue.
12376 */
12377 if (idma->idma_stalled[i] == 0) {
12378 idma->idma_stalled[i] = hz;
12379 idma->idma_warn[i] = 0;
12380 } else {
12381 idma->idma_stalled[i] += ticks;
12382 idma->idma_warn[i] -= ticks;
12383 }
12384
12385 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
12386 continue;
12387
12388 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
12389 */
12390 if (idma->idma_warn[i] > 0)
12391 continue;
12392 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
12393
12394 /* Read and save the SGE IDMA State and Queue ID information.
12395 * We do this every time in case it changes across time ...
12396 * can't be too careful ...
12397 */
12398 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
12399 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
12400 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
12401
12402 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
12403 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
12404 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
12405
12406 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
12407 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
12408 i, idma->idma_qid[i], idma->idma_state[i],
12409 idma->idma_stalled[i]/hz,
12410 debug0, debug11);
12411 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
12412 }
12413 }
12414
12415 /**
12416 * t4_set_vf_mac - Set MAC address for the specified VF
12417 * @adapter: The adapter
12418 * @pf: the PF used to instantiate the VFs
12419 * @vf: one of the VFs instantiated by the specified PF
12420 * @naddr: the number of MAC addresses
12421 * @addr: the MAC address(es) to be set to the specified VF
12422 */
t4_set_vf_mac(struct adapter * adapter,unsigned int pf,unsigned int vf,unsigned int naddr,u8 * addr)12423 int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
12424 unsigned int naddr, u8 *addr)
12425 {
12426 struct fw_acl_mac_cmd cmd;
12427
12428 memset(&cmd, 0, sizeof(cmd));
12429 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
12430 F_FW_CMD_REQUEST |
12431 F_FW_CMD_WRITE |
12432 V_FW_ACL_MAC_CMD_PFN(pf) |
12433 V_FW_ACL_MAC_CMD_VFN(vf));
12434
12435 /* Note: Do not enable the ACL */
12436 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
12437 cmd.nmac = naddr;
12438
12439 switch (pf) {
12440 case 3:
12441 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
12442 break;
12443 case 2:
12444 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
12445 break;
12446 case 1:
12447 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
12448 break;
12449 case 0:
12450 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
12451 break;
12452 }
12453
12454 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
12455 }
12456
12457 /**
12458 * t4_read_pace_tbl - read the pace table
12459 * @adap: the adapter
12460 * @pace_vals: holds the returned values
12461 *
12462 * Returns the values of TP's pace table in microseconds.
12463 */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])12464 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
12465 {
12466 unsigned int i, v;
12467
12468 for (i = 0; i < NTX_SCHED; i++) {
12469 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
12470 v = t4_read_reg(adap, A_TP_PACE_TABLE);
12471 pace_vals[i] = dack_ticks_to_usec(adap, v);
12472 }
12473 }
12474
12475 /**
12476 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
12477 * @adap: the adapter
12478 * @sched: the scheduler index
12479 * @kbps: the byte rate in Kbps
12480 * @ipg: the interpacket delay in tenths of nanoseconds
12481 *
12482 * Return the current configuration of a HW Tx scheduler.
12483 */
t4_get_tx_sched(struct adapter * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg,bool sleep_ok)12484 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
12485 unsigned int *ipg, bool sleep_ok)
12486 {
12487 unsigned int v, addr, bpt, cpt;
12488
12489 if (kbps) {
12490 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
12491 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
12492 if (sched & 1)
12493 v >>= 16;
12494 bpt = (v >> 8) & 0xff;
12495 cpt = v & 0xff;
12496 if (!cpt)
12497 *kbps = 0; /* scheduler disabled */
12498 else {
12499 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
12500 *kbps = (v * bpt) / 125;
12501 }
12502 }
12503 if (ipg) {
12504 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
12505 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
12506 if (sched & 1)
12507 v >>= 16;
12508 v &= 0xffff;
12509 *ipg = (10000 * v) / core_ticks_per_usec(adap);
12510 }
12511 }
12512
12513 /**
12514 * t4_load_cfg - download config file
12515 * @adap: the adapter
12516 * @cfg_data: the cfg text file to write
12517 * @size: text file size
12518 *
12519 * Write the supplied config text file to the card's serial flash.
12520 */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)12521 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
12522 {
12523 int ret, i, n, cfg_addr;
12524 unsigned int addr, len;
12525 unsigned int flash_cfg_start_sec;
12526
12527 cfg_addr = t4_flash_cfg_addr(adap, &len);
12528 if (cfg_addr < 0)
12529 return cfg_addr;
12530
12531 if (size > len) {
12532 CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
12533 return -EFBIG;
12534 }
12535
12536 flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
12537 i = DIV_ROUND_UP(len, SF_SEC_SIZE);
12538 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
12539 flash_cfg_start_sec + i - 1);
12540 /*
12541 * If size == 0 then we're simply erasing the FLASH sectors associated
12542 * with the on-adapter Firmware Configuration File.
12543 */
12544 if (ret || size == 0)
12545 goto out;
12546
12547 /* this will write to the flash up to SF_PAGE_SIZE at a time */
12548 addr = cfg_addr;
12549 for (i = 0; i < size; i += SF_PAGE_SIZE) {
12550 n = min(size - i, SF_PAGE_SIZE);
12551 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
12552 if (ret)
12553 goto out;
12554 addr += SF_PAGE_SIZE;
12555 cfg_data += SF_PAGE_SIZE;
12556 }
12557
12558 out:
12559 if (ret)
12560 CH_ERR(adap, "config file %s failed %d\n",
12561 (size == 0 ? "clear" : "download"), ret);
12562 return ret;
12563 }
12564
12565 /**
12566 * t5_fw_init_extern_mem - initialize the external memory
12567 * @adap: the adapter
12568 *
12569 * Initializes the external memory on T5.
12570 */
t5_fw_init_extern_mem(struct adapter * adap)12571 int t5_fw_init_extern_mem(struct adapter *adap)
12572 {
12573 u32 params[1], val[1];
12574 int ret;
12575
12576 if (!is_t5(adap))
12577 return 0;
12578
12579 val[0] = 0xff; /* Initialize all MCs */
12580 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12581 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
12582 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
12583 FW_CMD_MAX_TIMEOUT);
12584
12585 return ret;
12586 }
12587
/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Bit 7 set identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC,	/* 1024 chunks * 512B = 512KB max */
	VENDOR_ID = 0x1425,		/* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350	/* "PCIR" as a little-endian u32 */
};
12658
/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID into every eligible image chained in the
 * boot BIOS ROM, recomputing the legacy checksum where required.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device IDs.
	 * NOTE(review): assumes a well-formed ROM whose last image has the
	 * 0x80 indicator bit set; otherwise this walks past the end of the
	 * buffer.  Confirm callers validate the image first.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Legacy.  Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x03: EFI.  Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): stored in host byte order here;
			 * verify this is intended for the little-endian
			 * on-ROM field.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0 so it does not
			 * contribute to the sum recomputed below.
			 */
			header->cksum = 0x0;

			/*
			 * Sum every byte of this image; size512 is the image
			 * length in units of 512 bytes.
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * The ROM checksum makes all bytes of the image sum
			 * to zero, so store the negated sum.  Offset 7 is
			 * the cksum field of the legacy header.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * EFI image: patch the Device ID only; no legacy
			 * checksum byte is recomputed for this code type.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Bit 7 of the indicator marks the last image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Advance to the next image in the chain.
		 */
		cur_header += header->size512 * 512;
	}
}
12737
12738 /*
12739 * t4_load_boot - download boot flash
12740 * @adapter: the adapter
12741 * @boot_data: the boot image to write
12742 * @boot_addr: offset in flash to write boot_data
12743 * @size: image size
12744 *
12745 * Write the supplied boot image to the card's serial flash.
12746 * The boot image has the following sections: a 28-byte header and the
12747 * boot image.
12748 */
t4_load_boot(struct adapter * adap,u8 * boot_data,unsigned int boot_addr,unsigned int size)12749 int t4_load_boot(struct adapter *adap, u8 *boot_data,
12750 unsigned int boot_addr, unsigned int size)
12751 {
12752 pci_exp_rom_header_t *header;
12753 int pcir_offset ;
12754 pcir_data_t *pcir_header;
12755 int ret, addr;
12756 uint16_t device_id;
12757 unsigned int i, start, len;
12758 unsigned int boot_sector = boot_addr * 1024;
12759
12760 /*
12761 * Make sure the boot image does not exceed its available space.
12762 */
12763 len = 0;
12764 start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
12765 if (boot_sector + size > start + len) {
12766 CH_ERR(adap, "boot data is larger than available BOOT area\n");
12767 return -EFBIG;
12768 }
12769
12770 /*
12771 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
12772 * and Boot configuration data sections. These 3 boot sections span
12773 * the entire FLASH_LOC_BOOT_AREA.
12774 */
12775 i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
12776 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
12777 (boot_sector >> 16) + i - 1);
12778
12779 /*
12780 * If size == 0 then we're simply erasing the FLASH sectors associated
12781 * with the on-adapter option ROM file
12782 */
12783 if (ret || (size == 0))
12784 goto out;
12785
12786 /* Get boot header */
12787 header = (pci_exp_rom_header_t *)boot_data;
12788 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
12789 /* PCIR Data Structure */
12790 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
12791
12792 /*
12793 * Perform some primitive sanity testing to avoid accidentally
12794 * writing garbage over the boot sectors. We ought to check for
12795 * more but it's not worth it for now ...
12796 */
12797 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
12798 CH_ERR(adap, "boot image too small/large\n");
12799 return -EFBIG;
12800 }
12801
12802 #ifndef CHELSIO_T4_DIAGS
12803 /*
12804 * Check BOOT ROM header signature
12805 */
12806 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
12807 CH_ERR(adap, "Boot image missing signature\n");
12808 return -EINVAL;
12809 }
12810
12811 /*
12812 * Check PCI header signature
12813 */
12814 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
12815 CH_ERR(adap, "PCI header missing signature\n");
12816 return -EINVAL;
12817 }
12818
12819 /*
12820 * Check Vendor ID matches Chelsio ID
12821 */
12822 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
12823 CH_ERR(adap, "Vendor ID missing signature\n");
12824 return -EINVAL;
12825 }
12826 #endif
12827
12828 /*
12829 * Retrieve adapter's device ID
12830 */
12831 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
12832 /* Want to deal with PF 0 so I strip off PF 4 indicator */
12833 device_id = device_id & 0xf0ff;
12834
12835 /*
12836 * Check PCIE Device ID
12837 */
12838 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
12839 /*
12840 * Change the device ID in the Boot BIOS image to match
12841 * the Device ID of the current adapter.
12842 */
12843 modify_device_id(device_id, boot_data);
12844 }
12845
12846 /*
12847 * Skip over the first SF_PAGE_SIZE worth of data and write it after
12848 * we finish copying the rest of the boot image. This will ensure
12849 * that the BIOS boot header will only be written if the boot image
12850 * was written in full.
12851 */
12852 addr = boot_sector;
12853 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
12854 addr += SF_PAGE_SIZE;
12855 boot_data += SF_PAGE_SIZE;
12856 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
12857 if (ret)
12858 goto out;
12859 }
12860
12861 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
12862 (const u8 *)header, 0);
12863
12864 out:
12865 if (ret)
12866 CH_ERR(adap, "boot image download failed, error %d\n", ret);
12867 return ret;
12868 }
12869
12870 /*
12871 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
12872 * @adapter: the adapter
12873 *
12874 * Return the address within the flash where the OptionROM Configuration
12875 * is stored, or an error if the device FLASH is too small to contain
12876 * a OptionROM Configuration.
12877 */
t4_flash_bootcfg_addr(struct adapter * adapter,unsigned int * lenp)12878 static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
12879 {
12880 unsigned int len = 0;
12881 const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
12882
12883 /*
12884 * If the device FLASH isn't large enough to hold a Firmware
12885 * Configuration File, return an error.
12886 */
12887 if (adapter->params.sf_size < start + len)
12888 return -ENOSPC;
12889 if (lenp != NULL)
12890 *lenp = len;
12891 return (start);
12892 }
12893
t4_load_bootcfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)12894 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
12895 {
12896 int ret, i, n, cfg_addr;
12897 unsigned int addr, len;
12898 unsigned int flash_cfg_start_sec;
12899
12900 cfg_addr = t4_flash_bootcfg_addr(adap, &len);
12901 if (cfg_addr < 0)
12902 return cfg_addr;
12903
12904 if (size > len) {
12905 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
12906 return -EFBIG;
12907 }
12908
12909 flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
12910 i = DIV_ROUND_UP(len, SF_SEC_SIZE);
12911 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
12912 flash_cfg_start_sec + i - 1);
12913
12914 /*
12915 * If size == 0 then we're simply erasing the FLASH sectors associated
12916 * with the on-adapter OptionROM Configuration File.
12917 */
12918 if (ret || size == 0)
12919 goto out;
12920
12921 /* this will write to the flash up to SF_PAGE_SIZE at a time */
12922 addr = cfg_addr;
12923 for (i = 0; i < size; i += SF_PAGE_SIZE) {
12924 n = min(size - i, SF_PAGE_SIZE);
12925 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
12926 if (ret)
12927 goto out;
12928 addr += SF_PAGE_SIZE;
12929 cfg_data += SF_PAGE_SIZE;
12930 }
12931
12932 out:
12933 if (ret)
12934 CH_ERR(adap, "boot config data %s failed %d\n",
12935 (size == 0 ? "clear" : "download"), ret);
12936 return ret;
12937 }
12938
/**
 *	t4_set_filter_cfg - set up filter mode/mask and ingress config.
 *	@adap: the adapter
 *	@mode: a bitmap selecting which optional filter components to enable
 *	@mask: a bitmap selecting which components to enable in filter mask
 *	@vnic_mode: the ingress config/vnic mode setting
 *
 *	Sets the filter mode and mask by selecting the optional components to
 *	enable in filter tuples.  Returns 0 on success and a negative error if
 *	the requested mode needs more bits than are available for optional
 *	components.  The filter mask must be a subset of the filter mode.
 *	Passing -1 for @mode, @mask, or @vnic_mode leaves that setting alone.
 */
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
{
	int i, nbits, rc;
	uint32_t param, val;
	uint16_t fmode, fmask;
	const int maxbits = adap->chip_params->filter_opt_len;
	const int nopt = adap->chip_params->filter_num_opt;
	int width;

	if (mode != -1 || mask != -1) {
		if (mode != -1) {
			/* Count the bits consumed by the requested fields. */
			fmode = mode;
			nbits = 0;
			for (i = 0; i < nopt; i++) {
				if (fmode & (1 << i))
					nbits += t4_filter_field_width(adap, i);
			}
			if (nbits > maxbits) {
				CH_ERR(adap, "optional fields in the filter "
				    "mode (0x%x) add up to %d bits "
				    "(must be <= %db). Remove some fields and "
				    "try again.\n", fmode, nbits, maxbits);
				return -E2BIG;
			}

			/*
			 * Hardware < T7 wants the bits to be maxed out. Keep
			 * setting them until there's no room for more.
			 */
			if (chip_id(adap) < CHELSIO_T7) {
				for (i = 0; i < nopt; i++) {
					if (fmode & (1 << i))
						continue;
					width = t4_filter_field_width(adap, i);
					if (nbits + width <= maxbits) {
						fmode |= 1 << i;
						nbits += width;
						if (nbits == maxbits)
							break;
					}
				}
			}

			/* Shrink the mask so it stays a subset of the mode. */
			fmask = fmode & adap->params.tp.filter_mask;
			if (fmask != adap->params.tp.filter_mask) {
				CH_WARN(adap,
				    "filter mask will be changed from 0x%x to "
				    "0x%x to comply with the filter mode (0x%x).\n",
				    adap->params.tp.filter_mask, fmask, fmode);
			}
		} else {
			/* Mask-only change: validate against current mode. */
			fmode = adap->params.tp.filter_mode;
			fmask = mask;
			if ((fmode | fmask) != fmode) {
				CH_ERR(adap,
				    "filter mask (0x%x) must be a subset of "
				    "the filter mode (0x%x).\n", fmask, fmode);
				return -EINVAL;
			}
		}

		/* Ask FW to install the new mode/mask pair. */
		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
		val = V_FW_PARAMS_PARAM_FILTER_MODE(fmode) |
		    V_FW_PARAMS_PARAM_FILTER_MASK(fmask);
		rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param,
		    &val);
		if (rc < 0)
			return rc;
	}

	if (vnic_mode != -1) {
		/* Ask FW to install the new ingress-config/vnic mode. */
		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
		val = vnic_mode;
		rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param,
		    &val);
		if (rc < 0)
			return rc;
	}

	/* Refresh the driver's cached copies of what was just changed. */
	read_filter_mode_and_ingress_config(adap);

	return 0;
}
13039
13040 /**
13041 * t4_clr_port_stats - clear port statistics
13042 * @adap: the adapter
13043 * @idx: the port index
13044 *
13045 * Clear HW statistics for the given port.
13046 */
t4_clr_port_stats(struct adapter * adap,int idx)13047 void t4_clr_port_stats(struct adapter *adap, int idx)
13048 {
13049 struct port_info *pi;
13050 int i, port_id, tx_chan;
13051 u32 bgmap, port_base_addr;
13052
13053 port_id = adap->port_map[idx];
13054 MPASS(port_id >= 0 && port_id <= adap->params.nports);
13055 pi = adap->port[port_id];
13056
13057 for (tx_chan = pi->tx_chan;
13058 tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
13059 port_base_addr = t4_port_reg(adap, tx_chan, 0);
13060
13061 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
13062 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
13063 t4_write_reg(adap, port_base_addr + i, 0);
13064 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
13065 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
13066 t4_write_reg(adap, port_base_addr + i, 0);
13067 }
13068 bgmap = pi->mps_bg_map;
13069 for (i = 0; i < 4; i++)
13070 if (bgmap & (1 << i)) {
13071 t4_write_reg(adap,
13072 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
13073 t4_write_reg(adap,
13074 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
13075 }
13076 }
13077
13078 /**
13079 * t4_i2c_io - read/write I2C data from adapter
13080 * @adap: the adapter
13081 * @port: Port number if per-port device; <0 if not
13082 * @devid: per-port device ID or absolute device ID
13083 * @offset: byte offset into device I2C space
13084 * @len: byte length of I2C space data
13085 * @buf: buffer in which to return I2C data for read
13086 * buffer which holds the I2C data for write
13087 * @write: if true, do a write; else do a read
13088 * Reads/Writes the I2C data from/to the indicated device and location.
13089 */
t4_i2c_io(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf,bool write)13090 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
13091 int port, unsigned int devid,
13092 unsigned int offset, unsigned int len,
13093 u8 *buf, bool write)
13094 {
13095 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
13096 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
13097 int ret = 0;
13098
13099 if (len > I2C_PAGE_SIZE)
13100 return -EINVAL;
13101
13102 /* Dont allow reads that spans multiple pages */
13103 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
13104 return -EINVAL;
13105
13106 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
13107 ldst_cmd.op_to_addrspace =
13108 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
13109 F_FW_CMD_REQUEST |
13110 (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
13111 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
13112 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
13113 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
13114 ldst_cmd.u.i2c.did = devid;
13115
13116 while (len > 0) {
13117 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
13118
13119 ldst_cmd.u.i2c.boffset = offset;
13120 ldst_cmd.u.i2c.blen = i2c_len;
13121
13122 if (write)
13123 memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
13124
13125 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
13126 write ? NULL : &ldst_rpl);
13127 if (ret)
13128 break;
13129
13130 if (!write)
13131 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
13132 offset += i2c_len;
13133 buf += i2c_len;
13134 len -= i2c_len;
13135 }
13136
13137 return ret;
13138 }
13139
/* Read @len bytes from I2C device @devid at @offset; see t4_i2c_io(). */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
}
13147
/* Write @len bytes to I2C device @devid at @offset; see t4_i2c_io(). */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
}
13155
13156 /**
13157 * t4_sge_ctxt_rd - read an SGE context through FW
13158 * @adap: the adapter
13159 * @mbox: mailbox to use for the FW command
13160 * @cid: the context id
13161 * @ctype: the context type
13162 * @data: where to store the context data
13163 *
13164 * Issues a FW command through the given mailbox to read an SGE context.
13165 */
t4_sge_ctxt_rd(struct adapter * adap,unsigned int mbox,unsigned int cid,enum ctxt_type ctype,u32 * data)13166 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
13167 enum ctxt_type ctype, u32 *data)
13168 {
13169 int ret;
13170 struct fw_ldst_cmd c;
13171
13172 if (ctype == CTXT_EGRESS)
13173 ret = FW_LDST_ADDRSPC_SGE_EGRC;
13174 else if (ctype == CTXT_INGRESS)
13175 ret = FW_LDST_ADDRSPC_SGE_INGC;
13176 else if (ctype == CTXT_FLM)
13177 ret = FW_LDST_ADDRSPC_SGE_FLMC;
13178 else
13179 ret = FW_LDST_ADDRSPC_SGE_CONMC;
13180
13181 memset(&c, 0, sizeof(c));
13182 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
13183 F_FW_CMD_REQUEST | F_FW_CMD_READ |
13184 V_FW_LDST_CMD_ADDRSPACE(ret));
13185 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
13186 c.u.idctxt.physid = cpu_to_be32(cid);
13187
13188 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
13189 if (ret == 0) {
13190 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
13191 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
13192 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
13193 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
13194 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
13195 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
13196 if (chip_id(adap) > CHELSIO_T6)
13197 data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
13198 }
13199 return ret;
13200 }
13201
/**
 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 *	@adap: the adapter
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Reads an SGE context directly, bypassing FW.  This is only for
 *	debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	/* Kick off the context read, then wait for BUSY to clear. */
	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret) {
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
		/*
		 * After the loop, i points at the register that follows
		 * A_SGE_CTXT_DATA5; chips newer than T6 have a 7th context
		 * data word there.
		 */
		if (chip_id(adap) > CHELSIO_T6)
			*data++ = t4_read_reg(adap, i);
	}
	return ret;
}
13227
t4_sched_config(struct adapter * adapter,int type,int minmaxen,int sleep_ok)13228 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
13229 int sleep_ok)
13230 {
13231 struct fw_sched_cmd cmd;
13232
13233 memset(&cmd, 0, sizeof(cmd));
13234 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
13235 F_FW_CMD_REQUEST |
13236 F_FW_CMD_WRITE);
13237 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
13238
13239 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
13240 cmd.u.config.type = type;
13241 cmd.u.config.minmaxen = minmaxen;
13242
13243 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
13244 NULL, sleep_ok);
13245 }
13246
t4_sched_params(struct adapter * adapter,int type,int level,int mode,int rateunit,int ratemode,int channel,int cl,int minrate,int maxrate,int weight,int pktsize,int burstsize,int sleep_ok)13247 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
13248 int rateunit, int ratemode, int channel, int cl,
13249 int minrate, int maxrate, int weight, int pktsize,
13250 int burstsize, int sleep_ok)
13251 {
13252 struct fw_sched_cmd cmd;
13253
13254 memset(&cmd, 0, sizeof(cmd));
13255 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
13256 F_FW_CMD_REQUEST |
13257 F_FW_CMD_WRITE);
13258 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
13259
13260 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
13261 cmd.u.params.type = type;
13262 cmd.u.params.level = level;
13263 cmd.u.params.mode = mode;
13264 cmd.u.params.ch = channel;
13265 cmd.u.params.cl = cl;
13266 cmd.u.params.unit = rateunit;
13267 cmd.u.params.rate = ratemode;
13268 cmd.u.params.min = cpu_to_be32(minrate);
13269 cmd.u.params.max = cpu_to_be32(maxrate);
13270 cmd.u.params.weight = cpu_to_be16(weight);
13271 cmd.u.params.pktsize = cpu_to_be16(pktsize);
13272 cmd.u.params.burstsize = cpu_to_be16(burstsize);
13273
13274 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
13275 NULL, sleep_ok);
13276 }
13277
t4_sched_params_ch_rl(struct adapter * adapter,int channel,int ratemode,unsigned int maxrate,int sleep_ok)13278 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
13279 unsigned int maxrate, int sleep_ok)
13280 {
13281 struct fw_sched_cmd cmd;
13282
13283 memset(&cmd, 0, sizeof(cmd));
13284 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
13285 F_FW_CMD_REQUEST |
13286 F_FW_CMD_WRITE);
13287 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
13288
13289 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
13290 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
13291 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
13292 cmd.u.params.ch = channel;
13293 cmd.u.params.rate = ratemode; /* REL or ABS */
13294 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
13295
13296 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
13297 NULL, sleep_ok);
13298 }
13299
t4_sched_params_cl_wrr(struct adapter * adapter,int channel,int cl,int weight,int sleep_ok)13300 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
13301 int weight, int sleep_ok)
13302 {
13303 struct fw_sched_cmd cmd;
13304
13305 if (weight < 0 || weight > 100)
13306 return -EINVAL;
13307
13308 memset(&cmd, 0, sizeof(cmd));
13309 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
13310 F_FW_CMD_REQUEST |
13311 F_FW_CMD_WRITE);
13312 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
13313
13314 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
13315 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
13316 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
13317 cmd.u.params.ch = channel;
13318 cmd.u.params.cl = cl;
13319 cmd.u.params.weight = cpu_to_be16(weight);
13320
13321 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
13322 NULL, sleep_ok);
13323 }
13324
t4_sched_params_cl_rl_kbps(struct adapter * adapter,int channel,int cl,int mode,unsigned int maxrate,int pktsize,int sleep_ok)13325 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
13326 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
13327 {
13328 struct fw_sched_cmd cmd;
13329
13330 memset(&cmd, 0, sizeof(cmd));
13331 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
13332 F_FW_CMD_REQUEST |
13333 F_FW_CMD_WRITE);
13334 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
13335
13336 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
13337 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
13338 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
13339 cmd.u.params.mode = mode;
13340 cmd.u.params.ch = channel;
13341 cmd.u.params.cl = cl;
13342 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
13343 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
13344 cmd.u.params.max = cpu_to_be32(maxrate);
13345 cmd.u.params.pktsize = cpu_to_be16(pktsize);
13346
13347 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
13348 NULL, sleep_ok);
13349 }
13350
13351 /*
13352 * t4_config_watchdog - configure (enable/disable) a watchdog timer
13353 * @adapter: the adapter
13354 * @mbox: mailbox to use for the FW command
13355 * @pf: the PF owning the queue
13356 * @vf: the VF owning the queue
13357 * @timeout: watchdog timeout in ms
13358 * @action: watchdog timer / action
13359 *
13360 * There are separate watchdog timers for each possible watchdog
13361 * action. Configure one of the watchdog timers by setting a non-zero
13362 * timeout. Disable a watchdog timer by using a timeout of zero.
13363 */
t4_config_watchdog(struct adapter * adapter,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int timeout,unsigned int action)13364 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
13365 unsigned int pf, unsigned int vf,
13366 unsigned int timeout, unsigned int action)
13367 {
13368 struct fw_watchdog_cmd wdog;
13369 unsigned int ticks;
13370
13371 /*
13372 * The watchdog command expects a timeout in units of 10ms so we need
13373 * to convert it here (via rounding) and force a minimum of one 10ms
13374 * "tick" if the timeout is non-zero but the conversion results in 0
13375 * ticks.
13376 */
13377 ticks = (timeout + 5)/10;
13378 if (timeout && !ticks)
13379 ticks = 1;
13380
13381 memset(&wdog, 0, sizeof wdog);
13382 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
13383 F_FW_CMD_REQUEST |
13384 F_FW_CMD_WRITE |
13385 V_FW_PARAMS_CMD_PFN(pf) |
13386 V_FW_PARAMS_CMD_VFN(vf));
13387 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
13388 wdog.timeout = cpu_to_be32(ticks);
13389 wdog.action = cpu_to_be32(action);
13390
13391 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
13392 }
13393
t4_get_devlog_level(struct adapter * adapter,unsigned int * level)13394 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
13395 {
13396 struct fw_devlog_cmd devlog_cmd;
13397 int ret;
13398
13399 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
13400 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
13401 F_FW_CMD_REQUEST | F_FW_CMD_READ);
13402 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
13403 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
13404 sizeof(devlog_cmd), &devlog_cmd);
13405 if (ret)
13406 return ret;
13407
13408 *level = devlog_cmd.level;
13409 return 0;
13410 }
13411
t4_set_devlog_level(struct adapter * adapter,unsigned int level)13412 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
13413 {
13414 struct fw_devlog_cmd devlog_cmd;
13415
13416 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
13417 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
13418 F_FW_CMD_REQUEST |
13419 F_FW_CMD_WRITE);
13420 devlog_cmd.level = level;
13421 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
13422 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
13423 sizeof(devlog_cmd), &devlog_cmd);
13424 }
13425
t4_configure_add_smac(struct adapter * adap)13426 int t4_configure_add_smac(struct adapter *adap)
13427 {
13428 unsigned int param, val;
13429 int ret = 0;
13430
13431 adap->params.smac_add_support = 0;
13432 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
13433 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
13434 /* Query FW to check if FW supports adding source mac address
13435 * to TCAM feature or not.
13436 * If FW returns 1, driver can use this feature and driver need to send
13437 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
13438 * enable adding smac to TCAM.
13439 */
13440 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
13441 if (ret)
13442 return ret;
13443
13444 if (val == 1) {
13445 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
13446 ¶m, &val);
13447 if (!ret)
13448 /* Firmware allows adding explicit TCAM entries.
13449 * Save this internally.
13450 */
13451 adap->params.smac_add_support = 1;
13452 }
13453
13454 return ret;
13455 }
13456
t4_configure_ringbb(struct adapter * adap)13457 int t4_configure_ringbb(struct adapter *adap)
13458 {
13459 unsigned int param, val;
13460 int ret = 0;
13461
13462 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
13463 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
13464 /* Query FW to check if FW supports ring switch feature or not.
13465 * If FW returns 1, driver can use this feature and driver need to send
13466 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
13467 * enable the ring backbone configuration.
13468 */
13469 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
13470 if (ret < 0) {
13471 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
13472 ret);
13473 goto out;
13474 }
13475
13476 if (val != 1) {
13477 CH_ERR(adap, "FW doesnot support ringbackbone features\n");
13478 goto out;
13479 }
13480
13481 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
13482 if (ret < 0) {
13483 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
13484 ret);
13485 goto out;
13486 }
13487
13488 out:
13489 return ret;
13490 }
13491
/*
 * t4_set_vlan_acl - Set a VLAN id for the specified VF
 * @adap: the adapter
 * @pf: the PF that instantiated the VF
 * @vf: one of the VFs instantiated by the specified PF
 * @vlan: the VLAN id to enforce, or 0 to remove the VLAN ACL
 */
int t4_set_vlan_acl(struct adapter *adap, unsigned int pf, unsigned int vf,
		    u16 vlan)
{
	struct fw_acl_vlan_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC |
	    V_FW_ACL_VLAN_CMD_PFN(pf) | V_FW_ACL_VLAN_CMD_VFN(vf));
	cmd.en_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
	    V_FW_ACL_VLAN_CMD_PMASK(1 << pf));
	if (vlan != 0) {
		/* Enable the ACL and drop all packets that do not match
		 * the single configured vlan id.
		 */
		cmd.en_to_len16 |= cpu_to_be32(F_FW_ACL_VLAN_CMD_EN);
		cmd.dropnovlan_fm = F_FW_ACL_VLAN_CMD_DROPNOVLAN |
		    F_FW_ACL_VLAN_CMD_FM;
		cmd.nvlan = 1;
		cmd.vlanid[0] = cpu_to_be16(vlan);
	}

	return t4_wr_mbox(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
}
13528
13529 /**
13530 * t4_del_mac - Removes the exact-match filter for a MAC address
13531 * @adap: the adapter
13532 * @mbox: mailbox to use for the FW command
13533 * @viid: the VI id
13534 * @addr: the MAC address value
13535 * @smac: if true, delete from only the smac region of MPS
13536 *
 * Frees the MPS TCAM exact-match filter for @addr on the given VI using the
 * firmware's MAC-based free operation (FW_VI_MAC_MAC_BASED_FREE).
 *
 * Returns a negative error number on failure.
13543 */
int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       const u8 *addr, bool smac)
{
	struct fw_vi_mac_cmd cmd;
	struct fw_vi_mac_exact *entry = cmd.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1) |
	    (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));

	/* MAC-based free: firmware locates the entry matching this address. */
	memcpy(entry->macaddr, addr, sizeof(entry->macaddr));
	entry->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
	    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));

	rc = t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), &cmd);
	if (rc == 0) {
		rc = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(entry->valid_to_idx));
		/* NOTE(review): matches upstream, but note this is the
		 * inverse of t4_add_mac's check — an in-range index is
		 * mapped to -ENOMEM here.  Confirm against FW docs.
		 */
		if (rc < max_mac_addr)
			return -ENOMEM;
	}

	return rc;
}
13574
13575 /**
13576 * t4_add_mac - Adds an exact-match filter for a MAC address
13577 * @adap: the adapter
13578 * @mbox: mailbox to use for the FW command
13579 * @viid: the VI id
13580 * @idx: index of existing filter for old value of MAC address, or -1
13581 * @addr: the new MAC address value
13582 * @persist: whether a new MAC allocation should be persistent
 * @smt_idx: if non-NULL, storage for the SMT index assigned to the address
13584 * @smac: if true, update only the smac region of MPS
13585 *
13586 * Modifies an exact-match filter and sets it to the new MAC address if
13587 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
13588 * latter case the address is added persistently if @persist is %true.
13589 *
13590 * Returns a negative error number or the index of the filter with the new
13591 * MAC value. Note that this index may differ from @idx.
13592 */
int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
{
	struct fw_vi_mac_cmd cmd;
	struct fw_vi_mac_exact *entry = cmd.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
	int rc, mode;

	/* A negative index requests a fresh allocation. */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = (smt_idx != NULL) ? FW_VI_MAC_SMT_AND_MPSTCAM :
	    FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1) |
	    (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
	entry->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
	    V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(entry->macaddr, addr, sizeof(entry->macaddr));

	rc = t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0)
		return rc;

	rc = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(entry->valid_to_idx));
	if (rc >= max_mac_addr)
		return -ENOMEM;

	if (smt_idx != NULL) {
		if (adap->params.viid_smt_extn_support) {
			/* Firmware reports the chosen SMT index directly. */
			*smt_idx = G_FW_VI_MAC_CMD_SMTID(
			    be32_to_cpu(cmd.op_to_viid));
		} else if (chip_id(adap) <= CHELSIO_T5) {
			/* T4/T5: SMT holds 256 SMAC entries organized in
			 * 128 rows of 2 entries each.
			 */
			*smt_idx = (viid & M_FW_VIID_VIN) << 1;
		} else {
			/* T6: SMT holds 256 SMAC entries in 256 rows. */
			*smt_idx = viid & M_FW_VIID_VIN;
		}
	}

	return rc;
}
13642